/kernel/linux/linux-6.6/fs/btrfs/lzo.c

  copy_compressed_data_to_page():
    140  struct page *cur_page;   (local)
    152  cur_page = out_pages[*cur_out / PAGE_SIZE];
    154  if (!cur_page) {
    155  cur_page = alloc_page(GFP_NOFS);
    156  if (!cur_page)
    158  out_pages[*cur_out / PAGE_SIZE] = cur_page;
    161  kaddr = kmap_local_page(cur_page);
    178  cur_page = out_pages[*cur_out / PAGE_SIZE];
    180  if (!cur_page) {
    181  cur_page ...   (match truncated)
  copy_compressed_segment():
    316  struct page *cur_page;   (local)
  lzo_decompress_bio():
    366  struct page *cur_page;   (local)
  (remaining matches omitted)
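The pattern in these lzo.c lines is a sparsely allocated, page-indexed output buffer: the destination page for byte offset *cur_out is out_pages[*cur_out / PAGE_SIZE], and a page is allocated only when the copy first touches it. Below is a minimal userspace sketch of that indexing and lazy allocation, assuming a fixed 4 KiB page size and plain malloc() in place of alloc_page()/kmap_local_page(); the function name and signature are illustrative, not the btrfs API.

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096u

/* Copy len bytes from src into a sparse, page-indexed output buffer. */
static int copy_to_paged_buffer(unsigned char **out_pages, size_t nr_pages,
                                size_t *cur_out, const void *src, size_t len)
{
        const unsigned char *in = src;

        while (len) {
                size_t idx = *cur_out / PAGE_SIZE;   /* which output page */
                size_t off = *cur_out % PAGE_SIZE;   /* offset within it */
                size_t chunk = PAGE_SIZE - off;

                if (chunk > len)
                        chunk = len;
                if (idx >= nr_pages)
                        return -1;                   /* output array too small */
                if (!out_pages[idx]) {               /* allocate on first touch */
                        out_pages[idx] = malloc(PAGE_SIZE);
                        if (!out_pages[idx])
                                return -1;
                }
                memcpy(out_pages[idx] + off, in, chunk);
                in += chunk;
                *cur_out += chunk;
                len -= chunk;
        }
        return 0;
}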
/kernel/linux/linux-5.10/fs/ntfs/compress.c

  ntfs_read_compressed_block():
    499  unsigned int xpage, max_page, cur_page, cur_ofs, i;   (local)
    587  cur_page = 0;
    722  cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
    737  for (; cur_page < cb_max_page; cur_page++) {
    738  page = pages[cur_page];
    750  if (cur_page == xpage)
    754  pages[cur_page] = NULL;
    763  page = pages[cur_page];
    775  unsigned int cur2_page = cur_page;
  (remaining matches omitted)
/kernel/linux/linux-6.6/fs/ntfs/compress.c

  ntfs_read_compressed_block():
    499  unsigned int xpage, max_page, cur_page, cur_ofs, i;   (local)
    587  cur_page = 0;
    722  cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
    737  for (; cur_page < cb_max_page; cur_page++) {
    738  page = pages[cur_page];
    750  if (cur_page == xpage)
    754  pages[cur_page] = NULL;
    763  page = pages[cur_page];
    775  unsigned int cur2_page = cur_page;
  (remaining matches omitted)
/kernel/linux/linux-6.6/drivers/xen/xen-front-pgdir-shbuf.c

  backend_map():
    251  int ret, cur_gref, cur_dir_page, cur_page, grefs_left;   (local)
    272  cur_page = 0;
    284  addr = xen_page_to_vaddr(buf->pages[cur_page]);
    285  gnttab_set_map_op(&map_ops[cur_page], addr,
    289  cur_page++;
    298  for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
    299  if (likely(map_ops[cur_page].status == GNTST_okay)) {
    300  buf->backend_map_handles[cur_page] ...   (match truncated)
  (remaining matches omitted)
/kernel/linux/linux-5.10/drivers/xen/xen-front-pgdir-shbuf.c

  backend_map():
    260  int ret, cur_gref, cur_dir_page, cur_page, grefs_left;   (local)
    281  cur_page = 0;
    293  addr = xen_page_to_vaddr(buf->pages[cur_page]);
    294  gnttab_set_map_op(&map_ops[cur_page], addr,
    298  cur_page++;
    307  for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
    308  buf->backend_map_handles[cur_page] = map_ops[cur_page] ...   (match truncated)
  (remaining matches omitted)
/kernel/linux/linux-5.10/fs/direct-io.c

    101  struct page *cur_page;   /* The page */   (struct member)
    705  * Attempt to put the current chunk of 'cur_page' into the current BIO. If
    715  ret = bio_add_page(sdio->bio, sdio->cur_page,   (dio_bio_add_page())
    723  get_page(sdio->cur_page);   (dio_bio_add_page())
    734  * Put cur_page under IO. The section of cur_page which is described by
    735  * cur_page_offset,cur_page_len is put into a BIO. The section of cur_page
    740  * The caller of this function is responsible for removing cur_page from the
    825  if (sdio->cur_page == page &&   (submit_page_section())
    836  if (sdio->cur_page) {   (submit_page_section())
  (remaining matches omitted)
/kernel/linux/linux-6.6/fs/direct-io.c

    100  struct page *cur_page;   /* The page */   (struct member)
    681  * Attempt to put the current chunk of 'cur_page' into the current BIO. If
    691  ret = bio_add_page(sdio->bio, sdio->cur_page,   (dio_bio_add_page())
    699  dio_pin_page(dio, sdio->cur_page);   (dio_bio_add_page())
    710  * Put cur_page under IO. The section of cur_page which is described by
    711  * cur_page_offset,cur_page_len is put into a BIO. The section of cur_page
    716  * The caller of this function is responsible for removing cur_page from the
    802  if (sdio->cur_page == page &&   (submit_page_section())
    813  if (sdio->cur_page) {   (submit_page_section())
  (remaining matches omitted)
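The direct-io.c matches revolve around deferring I/O for the current page: submit_page_section() keeps one pending (cur_page, offset, len) chunk, extends it when the next chunk is contiguous on the same page, and only flushes the old chunk into a BIO when it cannot be merged. A stripped-down sketch of that merge-or-flush bookkeeping, with a hypothetical pending_chunk struct and flush callback standing in for the sdio state and BIO submission (the real code also checks block contiguity and BIO size limits):

#include <stddef.h>

/* One pending range of a page waiting to be submitted. */
struct pending_chunk {
        const void *page;   /* stands in for struct page * */
        size_t offset;      /* byte offset within the page */
        size_t len;         /* length of the pending range */
};

typedef void (*flush_fn)(const struct pending_chunk *chunk);

/*
 * Merge the new range into the pending chunk when it is contiguous on the
 * same page; otherwise flush the pending chunk and start a new one.
 */
static void queue_chunk(struct pending_chunk *cur, const void *page,
                        size_t offset, size_t len, flush_fn flush)
{
        if (cur->page == page && offset == cur->offset + cur->len) {
                cur->len += len;        /* coalesce with the pending range */
                return;
        }
        if (cur->page)                  /* something was pending: submit it */
                flush(cur);
        cur->page = page;
        cur->offset = offset;
        cur->len = len;
}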
/kernel/linux/linux-5.10/drivers/gpu/drm/ttm/ttm_tt.c

  ttm_tt_set_caching():
    153  struct page *cur_page;   (local)
    169  cur_page = ttm->pages[i];
    170  if (likely(cur_page != NULL)) {
    171  ret = ttm_tt_set_page_caching(cur_page,
    185  cur_page = ttm->pages[j];
    186  if (likely(cur_page != NULL)) {
    187  (void)ttm_tt_set_page_caching(cur_page, c_state,
/kernel/linux/linux-5.10/lib/scatterlist.c

  __sg_alloc_table_from_pages():
    434  unsigned int chunks, cur_page, seg_len, i, prv_len = 0;   (local)
    484  cur_page = 0;
    490  for (j = cur_page + 1; j < n_pages; j++) {
    509  chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
    510  sg_set_page(s, pages[cur_page],
    515  cur_page = j;
/kernel/linux/linux-5.10/fs/f2fs/checkpoint.c

  f2fs_get_valid_checkpoint():
    908  struct page *cp1, *cp2, *cur_page;   (local)
    935  cur_page = cp2;
    937  cur_page = cp1;
    939  cur_page = cp1;
    941  cur_page = cp2;
    947  cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
    950  if (cur_page == cp1)
    965  if (cur_page == cp2)
    972  cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
    973  if (IS_ERR(cur_page)) {
  (remaining matches omitted)
/kernel/linux/linux-6.6/fs/f2fs/checkpoint.c

  f2fs_get_valid_checkpoint():
    921  struct page *cp1, *cp2, *cur_page;   (local)
    948  cur_page = cp2;
    950  cur_page = cp1;
    952  cur_page = cp1;
    954  cur_page = cp2;
    960  cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
    963  if (cur_page == cp1)
    978  if (cur_page == cp2)
    985  cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
    986  if (IS_ERR(cur_page)) {
  (remaining matches omitted)
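f2fs keeps two on-disk checkpoint copies, and f2fs_get_valid_checkpoint() points cur_page at whichever copy validates with the newer version, falling back to the single valid copy when only one survives. The selection logic can be sketched as below; the cp_copy struct and the convention that version 0 marks an invalid copy are simplifications for illustration, not the f2fs structures.

#include <stddef.h>
#include <stdint.h>

/* Simplified stand-in for one checkpoint copy: version 0 means invalid. */
struct cp_copy {
        uint64_t version;
};

/* Return the newer valid copy, the only valid copy, or NULL if none. */
static const struct cp_copy *pick_valid_checkpoint(const struct cp_copy *cp1,
                                                   const struct cp_copy *cp2)
{
        if (cp1->version && cp2->version)
                return cp2->version > cp1->version ? cp2 : cp1;
        if (cp1->version)
                return cp1;
        if (cp2->version)
                return cp2;
        return NULL;    /* neither copy is usable */
}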
/kernel/linux/linux-6.6/lib/scatterlist.c

  sg_alloc_append_table_from_pages():
    459  unsigned int chunks, cur_page, seg_len, i, prv_len = 0;   (local)
    512  cur_page = 0;
    518  for (j = cur_page + 1; j < n_pages; j++) {
    537  chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
    538  sg_set_page(s, pages[cur_page],
    543  cur_page = j;
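Both scatterlist variants use cur_page as the start of the current run of physically contiguous pages: an inner loop advances j while pages stay adjacent, one scatterlist entry is emitted for the run, and cur_page jumps to j. A self-contained sketch of the same coalescing over an array of page frame numbers (the segment struct and helper name are illustrative, not the scatterlist API):

#include <stddef.h>
#include <stdint.h>

struct segment {
        uint64_t first_pfn;   /* first page frame of the run */
        size_t nr_pages;      /* number of contiguous pages in the run */
};

/* Returns the number of segments written to segs (segs must hold n entries). */
static size_t coalesce_pfns(const uint64_t *pfns, size_t n, struct segment *segs)
{
        size_t nsegs = 0, cur_page = 0;

        while (cur_page < n) {
                size_t j = cur_page + 1;

                /* Extend the run while the next page is physically adjacent. */
                while (j < n && pfns[j] == pfns[j - 1] + 1)
                        j++;

                segs[nsegs].first_pfn = pfns[cur_page];
                segs[nsegs].nr_pages = j - cur_page;
                nsegs++;
                cur_page = j;
        }
        return nsegs;
}

For example, with pfns = {10, 11, 12, 40, 41} this produces two segments: one of three pages starting at frame 10 and one of two pages starting at frame 40.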
/kernel/linux/linux-5.10/fs/iomap/buffered-io.c

    202  struct page *cur_page;   (struct member)
    241  struct page *page = ctx->cur_page;   (iomap_readpage_actor())
    318  struct iomap_readpage_ctx ctx = { .cur_page = page };   (iomap_readpage())
    361  if (ctx->cur_page && offset_in_page(pos + done) == 0) {   (iomap_readahead_actor())
    363  unlock_page(ctx->cur_page);   (iomap_readahead_actor())
    364  put_page(ctx->cur_page);   (iomap_readahead_actor())
    365  ctx->cur_page = NULL;   (iomap_readahead_actor())
    367  if (!ctx->cur_page) {   (iomap_readahead_actor())
    368  ctx->cur_page = readahead_page(ctx->rac);   (iomap_readahead_actor())
    417  if (ctx.cur_page) {   (iomap_readahead())
  (remaining matches omitted)
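In this readahead path, ctx->cur_page caches the page currently being filled across actor calls; it is unlocked and released once the position crosses a page boundary, and the next page is then pulled from the readahead batch. A rough userspace sketch of that caching discipline, with a hypothetical context struct and callbacks standing in for readahead_page()/put_page():

#include <stddef.h>

#define PAGE_SIZE 4096u

struct ra_ctx {
        void *cur_page;                     /* page currently being filled */
        void *(*next_page)(void *cookie);   /* fetch the next readahead page */
        void (*put_page)(void *page);       /* release a finished page */
        void *cookie;
};

/* Return the page that covers pos, recycling the cached one when possible. */
static void *ra_page_for_pos(struct ra_ctx *ctx, size_t pos)
{
        /* Crossed onto a page boundary: the cached page is finished, drop it. */
        if (ctx->cur_page && pos % PAGE_SIZE == 0) {
                ctx->put_page(ctx->cur_page);
                ctx->cur_page = NULL;
        }
        if (!ctx->cur_page)
                ctx->cur_page = ctx->next_page(ctx->cookie);
        return ctx->cur_page;
}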
/kernel/linux/linux-6.6/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c

  t7xx_dpmaif_rx_frag_alloc():
    339  struct dpmaif_bat_page *cur_page = bat_skb + cur_bat_idx;   (local)
    343  if (!cur_page->page) {
    363  cur_page->page = page;
    364  cur_page->data_bus_addr = data_base_addr;
    365  cur_page->offset = offset;
    366  cur_page->data_len = bat_req->pkt_buf_sz;
    369  data_base_addr = cur_page->data_bus_addr;
/kernel/linux/linux-6.6/arch/x86/include/asm/sev-common.h

    124  u64 cur_page : 12,   (struct member, 12-bit bit-field)
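Here cur_page is a 12-bit field packed into a 64-bit word. The same layout technique can be shown with a small standalone program; the field names and widths below are only an example of the mechanism (and rely on GCC/Clang support for 64-bit bit-fields, as the kernel header does), not the SEV-SNP definition.

#include <stdint.h>
#include <stdio.h>

struct paged_op {
        uint64_t cur_page : 12;   /* progress counter packed into the low bits */
        uint64_t reserved : 52;   /* remaining bits of the same 64-bit word */
};

int main(void)
{
        struct paged_op op = { .cur_page = 0xABC };

        printf("cur_page = 0x%llx, struct size = %zu bytes\n",
               (unsigned long long)op.cur_page, sizeof(op));
        return 0;
}

On GCC/Clang for x86-64 both fields share one 64-bit word, so the struct size printed is 8 bytes.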
/kernel/linux/linux-6.6/drivers/accel/habanalabs/common/memory.c

  alloc_sgt_from_device_pages():
    1540  int rc, i, j, nents, cur_page;   (local)
    1596  cur_page = 0;
    1609  pages[cur_page] - prop->dram_base_address;
    1627  cur_page++;
    1635  for (j = cur_page + 1 ; j < cur_npages ; j++) {
    1644  (pages[cur_page] - prop->dram_base_address);
    1653  cur_page = j;
/kernel/linux/linux-6.6/fs/ntfs3/fslog.c

  last_log_lsn():
    1789  struct RECORD_PAGE_HDR *cur_page =   (local)
    1793  tail_page = cur_page;
    1805  cur_page->rhdr.lsn == page->rhdr.lsn &&
    1806  cur_page->record_hdr.next_record_off ==
    1813  cur_page = NULL;
    1821  lsn_cur = le64_to_cpu(cur_page->rhdr.lsn);
    1824  le64_to_cpu(cur_page->record_hdr.last_end_lsn) &&
    1833  if (!is_log_record_end(cur_page)) {
    1841  log->last_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn);
    1842  log->ra->current_lsn = cur_page ...   (match truncated)
  (remaining matches omitted)
/kernel/linux/linux-5.10/drivers/crypto/qat/qat_common/icp_qat_uclo.h

    113  struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX];   (struct member)

/kernel/linux/linux-5.10/drivers/crypto/qat/qat_common/qat_uclo.c

    1577  obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =   (qat_uclo_wr_uimage_page())
/kernel/linux/linux-6.6/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h

    154  struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX];   (struct member)

/kernel/linux/linux-6.6/drivers/crypto/intel/qat/qat_common/qat_uclo.c

    2051  aed->ae_slices[s].cur_page[ctx] =   (qat_uclo_wr_uimage_page())
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gvt/kvmgt.c

  gvt_pin_guest_page():
    150  struct page *cur_page;   (local)
    153  IOMMU_READ | IOMMU_WRITE, &cur_page);
    161  base_page = cur_page;
    162  else if (page_to_pfn(base_page) + npage != page_to_pfn(cur_page)) {
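gvt_pin_guest_page() pins guest pages one at a time and checks that each newly pinned cur_page is physically adjacent to the first one, so the whole range can be treated as a single contiguous buffer. A minimal sketch of that contiguity check, modelling page_to_pfn() with a plain array of page frame numbers (the helper name is illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* True if every page sits exactly npage frames after the first one. */
static bool pfns_are_contiguous(const uint64_t *pfns, size_t npages)
{
        size_t npage;

        for (npage = 1; npage < npages; npage++) {
                if (pfns[0] + npage != pfns[npage])
                        return false;   /* a gap: the range is not contiguous */
        }
        return true;
}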