/kernel/linux/linux-5.10/mm/

percpu-vm.c
    Doc comments describe @page_end as the "page index of the last page to be freed/allocated + 1",
    i.e. the ranges are half-open [@page_start, @page_end), and the helpers walk them with
    "for (i = page_start; i < page_end; i++)". Definitions taking a page_start/page_end pair:
        53   pcpu_free_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)
        81   pcpu_alloc_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end, gfp_t gfp)
        126  pcpu_pre_unmap_flush(struct pcpu_chunk *chunk, int page_start, int page_end)
        152  pcpu_unmap_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)
        184  pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, int page_start, int page_end)
        213  pcpu_map_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)
        254  pcpu_post_map_flush(struct pcpu_chunk *chunk, int page_start, int page_end)
        275  pcpu_populate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end, gfp_t gfp)
        308  pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end)
    [all ...]

percpu-km.c
    35   pcpu_populate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end, gfp_t gfp)
    41   pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end)

percpu.c
    1025 pcpu_is_populated(): locals page_start, page_end, rs, re; the page span is derived from the
         allocation's bit offset, e.g. page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE), then
         chunk->populated is scanned with bitmap_next_clear_region(..., &rs, &re, page_end) and the
         check bails out once rs >= page_end (a userspace sketch of this rounding follows below).
    1481 pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start, int page_end)
         "Pages in [@page_start, @page_end) have been populated to @chunk"; nr = page_end - page_start.
    1505 pcpu_chunk_depopulated(struct pcpu_chunk *chunk, int page_start, int page_end)
         "Pages in [@page_start, @page_end) have been depopulated from @chunk."
    1810 pcpu_alloc(): locals unsigned int page_start, page_end, rs, re.
    [all ...]
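All of the percpu matches use the same convention: @page_end is exclusive, so a range covers exactly page_end - page_start pages. As a rough illustration of the PFN_DOWN/PFN_UP rounding seen in pcpu_is_populated(), here is a minimal userspace sketch; the macro definitions below are simplified stand-ins (4 KiB pages assumed), not the kernel's own, and the bit_off/bits values are invented for the example.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for kernel macros (4 KiB pages assumed). */
#define PAGE_SHIFT          12
#define PAGE_SIZE           (1UL << PAGE_SHIFT)
#define PFN_DOWN(x)         ((x) >> PAGE_SHIFT)                    /* round down to a page index */
#define PFN_UP(x)           (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)  /* round up to a page index */
#define PCPU_MIN_ALLOC_SIZE 4   /* bytes represented by one allocation-map bit (assumed) */

int main(void)
{
	/* An allocation starting at bit 100 and spanning 3000 bits of the chunk's map. */
	size_t bit_off = 100, bits = 3000;

	/* Same shape as pcpu_is_populated(): convert the byte span to page indices. */
	unsigned int page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
	unsigned int page_end   = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);

	/* Half-open [page_start, page_end): the page count is a plain subtraction. */
	printf("pages [%u, %u), %u page(s) must be populated\n",
	       page_start, page_end, page_end - page_start);
	return 0;
}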
/kernel/linux/linux-6.6/mm/

percpu-vm.c
    The same helpers as in 5.10, with @page_end again documented as the last page index + 1 and the
    half-open loops "for (i = page_start; i < page_end; i++)". Definitions:
        54 pcpu_free_pages(), 82 pcpu_alloc_pages(), 127 pcpu_pre_unmap_flush(), 153 pcpu_unmap_pages(),
        185 pcpu_post_unmap_tlb_flush(), 214 pcpu_map_pages(), 255 pcpu_post_map_flush(),
        276 pcpu_populate_chunk(), 312 pcpu_depopulate_chunk()
        - all taking (..., int page_start, int page_end[, gfp_t gfp]).
    [all ...]

percpu-km.c
    35   pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, int page_start, int page_end)
    41   pcpu_populate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end, gfp_t gfp)
    47   pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end)

percpu.c
    1520 pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start, int page_end)
         "Pages in [@page_start, @page_end) have been populated to @chunk"; nr = page_end - page_start.
    1544 pcpu_chunk_depopulated(struct pcpu_chunk *chunk, int page_start, int page_end)
         "Pages in [@page_start, @page_end) have been depopulated from @chunk"; nr = page_end - page_start.
    1575, 1577 forward declarations of pcpu_populate_chunk() and pcpu_depopulate_chunk().
    1847 pcpu_alloc(): locals unsigned int page_end, rs, re.
    [all ...]
/kernel/linux/linux-5.10/drivers/staging/fbtft/

fb_sh1106.c
    115  write_vmem(): locals int page, page_start, page_end, x, i, ret;
    120  page_end = DIV_ROUND_UP(offset + len, 8 * 2 * xres);
    122  for (page = page_start; page < page_end; page++) { ... }

/kernel/linux/linux-6.6/drivers/staging/fbtft/

fb_sh1106.c
    Identical matches at the same lines (115, 120, 122) as in the 5.10 copy above; a sketch of the
    page-span computation follows below.
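The SH1106 driver converts a dirty byte range of video memory into a range of display pages, where one page is 8 pixel rows tall and, going by the factor of 2 in the divisor, the shadow buffer holds two bytes per pixel, so one page corresponds to 8 * 2 * xres bytes. page_end is rounded up so a partially touched last page is still redrawn. A minimal sketch of that arithmetic; the page_start rounding and the xres/offset/len values are assumptions for illustration, only the DIV_ROUND_UP line appears in the listing above.

#include <stdio.h>

/* Kernel-style round-up division, as used by the driver. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int xres = 128;                     /* display width in pixels (assumed) */
	unsigned int bytes_per_page = 8 * 2 * xres;  /* 8 rows per page, 2 bytes per vmem pixel */

	/* A dirty region of video memory: byte offset and length (example values). */
	unsigned int offset = 1000, len = 5000;

	/* Rounding page_start down is an assumption; only page_end's
	 * DIV_ROUND_UP form appears in the listing above. */
	int page_start = offset / bytes_per_page;
	int page_end   = DIV_ROUND_UP(offset + len, bytes_per_page);

	for (int page = page_start; page < page_end; page++)
		printf("redraw display page %d\n", page);
	return 0;
}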
/kernel/linux/linux-6.6/drivers/video/fbdev/

ssd1307fb.c
    89   u8 page_end;   /* struct member */
    189  ssd1307fb_set_page_range(): u8 page_end = page_start + pages - 1;
    192  if (page_start == par->page_start && page_end == par->page_end) ...
    203  ret = ssd1307fb_write_cmd(par->client, page_end);
    208  par->page_end = page_end;
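Both this driver and ssd130x.c (further down) keep the last programmed page window cached and compare against it before issuing another set-page-range command, using an inclusive page_end (page_start + pages - 1), unlike the exclusive convention in the percpu code above. A self-contained sketch of that caching pattern; struct panel, write_cmd() and the values in main() are hypothetical stand-ins for the driver's par state and ssd1307fb_write_cmd(), not the real API.

#include <stdio.h>
#include <stdint.h>

/* Minimal stand-in for the driver state: only the cached page window. */
struct panel {
	uint8_t page_start;
	uint8_t page_end;
};

/* Hypothetical command writer standing in for ssd1307fb_write_cmd(). */
static int write_cmd(uint8_t cmd)
{
	printf("cmd 0x%02x\n", cmd);
	return 0;
}

/* Program the page window [page_start, page_start + pages - 1], skipping the
 * bus traffic when the hardware already uses that window. */
static int set_page_range(struct panel *par, uint8_t page_start, uint8_t pages)
{
	uint8_t page_end = page_start + pages - 1;
	int ret;

	if (page_start == par->page_start && page_end == par->page_end)
		return 0;   /* window unchanged: nothing to send */

	ret = write_cmd(page_start);
	if (ret)
		return ret;
	ret = write_cmd(page_end);
	if (ret)
		return ret;

	par->page_start = page_start;
	par->page_end = page_end;
	return 0;
}

int main(void)
{
	struct panel par = { .page_start = 0xff, .page_end = 0xff };

	set_page_range(&par, 0, 4);  /* sends the commands */
	set_page_range(&par, 0, 4);  /* cached: skipped */
	return 0;
}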
/kernel/linux/linux-6.6/fs/btrfs/

compression.c
    355  add_ra_bio_pages(): u64 page_end (local)
    403  page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
    404  lock_extent(tree, cur, page_end, NULL);
    406  em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
    418  unlock_extent(tree, cur, page_end, NULL);
    435  add_size = min(em->start + em->len, page_end + 1) - cur;   (clamp sketched below)
    438  unlock_extent(tree, cur, page_end, NULL);
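Throughout the btrfs hits, page_end is an inclusive byte offset (page start + PAGE_SIZE - 1), which is why derived lengths use page_end + 1. In add_ra_bio_pages() the readahead path clamps how many bytes of the page it may attach to whichever ends first, the extent map or the page. A minimal userspace sketch of that clamp; the page index, cur, and the extent-map start/length are illustrative assumptions.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	/* Readahead position inside page pg_index: illustrative values only. */
	uint64_t pg_index = 10;
	uint64_t cur = (pg_index << PAGE_SHIFT) + 512;

	/* btrfs uses an inclusive end for the page's byte range. */
	uint64_t page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;

	/* A hypothetical extent map covering part of the page. */
	uint64_t em_start = pg_index << PAGE_SHIFT;
	uint64_t em_len = 2048;

	/* Bytes of this page, starting at cur, that the extent actually covers:
	 * clamp to whichever ends first, the extent or the page. */
	uint64_t add_size = min_u64(em_start + em_len, page_end + 1) - cur;

	printf("page [%llu, %llu], add %llu bytes at offset %llu\n",
	       (unsigned long long)(pg_index << PAGE_SHIFT),
	       (unsigned long long)page_end,
	       (unsigned long long)add_size,
	       (unsigned long long)cur);
	return 0;
}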
defrag.c
    725  defrag_prepare_one_page(): u64 page_end = page_start + PAGE_SIZE - 1;
    760  lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
    762  unlock_extent(&inode->io_tree, page_start, page_end, ...);
inode.c
    416  btrfs_cleanup_ordered_extents(): locals u64 page_start = 0, page_end = 0;
    421  page_end = page_start + PAGE_SIZE - 1;
    464  if (page_start >= offset && page_end <= (offset + bytes - 1)) { ... }
    2693 btrfs_writepage_fixup_worker(): u64 page_end = page_offset(page) + PAGE_SIZE - 1;
    2746 lock_extent(&inode->io_tree, page_start, page_end, &cached_state);
    2754 unlock_extent(&inode->io_tree, page_start, page_end, ...);
    2762 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, ...);
    2781 unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
    7935 btrfs_invalidate_folio(): u64 page_end = page_start + folio_size(folio) - 1;
    7973 lock_extent(tree, page_start, page_end, ...);
    8125 btrfs_page_mkwrite(): u64 page_end (local)
    [all ...]

relocation.c
    2997 relocate_one_page(): u64 page_end (local)
    3035 page_end = page_start + PAGE_SIZE - 1;
    3042 while (cur <= page_end) {
    3048 u64 clamped_end = min(page_end, extent_end);
extent_io.c
    1176 writepage_delalloc(): const u64 page_end = page_start + PAGE_SIZE - 1;
    1178 u64 delalloc_end = page_end;
    1182 while (delalloc_start < page_end) {
    1183 delalloc_end = page_end;
/kernel/linux/linux-6.6/drivers/gpu/drm/solomon/

ssd130x.h
    91   u8 page_end;   /* struct member */

ssd130x.c
    Same page-window caching pattern as ssd1307fb.c above:
    222  ssd130x_set_page_range(): u8 page_end = page_start + pages - 1;
    225  if (page_start == ssd130x->page_start && page_end == ssd130x->page_end) ...
    228  ret = ssd130x_write_cmd(ssd130x, 3, SSD130X_SET_PAGE_RANGE, page_start, page_end);
    233  ssd130x->page_end = page_end;
/kernel/linux/linux-5.10/fs/btrfs/

ioctl.c
    1290 cluster_pages_for_defrag(): u64 page_end (local)
    1326 page_end = page_start + PAGE_SIZE - 1;
    1328 lock_extent_bits(tree, page_start, page_end, ...);
    1332 unlock_extent_cached(tree, page_start, page_end, ...);
    1386 page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE;   /* exclusive here */
    1389 ... page_start, page_end - 1, &cached_state);
    1405 while (search_start < page_end) {
    1409 ... page_end - search_start);
    1425 ... page_end - 1, EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | ...
    1437 set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end ...
    [all ...]

inode.c
    115  btrfs_cleanup_ordered_extents(): u64 page_end = page_start + PAGE_SIZE - 1;
    133  if (page_start >= offset && page_end <= (offset + bytes - 1)) { ... }
    2338 btrfs_writepage_fixup_worker(): u64 page_end (local)
    2346 page_end = page_offset(page) + PAGE_SIZE - 1;
    2397 lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state);
    2405 unlock_extent_cached(&inode->io_tree, page_start, page_end, ...);
    2413 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0, ...);
    2432 unlock_extent_cached(&inode->io_tree, page_start, page_end, ...);
    2441 end_extent_writepage(page, ret, page_start, page_end);
    8217 btrfs_invalidatepage(): u64 page_end (local)
    8359 btrfs_page_mkwrite(): u64 page_end (local)
    [all ...]

relocation.c
    2700 relocate_file_extent_cluster(): u64 page_end (local)
    2775 page_end = page_start + PAGE_SIZE - 1;
    2777 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
    2784, 2790, 2800, 2808: further uses of the page_start/page_end pair in the same function.
extent_io.c
    3385 writepage_delalloc(): u64 page_end = delalloc_start + PAGE_SIZE - 1;
    3393 while (delalloc_end < page_end) {
    3464 __extent_writepage_io(): u64 page_end = start + PAGE_SIZE - 1;
    3478 ret = btrfs_writepage_cow_fixup(page, start, page_end);
    3493 end = page_end;
    3502 ... page_end, 1);
    3580 __extent_writepage(): u64 page_end = start + PAGE_SIZE - 1;
    3636 end_extent_writepage(page, ret, start, page_end);
/kernel/linux/linux-5.10/drivers/net/ethernet/broadcom/

bnx2.c
    4579 bnx2_nvram_write(): locals u32 page_start, page_end, data_start, data_end;
    4586 /* Find the page_end addr */
    4587 page_end = page_start + bp->flash_info->page_size;
    4591 data_end = (page_end > offset32 + len32) ?
    4592            (offset32 + len32) : page_end;
    4653 if ((addr == page_end - 4) || ...
    4670 /* ... to page_end */
    4672 for (addr = data_end; addr < page_end; ...
    4675 if (addr == page_end - 4) { ...
/kernel/linux/linux-6.6/drivers/net/ethernet/broadcom/

bnx2.c
    The same bnx2_nvram_write() logic as in 5.10, at lines 4563 (locals), 4570-4571
    (page_end = page_start + bp->flash_info->page_size), 4575-4576 (data_end clamp) and
    4637, 4654, 4656, 4659 (per-page programming loop). The clamping is sketched below.
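bnx2_nvram_write() splits a write request so that no chunk crosses a flash page boundary: page_end marks the end of the current page and data_end clamps the chunk to the earlier of the page end and the request end. A minimal sketch of that splitting; rounding page_start down with an alignment mask, the page size, and the offset/length values are assumptions for illustration, only the page_end and data_end expressions appear in the listing above.

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 256;              /* flash page size (assumed) */
	unsigned int offset32 = 700, len32 = 900;  /* write request: example values */

	unsigned int written = 0;
	while (written < len32) {
		unsigned int addr = offset32 + written;

		/* Aligning down to the containing flash page is an assumption;
		 * only page_end = page_start + page_size appears in the listing. */
		unsigned int page_start = addr & ~(page_size - 1);
		unsigned int page_end = page_start + page_size;

		/* Clamp this chunk to whichever comes first: the end of the
		 * request or the end of the flash page. */
		unsigned int data_start = addr;
		unsigned int data_end = (page_end > offset32 + len32) ?
					(offset32 + len32) : page_end;

		printf("page [%u, %u): program bytes [%u, %u)\n",
		       page_start, page_end, data_start, data_end);
		written += data_end - data_start;
	}
	return 0;
}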
/kernel/linux/linux-5.10/drivers/iommu/arm/arm-smmu-v3/

arm-smmu-v3.c
    1530 arm_smmu_atc_inv_to_cmd(): locals unsigned long page_start, page_end;
    1544 page_end = (iova + size - 1) >> inval_grain_shift;
    1566 log2_span = fls_long(page_start ^ page_end);
/kernel/linux/linux-6.6/drivers/iommu/arm/arm-smmu-v3/

arm-smmu-v3.c
    The same computation as in 5.10, at lines 1733 (locals), 1761 (page_end) and 1783 (log2_span);
    the XOR/fls_long step that sizes the invalidation window is sketched below.
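The ATC invalidation command describes a naturally aligned, power-of-two-sized window, so the driver needs the smallest such window that covers every page touched by the request. XORing the first and last page indices exposes the highest bit in which they differ, and fls_long() of that value gives the log2 of the required span. A userspace sketch of that computation; the page_start derivation, the span-mask step, and the iova/size/granule values are assumptions or reconstructions, only the page_end and log2_span lines appear in the listing above.

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's fls_long(): index of the highest set
 * bit, 1-based, or 0 if the value is 0. */
static unsigned int fls_long(unsigned long x)
{
	unsigned int r = 0;
	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int inval_grain_shift = 12;        /* 4 KiB invalidation granule (assumed) */
	uint64_t iova = 0x12345000, size = 0x9000;  /* example invalidation request */

	/* page_start's derivation is an assumption; the listing only shows page_end. */
	unsigned long page_start = iova >> inval_grain_shift;
	unsigned long page_end = (iova + size - 1) >> inval_grain_shift;

	/* Highest differing bit between first and last page index tells us how
	 * large a naturally aligned power-of-two window must be to cover both. */
	unsigned int log2_span = fls_long(page_start ^ page_end);
	unsigned long span_mask = (1UL << log2_span) - 1;  /* reconstruction, not in the listing */

	printf("pages [%#lx, %#lx] -> aligned window %#lx, %lu page(s)\n",
	       page_start, page_end, page_start & ~span_mask, span_mask + 1);
	return 0;
}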
/kernel/linux/linux-5.10/fs/ocfs2/

alloc.c
    7089 ocfs2_convert_inline_data_to_extents(): unsigned int page_end = min_t(unsigned, PAGE_SIZE, ...);
    7114 ret = ocfs2_grab_eof_pages(inode, 0, page_end, &page, ...);
    7133 ocfs2_map_and_dirty_page(inode, handle, 0, page_end, page, 0, ...);