
Searched refs:page (results 451-475 of 6703), sorted by relevance


/kernel/linux/linux-6.6/kernel/dma/
contiguous.c
30 * inaccessible to page system even if device drivers don't use it.
46 #include <asm/page.h>
310 struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, in dma_alloc_from_contiguous()
329 bool dma_release_from_contiguous(struct device *dev, struct page *pages, in dma_release_from_contiguous()
335 static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp) in cma_alloc_aligned()
352 * Note that it bypass one-page size of allocations from the per-numa and
353 * global area as the addresses within one page are always contiguous, so
357 struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) in dma_alloc_contiguous()
374 struct page *page; in dma_alloc_contiguous() local
407 dma_free_contiguous(struct device *dev, struct page *page, size_t size) dma_free_contiguous() argument
[all...]
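
Editor's note: the comment at lines 352-353 explains why dma_alloc_contiguous() skips the per-NUMA and global CMA areas for requests of at most one page: within a single page the addresses are contiguous by definition, so the ordinary page allocator is enough. A minimal userspace sketch of that dispatch decision; cma_alloc_from() and alloc_normal_page() are hypothetical stand-ins for the kernel allocators:

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Hypothetical stand-ins for the kernel's CMA and buddy allocators. */
    static void *cma_alloc_from(size_t size)  { printf("CMA alloc %zu\n", size); return (void *)1; }
    static void *alloc_normal_page(void)      { printf("buddy alloc one page\n"); return (void *)2; }

    /* Models the dispatch in dma_alloc_contiguous(): requests of at most
     * one page bypass the contiguous areas, since a single page is always
     * physically contiguous anyway. */
    static void *alloc_contiguous(size_t size)
    {
        if (size <= PAGE_SIZE)
            return alloc_normal_page();
        return cma_alloc_from(size);
    }

    int main(void)
    {
        alloc_contiguous(512);            /* one-page bypass */
        alloc_contiguous(8 * PAGE_SIZE);  /* goes to the contiguous area */
        return 0;
    }
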
/kernel/linux/linux-5.10/drivers/xen/
grant-table.c
54 #include <xen/page.h>
114 * granted domain, frame is the page frame to be granted, and flags is
352 struct page *page; member
377 entry->ref, page_to_pfn(entry->page)); in gnttab_handle_deferred()
378 put_page(entry->page); in gnttab_handle_deferred()
401 struct page *page) in gnttab_add_deferred()
408 if (!page) { in gnttab_add_deferred()
411 page in gnttab_add_deferred()
400 gnttab_add_deferred(grant_ref_t ref, bool readonly, struct page *page) gnttab_add_deferred() argument
446 gnttab_end_foreign_access(grant_ref_t ref, int readonly, unsigned long page) gnttab_end_foreign_access() argument
836 struct page *page; cache_deq() local
844 cache_enq(struct gnttab_page_cache *cache, struct page *page) cache_enq() argument
862 struct page *page; cache_deq() local
870 cache_enq(struct gnttab_page_cache *cache, struct page *page) cache_enq() argument
884 gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page) gnttab_page_cache_get() argument
904 gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page, unsigned int num) gnttab_page_cache_put() argument
922 struct page *page[10]; gnttab_page_cache_shrink() local
1004 struct page *page = pfn_to_page(pfn); gnttab_dma_alloc_pages() local
1115 gnttab_foreach_grant_in_range(struct page *page, unsigned int offset, unsigned int len, xen_grant_fn_t fn, void *data) gnttab_foreach_grant_in_range() argument
[all...]
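
Editor's note: gnttab_add_deferred() and gnttab_handle_deferred() above implement a deferral pattern: a granted page that the remote domain may still access cannot be freed immediately, so it is parked on a list and retried later. A standalone sketch of that pattern, assuming a try_revoke() stand-in for the real grant-status check:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct deferred_entry {
        int ref;                      /* grant reference still held remotely */
        struct deferred_entry *next;
    };

    static struct deferred_entry *deferred_list;

    /* Hypothetical stand-in: true once the remote side dropped the grant. */
    static bool try_revoke(int ref) { return ref % 2 == 0; }

    static void add_deferred(int ref)
    {
        struct deferred_entry *e = malloc(sizeof(*e));
        e->ref = ref;
        e->next = deferred_list;
        deferred_list = e;
    }

    /* Models gnttab_handle_deferred(): walk the list, free what is now
     * revocable, requeue the rest for a later pass. */
    static void handle_deferred(void)
    {
        struct deferred_entry *e = deferred_list, *requeue = NULL;
        deferred_list = NULL;
        while (e) {
            struct deferred_entry *next = e->next;
            if (try_revoke(e->ref)) {
                printf("released grant %d\n", e->ref);
                free(e);              /* kernel code does put_page() here */
            } else {
                e->next = requeue;
                requeue = e;
            }
            e = next;
        }
        deferred_list = requeue;
    }

    int main(void)
    {
        add_deferred(1); add_deferred(2); add_deferred(3);
        handle_deferred();            /* grant 2 released, 1 and 3 requeued */
        return 0;
    }
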
/kernel/linux/linux-5.10/include/trace/events/
fscache.h
367 TP_PROTO(struct fscache_cookie *cookie, struct page *page,
370 TP_ARGS(cookie, page, why),
374 __field(pgoff_t, page )
380 __entry->page = page->index;
387 __entry->page)
391 TP_PROTO(struct fscache_cookie *cookie, struct page *page,
394 TP_ARGS(cookie, page, va
[all...]
/kernel/linux/linux-5.10/fs/ext4/
page-io.c
3 * linux/fs/ext4/page-io.c
106 struct page *page = bvec->bv_page; in ext4_finish_bio() local
107 struct page *bounce_page = NULL; in ext4_finish_bio()
114 if (!page) in ext4_finish_bio()
117 if (fscrypt_is_bounce_page(page)) { in ext4_finish_bio()
118 bounce_page = page; in ext4_finish_bio()
119 page = fscrypt_pagecache_page(bounce_page); in ext4_finish_bio()
123 SetPageError(page); in ext4_finish_bio()
124 mapping_set_error(page in ext4_finish_bio()
442 ext4_bio_write_page(struct ext4_io_submit *io, struct page *page, int len, struct writeback_control *wbc, bool keep_towrite) ext4_bio_write_page() argument
[all...]
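
Editor's note: ext4_finish_bio() above has to cope with fscrypt bounce pages: under encryption the bio carries a ciphertext copy, and completion status must be reported against the original pagecache page behind it. A sketch of that unwrap step; is_bounce() and backing_page() are hypothetical stand-ins for fscrypt_is_bounce_page() and fscrypt_pagecache_page():

    #include <stdbool.h>
    #include <stdio.h>

    struct page {
        bool bounce;              /* ciphertext copy used for I/O under fscrypt */
        struct page *backing;     /* the real pagecache page, if bounce */
        bool error;
    };

    static bool is_bounce(struct page *p)            { return p->bounce; }
    static struct page *backing_page(struct page *p) { return p->backing; }

    /* Models the unwrap in ext4_finish_bio(): report completion status
     * against the pagecache page, not the transient bounce page. */
    static void finish_one(struct page *page, bool io_failed)
    {
        struct page *bounce = NULL;

        if (is_bounce(page)) {
            bounce = page;
            page = backing_page(bounce);
        }
        if (io_failed)
            page->error = true;   /* kernel: SetPageError + mapping_set_error */
        printf("pagecache page error=%d (bounce used: %d)\n",
               page->error, bounce != NULL);
    }

    int main(void)
    {
        struct page real = { .bounce = false };
        struct page cipher = { .bounce = true, .backing = &real };
        finish_one(&cipher, true);
        return 0;
    }
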
/kernel/linux/linux-5.10/fs/nilfs2/
btnode.c
20 #include "page.h"
78 struct page *page; in nilfs_btnode_submit_block() local
86 page = bh->b_page; in nilfs_btnode_submit_block()
133 unlock_page(page); in nilfs_btnode_submit_block()
134 put_page(page); in nilfs_btnode_submit_block()
142 * nilfs_btnode_delete() invalidates the specified buffer and delete the page
143 * including the buffer if the page gets unbusy.
148 struct page *page in nilfs_btnode_delete() local
[all...]
/kernel/linux/linux-6.6/fs/nilfs2/
btnode.c
20 #include "page.h"
78 struct page *page; in nilfs_btnode_submit_block() local
86 page = bh->b_page; in nilfs_btnode_submit_block()
133 unlock_page(page); in nilfs_btnode_submit_block()
134 put_page(page); in nilfs_btnode_submit_block()
142 * nilfs_btnode_delete() invalidates the specified buffer and delete the page
143 * including the buffer if the page gets unbusy.
148 struct page *page in nilfs_btnode_delete() local
[all...]
/kernel/linux/linux-6.6/fs/f2fs/
checkpoint.c
39 * We guarantee no failure on the returned page.
41 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) in f2fs_grab_meta_page()
44 struct page *page; in f2fs_grab_meta_page() local
46 page = f2fs_grab_cache_page(mapping, index, false); in f2fs_grab_meta_page()
47 if (!page) { in f2fs_grab_meta_page()
51 f2fs_wait_on_page_writeback(page, META, true, true); in f2fs_grab_meta_page()
52 if (!PageUptodate(page)) in f2fs_grab_meta_page()
53 SetPageUptodate(page); in f2fs_grab_meta_page()
54 return page; in f2fs_grab_meta_page()
61 struct page *page; __get_meta_page() local
117 struct page *page; f2fs_get_meta_page_retry() local
239 struct page *page; f2fs_ra_meta_pages() local
312 struct page *page; f2fs_ra_meta_pages_cond() local
327 __f2fs_write_meta_page(struct page *page, struct writeback_control *wbc, enum iostat_type io_type) __f2fs_write_meta_page() argument
367 f2fs_write_meta_page(struct page *page, struct writeback_control *wbc) f2fs_write_meta_page() argument
743 struct page *page; f2fs_recover_orphan_inodes() local
779 struct page *page = NULL; write_orphan_inodes() local
1405 struct page *page = f2fs_grab_meta_page(sbi, blk_addr); commit_checkpoint() local
[all...]
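
Editor's note: the "we guarantee no failure on the returned page" comment at line 39 is implemented as a retry loop: f2fs_grab_meta_page() keeps calling f2fs_grab_cache_page() until it succeeds, yielding between attempts, so callers never see NULL. A standalone sketch of that retry-until-success shape, with grab_cache_page() as a stand-in that can transiently fail:

    #include <sched.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in: fails a few times, then succeeds, the way a
     * page allocation can transiently fail under memory pressure. */
    static void *grab_cache_page(void)
    {
        static int attempts;
        return ++attempts < 3 ? NULL : malloc(4096);
    }

    /* Models f2fs_grab_meta_page(): loop until the page is obtained. */
    static void *grab_meta_page(void)
    {
        void *page;

        while (!(page = grab_cache_page()))
            sched_yield();        /* the kernel uses cond_resched() here */
        return page;
    }

    int main(void)
    {
        void *p = grab_meta_page();
        printf("got meta page %p\n", p);
        free(p);
        return 0;
    }
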
/kernel/linux/linux-5.10/fs/f2fs/
f2fs.h
13 #include <linux/page-flags.h>
275 struct page *page; /* warm node page pointer */ member
539 ALLOC_NODE, /* allocate a new node page if needed */
694 FI_DROP_CACHE, /* drop dirty page cache */
904 struct page *inode_page; /* its inode page, NULL is possible */
905 struct page *node_page; /* cached direct node page */
1135 struct page *page; /* page to be written */ global() member
1754 F2FS_P_SB(struct page *page) F2FS_P_SB() argument
1769 F2FS_NODE(struct page *page) F2FS_NODE() argument
1774 F2FS_INODE(struct page *page) F2FS_INODE() argument
2390 struct page *page; f2fs_grab_cache_page() local
2435 f2fs_put_page(struct page *page, int unlock) f2fs_put_page() argument
2517 IS_INODE(struct page *page) IS_INODE() argument
2850 inline_xattr_addr(struct inode *inode, struct page *page) inline_xattr_addr() argument
2915 inline_data_addr(struct inode *inode, struct page *page) inline_data_addr() argument
3161 f2fs_set_page_private(struct page *page, unsigned long data) f2fs_set_page_private() argument
3170 f2fs_clear_page_private(struct page *page) f2fs_clear_page_private() argument
3959 f2fs_is_compressed_page(struct page *page) f2fs_is_compressed_page() argument
3967 f2fs_compress_control_page(struct page *page) f2fs_compress_control_page() argument
[all...]
/kernel/linux/linux-5.10/include/xen/
mem-reservation.h
18 #include <xen/page.h>
22 static inline void xenmem_reservation_scrub_page(struct page *page) in xenmem_reservation_scrub_page() argument
25 clear_highpage(page); in xenmem_reservation_scrub_page()
30 struct page **pages,
34 struct page **pages);
38 struct page **pages, in xenmem_reservation_va_mapping_update()
48 struct page **pages) in xenmem_reservation_va_mapping_reset()
/kernel/linux/linux-6.6/include/xen/
mem-reservation.h
18 #include <xen/page.h>
22 static inline void xenmem_reservation_scrub_page(struct page *page) in xenmem_reservation_scrub_page() argument
25 clear_highpage(page); in xenmem_reservation_scrub_page()
30 struct page **pages,
34 struct page **pages);
38 struct page **pages, in xenmem_reservation_va_mapping_update()
48 struct page **pages) in xenmem_reservation_va_mapping_reset()
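
Editor's note: xenmem_reservation_scrub_page() above zeroes a page (via clear_highpage() in the kernel) before it is returned to the hypervisor, so stale guest data never leaks into the shared free pool; the kernel compiles the scrub in only under a config option. A userspace sketch of the same idea, with the config knob modeled as a compile-time macro:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE   4096
    #define SCRUB_PAGES 1     /* models CONFIG_XEN_SCRUB_PAGES */

    /* Zero a page before returning it to a shared pool, so the next
     * owner cannot read what we left behind. */
    static void scrub_page(unsigned char *page)
    {
    #if SCRUB_PAGES
        memset(page, 0, PAGE_SIZE);   /* the kernel uses clear_highpage() */
    #endif
    }

    int main(void)
    {
        static unsigned char page[PAGE_SIZE];
        page[0] = 0x42;               /* "secret" guest data */
        scrub_page(page);
        printf("first byte after scrub: %d\n", page[0]);
        return 0;
    }
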
/kernel/linux/linux-6.6/arch/s390/mm/
gmap.c
24 #include <asm/page.h>
29 static struct page *gmap_alloc_crst(void) in gmap_alloc_crst()
31 struct page *page; in gmap_alloc_crst() local
33 page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER); in gmap_alloc_crst()
34 if (!page) in gmap_alloc_crst()
36 arch_set_page_dat(page, CRST_ALLOC_ORDER); in gmap_alloc_crst()
37 return page; in gmap_alloc_crst()
49 struct page *page; in gmap_alloc() local
196 struct page *page, *next; gmap_free() local
319 struct page *page; gmap_alloc_table() local
350 struct page *page; __gmap_segment_gaddr() local
1351 struct page *page; gmap_unshadow_pgt() local
1380 struct page *page; __gmap_unshadow_sgt() local
1409 struct page *page; gmap_unshadow_sgt() local
1438 struct page *page; __gmap_unshadow_r3t() local
1467 struct page *page; gmap_unshadow_r3t() local
1497 struct page *page; __gmap_unshadow_r2t() local
1524 struct page *page; gmap_unshadow_r2t() local
1555 struct page *page; __gmap_unshadow_r1t() local
1770 struct page *page; gmap_shadow_r2t() local
1854 struct page *page; gmap_shadow_r3t() local
1938 struct page *page; gmap_shadow_sgt() local
2021 struct page *page; gmap_shadow_pgt_lookup() local
2061 struct page *page; gmap_shadow_pgt() local
2649 struct page *page = pmd_page(*pmd); __s390_enable_skey_hugetlb() local
2862 struct page *page; s390_replace_asce() local
[all...]
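
Editor's note: gmap_alloc_crst() above allocates a CRST table as a higher-order page block: alloc_pages(..., CRST_ALLOC_ORDER) returns 2^order physically contiguous pages. A sketch of the order-to-size arithmetic using aligned_alloc(), assuming order 2 (a 16 KiB table) purely for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE        4096UL
    #define CRST_ALLOC_ORDER 2        /* 2^2 = 4 pages = 16 KiB, illustrative */

    /* Models alloc_pages(gfp, order): an order-n allocation is
     * PAGE_SIZE << n bytes, naturally aligned to its own size. */
    static void *alloc_order(unsigned int order)
    {
        size_t size = PAGE_SIZE << order;
        return aligned_alloc(size, size);
    }

    int main(void)
    {
        void *crst = alloc_order(CRST_ALLOC_ORDER);
        printf("CRST table: %p, %lu bytes\n", crst, PAGE_SIZE << CRST_ALLOC_ORDER);
        free(crst);
        return 0;
    }
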
/kernel/linux/linux-5.10/arch/x86/kernel/
espfix_64.c
22 * This file sets up the ministacks and the related page tables. The
61 /* Page allocation bitmap - each page serves ESPFIX_STACKS_PER_PAGE CPUs */
73 * we have to account for some amount of padding at the end of each page.
77 unsigned long page, slot; in espfix_base_addr() local
80 page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random; in espfix_base_addr()
82 addr = (page << PAGE_SHIFT) + (slot * ESPFIX_STACK_SIZE); in espfix_base_addr()
119 /* Install the espfix pud into the kernel page directory */ in init_espfix_bsp()
133 unsigned int page; in init_espfix_ap() local
147 page = cpu/ESPFIX_STACKS_PER_PAGE; in init_espfix_ap()
150 stack_page = READ_ONCE(espfix_pages[page]); in init_espfix_ap()
167 struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0); init_espfix_ap() local
179 struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0); init_espfix_ap() local
[all...]
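
Editor's note: espfix_base_addr() at lines 77-82 turns a CPU number into a ministack offset in three steps: pick the page (many CPUs share one page, the index XOR-scrambled), pick the slot within the page, then combine page and slot. A standalone model of that arithmetic; the randomizing values are fixed here (in the kernel they are chosen at boot), and the constants are illustrative:

    #include <stdio.h>

    #define PAGE_SHIFT             12
    #define PAGE_SIZE              (1UL << PAGE_SHIFT)
    #define ESPFIX_STACK_SIZE      64UL   /* bytes per ministack, illustrative */
    #define ESPFIX_STACKS_PER_PAGE (PAGE_SIZE / ESPFIX_STACK_SIZE)

    /* Fixed here for reproducible output; boot-time random in the kernel. */
    static const unsigned long page_random = 0x3;
    static const unsigned long slot_random = 5;

    /* Models espfix_base_addr(): CPUs are packed many-per-page, with the
     * page index and slot index lightly scrambled. */
    static unsigned long espfix_offset(unsigned int cpu)
    {
        unsigned long page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random;
        unsigned long slot = (cpu + slot_random) % ESPFIX_STACKS_PER_PAGE;

        return (page << PAGE_SHIFT) + slot * ESPFIX_STACK_SIZE;
    }

    int main(void)
    {
        for (unsigned int cpu = 0; cpu < 4; cpu++)
            printf("cpu %u -> ministack offset %#lx\n", cpu, espfix_offset(cpu));
        return 0;
    }
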
/kernel/linux/linux-5.10/drivers/hwmon/pmbus/
ltc3815.c
27 static int ltc3815_read_byte_data(struct i2c_client *client, int page, int reg) in ltc3815_read_byte_data() argument
48 static int ltc3815_write_byte(struct i2c_client *client, int page, u8 reg) in ltc3815_write_byte() argument
72 static int ltc3815_read_word_data(struct i2c_client *client, int page, in ltc3815_read_word_data() argument
79 ret = pmbus_read_word_data(client, page, phase, in ltc3815_read_word_data()
83 ret = pmbus_read_word_data(client, page, phase, in ltc3815_read_word_data()
87 ret = pmbus_read_word_data(client, page, phase, in ltc3815_read_word_data()
91 ret = pmbus_read_word_data(client, page, phase, in ltc3815_read_word_data()
95 ret = pmbus_read_word_data(client, page, phase, in ltc3815_read_word_data()
112 static int ltc3815_write_word_data(struct i2c_client *client, int page, in ltc3815_write_word_data() argument
119 ret = pmbus_write_word_data(client, page, in ltc3815_write_word_data()
[all...]
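
Editor's note: ltc3815_read_word_data() above is a thin translation layer: each supported standard PMBus register is remapped onto a chip-specific register via pmbus_read_word_data(), and anything unsupported returns -ENODATA so the PMBus core falls back to default handling. A sketch of that dispatch shape; the register numbers and mfr_read() helper are illustrative stand-ins, not the real LTC3815 values:

    #include <errno.h>
    #include <stdio.h>

    /* Illustrative register numbers, not the real PMBus/LTC3815 values. */
    enum { REG_READ_VIN = 0x88, REG_READ_IIN = 0x89, MFR_VIN_PEAK = 0xde };

    /* Hypothetical stand-in for pmbus_read_word_data(). */
    static int mfr_read(int page, int reg)
    {
        printf("read page %d reg %#x\n", page, reg);
        return 0x1234;   /* fake word value */
    }

    /* Models ltc3815_read_word_data(): map standard registers onto the
     * chip's manufacturer registers; -ENODATA means "let the core handle it". */
    static int read_word_data(int page, int reg)
    {
        switch (reg) {
        case REG_READ_VIN:
            return mfr_read(page, MFR_VIN_PEAK);
        default:
            return -ENODATA;
        }
    }

    int main(void)
    {
        printf("vin: %#x\n", read_word_data(0, REG_READ_VIN));
        printf("iin: %d (fallback to core)\n", read_word_data(0, REG_READ_IIN));
        return 0;
    }
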
/kernel/linux/linux-6.6/arch/x86/kernel/
espfix_64.c
22 * This file sets up the ministacks and the related page tables. The
61 /* Page allocation bitmap - each page serves ESPFIX_STACKS_PER_PAGE CPUs */
73 * we have to account for some amount of padding at the end of each page.
77 unsigned long page, slot; in espfix_base_addr() local
80 page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random; in espfix_base_addr()
82 addr = (page << PAGE_SHIFT) + (slot * ESPFIX_STACK_SIZE); in espfix_base_addr()
109 /* Install the espfix pud into the kernel page directory */ in init_espfix_bsp()
123 unsigned int page; in init_espfix_ap() local
137 page = cpu/ESPFIX_STACKS_PER_PAGE; in init_espfix_ap()
140 stack_page = READ_ONCE(espfix_pages[page]); in init_espfix_ap()
157 struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0); init_espfix_ap() local
169 struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0); init_espfix_ap() local
[all...]
/kernel/linux/linux-6.6/drivers/hwmon/pmbus/
ltc3815.c
27 static int ltc3815_read_byte_data(struct i2c_client *client, int page, int reg) in ltc3815_read_byte_data() argument
48 static int ltc3815_write_byte(struct i2c_client *client, int page, u8 reg) in ltc3815_write_byte() argument
72 static int ltc3815_read_word_data(struct i2c_client *client, int page, in ltc3815_read_word_data() argument
79 ret = pmbus_read_word_data(client, page, phase, in ltc3815_read_word_data()
83 ret = pmbus_read_word_data(client, page, phase, in ltc3815_read_word_data()
87 ret = pmbus_read_word_data(client, page, phase, in ltc3815_read_word_data()
91 ret = pmbus_read_word_data(client, page, phase, in ltc3815_read_word_data()
95 ret = pmbus_read_word_data(client, page, phase, in ltc3815_read_word_data()
112 static int ltc3815_write_word_data(struct i2c_client *client, int page, in ltc3815_write_word_data() argument
119 ret = pmbus_write_word_data(client, page, in ltc3815_write_word_data()
[all...]
stpddc60.c
85 static int stpddc60_read_byte_data(struct i2c_client *client, int page, int reg) in stpddc60_read_byte_data() argument
89 if (page > 0) in stpddc60_read_byte_data()
109 static int stpddc60_read_word_data(struct i2c_client *client, int page, in stpddc60_read_word_data() argument
114 if (page > 0) in stpddc60_read_word_data()
119 ret = pmbus_read_word_data(client, page, phase, in stpddc60_read_word_data()
127 ret = pmbus_read_word_data(client, page, phase, reg); in stpddc60_read_word_data()
145 static int stpddc60_write_word_data(struct i2c_client *client, int page, in stpddc60_write_word_data() argument
151 if (page > 0) in stpddc60_write_word_data()
156 ret = pmbus_read_word_data(client, page, 0xff, in stpddc60_write_word_data()
161 ret = pmbus_write_byte_data(client, page, in stpddc60_write_word_data()
[all...]
/kernel/linux/linux-5.10/drivers/misc/
vmw_balloon.c
143 * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
144 * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
256 * @pfn: the physical frame number of the page to be locked or unlocked.
266 * @max_page_size: maximum supported page size for ballooning.
273 * @size: balloon actual size in basic page size (frames).
281 * @target: balloon target size in basic page size (frames).
309 * @batch_page: pointer to communication batch page.
311 * When batching is used, batch_page points to a page, which holds up to
321 * disabled, only a single page can be locked/unlock on each operation.
328 * @page
335 struct page *page; global() member
604 vmballoon_mark_page_offline(struct page *page, enum vmballoon_page_size_type page_size) vmballoon_mark_page_offline() argument
619 vmballoon_mark_page_online(struct page *page, enum vmballoon_page_size_type page_size) vmballoon_mark_page_online() argument
670 struct page *page; vmballoon_alloc_page_list() local
719 vmballoon_handle_one_result(struct vmballoon *b, struct page *page, enum vmballoon_page_size_type page_size, unsigned long status) vmballoon_handle_one_result() argument
863 struct page *page; vmballoon_lock() local
929 struct page *page, *tmp; vmballoon_release_page_list() local
1007 struct page *page; vmballoon_enqueue_page_list() local
1050 struct page *page, *tmp; vmballoon_dequeue_page_list() local
1089 struct page *page, *tmp; vmballoon_split_refused_pages() local
1327 struct page *page; vmballoon_init_batching() local
1766 vmballoon_migratepage(struct balloon_dev_info *b_dev_info, struct page *newpage, struct page *page, enum migrate_mode mode) vmballoon_migratepage() argument
[all...]
/kernel/linux/linux-6.6/drivers/misc/
vmw_balloon.c
141 * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
142 * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
254 * @pfn: the physical frame number of the page to be locked or unlocked.
264 * @max_page_size: maximum supported page size for ballooning.
271 * @size: balloon actual size in basic page size (frames).
279 * @target: balloon target size in basic page size (frames).
307 * @batch_page: pointer to communication batch page.
309 * When batching is used, batch_page points to a page, which holds up to
319 * disabled, only a single page can be locked/unlock on each operation.
326 * @page
333 struct page *page; global() member
597 vmballoon_mark_page_offline(struct page *page, enum vmballoon_page_size_type page_size) vmballoon_mark_page_offline() argument
612 vmballoon_mark_page_online(struct page *page, enum vmballoon_page_size_type page_size) vmballoon_mark_page_online() argument
663 struct page *page; vmballoon_alloc_page_list() local
712 vmballoon_handle_one_result(struct vmballoon *b, struct page *page, enum vmballoon_page_size_type page_size, unsigned long status) vmballoon_handle_one_result() argument
856 struct page *page; vmballoon_lock() local
922 struct page *page, *tmp; vmballoon_release_page_list() local
1000 struct page *page; vmballoon_enqueue_page_list() local
1043 struct page *page, *tmp; vmballoon_dequeue_page_list() local
1082 struct page *page, *tmp; vmballoon_split_refused_pages() local
1320 struct page *page; vmballoon_init_batching() local
1745 vmballoon_migratepage(struct balloon_dev_info *b_dev_info, struct page *newpage, struct page *page, enum migrate_mode mode) vmballoon_migratepage() argument
[all...]
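
Editor's note: the vmw_balloon comments above describe batching: instead of one hypervisor call per page, batch_page holds an array of 64-bit entries, each encoding a page frame number (with low bits left for per-entry status on reply), so many pages are locked or unlocked per call. A sketch of packing PFNs into such a batch; the exact entry layout here is an assumption for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define BATCH_SLOTS (4096 / sizeof(uint64_t))   /* entries per batch page */

    /* Assumed layout for illustration: PFN in the high bits, low bits
     * reserved for a status code filled in by the hypervisor. */
    static uint64_t encode_entry(uint64_t pfn) { return pfn << PAGE_SHIFT; }
    static uint64_t entry_pfn(uint64_t e)      { return e >> PAGE_SHIFT; }

    int main(void)
    {
        uint64_t batch[BATCH_SLOTS];
        unsigned int n = 0;

        /* Queue a few pages into the batch page... */
        for (uint64_t pfn = 100; pfn < 103; pfn++)
            batch[n++] = encode_entry(pfn);

        /* ...then one "hypercall" covers all of them. */
        printf("locking %u pages in one call:", n);
        for (unsigned int i = 0; i < n; i++)
            printf(" pfn=%llu", (unsigned long long)entry_pfn(batch[i]));
        printf("\n");
        return 0;
    }
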
/kernel/linux/linux-6.6/include/linux/
memcontrol.h
23 #include <linux/page-flags.h>
29 struct page;
33 /* Cgroup-specific page state, on top of universal node page state */
63 static inline bool is_prot_page(struct page *page) in is_prot_page() argument
212 * page cache and RSS per cgroup. We would eventually like to provide
363 /* page->memcg_data is a pointer to an objcgs vector */
365 /* page has been accounted as a non-slab kernel page */
457 page_memcg(struct page *page) page_memcg() argument
533 page_memcg_check(struct page *page) page_memcg_check() argument
579 PageMemcgKmem(struct page *page) PageMemcgKmem() argument
1018 mod_memcg_page_state(struct page *page, int idx, int val) mod_memcg_page_state() argument
1108 is_file_page(struct page *page) is_file_page() argument
1132 count_memcg_page_event(struct page *page, enum vm_event_item idx) count_memcg_page_event() argument
1220 page_memcg(struct page *page) page_memcg() argument
1236 page_memcg_check(struct page *page) page_memcg_check() argument
1246 PageMemcgKmem(struct page *page) PageMemcgKmem() argument
1554 mod_memcg_page_state(struct page *page, int idx, int val) mod_memcg_page_state() argument
1592 struct page *page = virt_to_head_page(p); __mod_lruvec_kmem_state() local
1600 struct page *page = virt_to_head_page(p); mod_lruvec_kmem_state() local
1617 count_memcg_page_event(struct page *page, int idx) count_memcg_page_event() argument
1834 memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) memcg_kmem_charge_page() argument
1842 memcg_kmem_uncharge_page(struct page *page, int order) memcg_kmem_uncharge_page() argument
1880 memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) memcg_kmem_charge_page() argument
1886 memcg_kmem_uncharge_page(struct page *page, int order) memcg_kmem_uncharge_page() argument
1890 __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) __memcg_kmem_charge_page() argument
1896 __memcg_kmem_uncharge_page(struct page *page, int order) __memcg_kmem_uncharge_page() argument
[all...]
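
Editor's note: the memcontrol.h snippets revolve around page->memcg_data, a single word holding a pointer with tag bits in its low (alignment-guaranteed-zero) bits, per the comments at lines 363-365; page_memcg() and PageMemcgKmem() mask or test those bits. A standalone sketch of that tagged-pointer scheme, with the flag values as illustrative assumptions:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative tag bits, in the spirit of MEMCG_DATA_OBJCGS /
     * MEMCG_DATA_KMEM from the header above. */
    #define DATA_OBJCGS 0x1UL
    #define DATA_KMEM   0x2UL
    #define DATA_FLAGS  (DATA_OBJCGS | DATA_KMEM)

    struct mem_cgroup { const char *name; };

    /* Models page_memcg(): strip the tag bits to recover the pointer. */
    static struct mem_cgroup *memcg_from_data(uintptr_t memcg_data)
    {
        return (struct mem_cgroup *)(memcg_data & ~(uintptr_t)DATA_FLAGS);
    }

    /* Models PageMemcgKmem(): test a tag bit. */
    static int is_kmem(uintptr_t memcg_data) { return !!(memcg_data & DATA_KMEM); }

    int main(void)
    {
        static struct mem_cgroup cg = { "demo" };
        uintptr_t data = (uintptr_t)&cg | DATA_KMEM;   /* tag a kmem page */

        assert(((uintptr_t)&cg & DATA_FLAGS) == 0);    /* alignment gives free bits */
        printf("memcg=%s kmem=%d\n", memcg_from_data(data)->name, is_kmem(data));
        return 0;
    }
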
/kernel/linux/linux-6.6/mm/
debug_vm_pgtable.c
3 * This kernel test validates architecture page table helpers and
43 * On s390 platform, the lower 4 bits are used to identify given page table
45 * pxx_clear() because of how dynamic page table folding works on s390. So
101 * This test needs to be executed after the given page table entry in pte_basic_tests()
122 struct page *page; in pte_advanced_tests() local
131 * PG_arch_1 for the page on ARM64. The page flag isn't cleared in pte_advanced_tests()
132 * when it's released and page allocation check will fail when in pte_advanced_tests()
133 * the page i in pte_advanced_tests()
220 struct page *page; pmd_advanced_tests() local
342 struct page *page; pud_advanced_tests() local
618 struct page *page; pte_clear_tests() local
892 struct page *page; swap_migration_tests() local
933 struct page *page; hugetlb_basic_tests() local
1029 struct page *page = NULL; destroy_args() local
1099 struct page *page = NULL; debug_vm_pgtable_alloc_huge_page() local
1196 struct page *page = NULL; init_args() local
[all...]
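
Editor's note: debug_vm_pgtable.c exercises the architecture's page-table helpers by checking algebraic invariants, e.g. that wrprotect undoes mkwrite without disturbing other bits. A toy model of that test style over a plain flag word; the helper names mirror the kernel's but the bit values are illustrative:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy PTE: just a flag word. Bit values are illustrative. */
    typedef uint64_t pte_t;
    #define PTE_WRITE 0x2ULL
    #define PTE_DIRTY 0x40ULL

    static pte_t pte_mkwrite(pte_t p)   { return p | PTE_WRITE; }
    static pte_t pte_wrprotect(pte_t p) { return p & ~PTE_WRITE; }
    static int   pte_write(pte_t p)     { return !!(p & PTE_WRITE); }

    int main(void)
    {
        pte_t pte = PTE_DIRTY;

        /* The style of pte_basic_tests(): each helper must round-trip. */
        assert(pte_write(pte_mkwrite(pte)));
        assert(!pte_write(pte_wrprotect(pte_mkwrite(pte))));
        assert(pte_wrprotect(pte_mkwrite(pte)) == pte);  /* other bits untouched */
        printf("toy pte helper invariants hold\n");
        return 0;
    }
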
/kernel/linux/linux-5.10/arch/csky/abiv1/inc/abi/
cacheflush.h
12 extern void flush_dcache_page(struct page *);
15 #define flush_cache_page(vma, page, pfn) cache_wbinv_all()
19 extern void flush_kernel_dcache_page(struct page *);
35 struct page *page, unsigned long vmaddr) in flush_anon_page()
37 if (PageAnon(page)) in flush_anon_page()
49 #define flush_icache_page(vma, page) do {} while (0);
54 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
59 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
34 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) flush_anon_page() argument
/kernel/linux/linux-5.10/include/linux/
slab_def.h
89 static inline void *nearest_obj(struct kmem_cache *cache, struct page *page, in nearest_obj() argument
92 void *object = x - (x - page->s_mem) % cache->size; in nearest_obj()
93 void *last_object = page->s_mem + (cache->num - 1) * cache->size; in nearest_obj()
108 const struct page *page, void *obj) in obj_to_index()
110 u32 offset = (obj - page->s_mem); in obj_to_index()
115 const struct page *page) in objs_per_slab_page()
107 obj_to_index(const struct kmem_cache *cache, const struct page *page, void *obj) obj_to_index() argument
114 objs_per_slab_page(const struct kmem_cache *cache, const struct page *page) objs_per_slab_page() argument
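
Editor's note: nearest_obj() and obj_to_index() above are pure address arithmetic on a slab: objects sit at a fixed stride of cache->size starting at page->s_mem, so an object's index is (obj - s_mem) / size and the nearest object start rounds down by the same stride (the kernel speeds the division up with a precomputed reciprocal). A standalone demonstration of that arithmetic:

    #include <stdio.h>

    #define OBJ_SIZE 96   /* cache->size: stride between objects */
    #define NUM_OBJS 10   /* cache->num: objects per slab */

    int main(void)
    {
        static char slab[OBJ_SIZE * NUM_OBJS];   /* models page->s_mem */
        char *x = slab + 3 * OBJ_SIZE + 17;      /* pointer into object 3 */

        /* obj_to_index(): offset divided by the stride. */
        unsigned int index = (unsigned int)((x - slab) / OBJ_SIZE);

        /* nearest_obj(): round the pointer down to the object boundary. */
        char *object = x - (x - slab) % OBJ_SIZE;

        printf("index=%u offset_in_obj=%ld\n", index, (long)(x - object));
        return 0;
    }
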
/kernel/linux/linux-5.10/fs/squashfs/
page_actor.h
11 void **page; member
17 static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page, in squashfs_page_actor_init() argument
26 actor->page = page; in squashfs_page_actor_init()
35 return actor->page[0]; in squashfs_first_page()
41 actor->page[actor->next_page++]; in squashfs_next_page()
52 struct page **page; member
64 extern struct squashfs_page_actor *squashfs_page_actor_init_special(struct page
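
Editor's note: the squashfs page actor is a tiny iterator abstraction: squashfs_first_page() returns page[0] and squashfs_next_page() returns page[next_page++], letting decompressors walk an array of destination pages without knowing how it was built. A standalone sketch of that cursor pattern:

    #include <stddef.h>
    #include <stdio.h>

    struct page_actor {
        void   **page;       /* array of destination buffers */
        size_t   pages;
        size_t   next_page;  /* cursor */
    };

    static void *first_page(struct page_actor *a)
    {
        a->next_page = 1;
        return a->page[0];
    }

    static void *next_page(struct page_actor *a)
    {
        return a->next_page == a->pages ? NULL : a->page[a->next_page++];
    }

    int main(void)
    {
        char b0[16], b1[16], b2[16];
        void *bufs[] = { b0, b1, b2 };
        struct page_actor a = { bufs, 3, 0 };

        /* A decompressor just pulls pages until the actor runs out. */
        for (void *p = first_page(&a); p; p = next_page(&a))
            printf("filling buffer %p\n", p);
        return 0;
    }
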
/kernel/linux/linux-6.6/fs/efs/
symlink.c
17 struct page *page = &folio->page; in efs_symlink_read_folio() local
18 char *link = page_address(page); in efs_symlink_read_folio()
20 struct inode * inode = page->mapping->host; in efs_symlink_read_folio()
43 SetPageUptodate(page); in efs_symlink_read_folio()
44 unlock_page(page); in efs_symlink_read_folio()
47 SetPageError(page); in efs_symlink_read_folio()
48 unlock_page(page); in efs_symlink_read_folio()
/kernel/linux/linux-5.10/drivers/virtio/
virtio_balloon.c
26 * Balloon device works in 4K page units. So each page is pointed to by
28 * page units.
38 /* The order of free page blocks to report to host */
40 /* The size of a free page block in bytes */
68 /* The free page reporting work item submitted to the balloon wq */
84 /* The number of free page blocks on the above list */
105 * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE
126 /* Free page reporting device */
136 static u32 page_to_balloon_pfn(struct page *page) page_to_balloon_pfn() argument
195 set_page_pfns(struct virtio_balloon *vb, __virtio32 pfns[], struct page *page) set_page_pfns() argument
215 struct page *page; fill_balloon() local
223 struct page *page = balloon_page_alloc(); fill_balloon() local
264 struct page *page, *next; release_pages_balloon() local
278 struct page *page; leak_balloon() local
417 struct page *page; return_free_pages_to_mm() local
628 struct page *page; get_free_page_and_send() local
757 virtballoon_migratepage(struct balloon_dev_info *vb_dev_info, struct page *newpage, struct page *page, enum migrate_mode mode) virtballoon_migratepage() argument
[all...]
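
Editor's note: the comment at the top of virtio_balloon.c is the key contract: the device always speaks in 4 KiB page units, so on architectures with larger pages one kernel page expands to a run of consecutive balloon PFNs. A sketch of the page_to_balloon_pfn()/set_page_pfns() arithmetic, assuming a 16 KiB kernel page size purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT               14   /* 16 KiB kernel pages, illustrative */
    #define VIRTIO_BALLOON_PFN_SHIFT 12   /* device always uses 4 KiB units */
    #define PAGES_PER_PAGE (1U << (PAGE_SHIFT - VIRTIO_BALLOON_PFN_SHIFT))

    /* Models page_to_balloon_pfn(): rescale a kernel PFN to device units. */
    static uint32_t balloon_pfn(uint32_t kernel_pfn)
    {
        return kernel_pfn * PAGES_PER_PAGE;
    }

    int main(void)
    {
        uint32_t kpfn = 7;
        uint32_t first = balloon_pfn(kpfn);

        /* Models set_page_pfns(): one kernel page becomes a run of
         * consecutive 4 KiB balloon pages. */
        printf("kernel pfn %u -> balloon pfns", kpfn);
        for (uint32_t i = 0; i < PAGES_PER_PAGE; i++)
            printf(" %u", first + i);
        printf("\n");
        return 0;
    }
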
