Searched refs:page (Results 726 - 750 of 6359) sorted by relevance

/kernel/linux/linux-6.6/fs/reiserfs/
xattr.c
428 static inline void reiserfs_put_page(struct page *page) in reiserfs_put_page() argument
430 kunmap(page); in reiserfs_put_page()
431 put_page(page); in reiserfs_put_page()
434 static struct page *reiserfs_get_page(struct inode *dir, size_t n) in reiserfs_get_page()
437 struct page *page; in reiserfs_get_page() local
443 page = read_mapping_page(mapping, n >> PAGE_SHIFT, NULL); in reiserfs_get_page()
444 if (!IS_ERR(page)) in reiserfs_get_page()
445 kmap(page); in reiserfs_get_page()
520 struct page *page; reiserfs_xattr_set_handle() local
659 struct page *page; reiserfs_xattr_get() local
[all...]
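The reiserfs excerpt above shows a common pagecache access pattern: read_mapping_page() looks up (or reads in) a page and returns it with a reference held, kmap() makes its contents addressable, and kunmap()/put_page() undo both when the caller is done. A minimal sketch of that pairing; the caller name example_read_block and its variables are illustrative, not from the source:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

static int example_read_block(struct inode *dir, size_t n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page;
	char *data;

	/* Look up byte offset n in the page cache; the page comes back referenced. */
	page = read_mapping_page(mapping, n >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return PTR_ERR(page);

	data = kmap(page);	/* what reiserfs_get_page() does after the lookup */
	/* ... use data[n & (PAGE_SIZE - 1)] ... */
	kunmap(page);		/* reiserfs_put_page(): unmap ... */
	put_page(page);		/* ... and drop the reference */
	return 0;
}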
/kernel/linux/linux-5.10/arch/arm/mm/
nommu.c
17 #include <asm/page.h>
30 * empty_zero_page is a special page that is used for
33 struct page *empty_zero_page;
93 * Register the exception vector page. in arm_mm_memblock_reserve()
100 * There is no dedicated vector page on V7-M. So nothing needs to be in arm_mm_memblock_reserve()
153 * paging_init() sets up the page tables, initialises the zone memory
154 * maps, and sets up the zero page, bad page and bad page tables.
163 /* allocate the zero page in paging_init()
182 flush_dcache_page(struct page *page) flush_dcache_page() argument
188 flush_kernel_dcache_page(struct page *page) flush_kernel_dcache_page() argument
194 copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long uaddr, void *dst, const void *src, unsigned long len) copy_to_user_page() argument
[all...]
/kernel/linux/linux-5.10/arch/sh/include/asm/
page.h
11 /* PAGE_SHIFT determines the page size */
21 # error "Bogus kernel page size?"
59 #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
63 struct page;
66 extern void copy_user_highpage(struct page *to, struct page *from,
69 extern void clear_user_highpage(struct page *page, unsigned long vaddr);
97 typedef struct page *pgtable_
[all...]
/kernel/linux/linux-5.10/arch/riscv/mm/
pageattr.c
159 int set_direct_map_invalid_noflush(struct page *page) in set_direct_map_invalid_noflush() argument
162 unsigned long start = (unsigned long)page_address(page); in set_direct_map_invalid_noflush()
176 int set_direct_map_default_noflush(struct page *page) in set_direct_map_default_noflush() argument
179 unsigned long start = (unsigned long)page_address(page); in set_direct_map_default_noflush()
193 void __kernel_map_pages(struct page *page, int numpages, int enable) in __kernel_map_pages() argument
199 __set_memory((unsigned long)page_address(page), numpages, in __kernel_map_pages()
202 __set_memory((unsigned long)page_address(page), numpage in __kernel_map_pages()
[all...]
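The pageattr.c hits above are the arch hooks that add or remove a page from the kernel's linear (direct) map by changing its protection bits; all of them start from page_address(page). A rough sketch of the shape of these helpers, assuming a __set_memory(addr, numpages, set_mask, clear_mask) primitive like the one named in the excerpt (the mask values are illustrative, not the exact riscv ones):

#include <linux/mm.h>

/* Assumed primitive, as referenced in the excerpt above. */
int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, pgprot_t clear_mask);

int example_direct_map_invalid_noflush(struct page *page)
{
	unsigned long start = (unsigned long)page_address(page);

	/* Drop the present bit for one page of the direct map; no TLB flush here. */
	return __set_memory(start, 1, __pgprot(0), __pgprot(_PAGE_PRESENT));
}

void example_kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long start = (unsigned long)page_address(page);

	if (enable)	/* map the range back in ... */
		__set_memory(start, numpages, __pgprot(_PAGE_PRESENT), __pgprot(0));
	else		/* ... or take it out, e.g. for page-allocation debugging */
		__set_memory(start, numpages, __pgprot(0), __pgprot(_PAGE_PRESENT));
}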
/kernel/linux/linux-6.6/arch/ia64/include/asm/
page.h
24 #define RGN_GATE 5 /* Gate page, Kernel text, etc */
28 * PAGE_SHIFT determines the actual kernel page size.
39 # error Unsupported page size!
65 extern void clear_page (void *page);
72 #define clear_user_page(addr, vaddr, page) \
75 flush_dcache_page(page); \
78 #define copy_user_page(to, from, vaddr, page) \
81 flush_dcache_page(page); \
98 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIF
[all...]
/kernel/linux/linux-5.10/drivers/misc/eeprom/
ee1004.c
22 * in the 0x50-0x57 range for data. One of two 256-byte page is selected
41 * from page selection to end of read.
62 /* Nack means page 1 is selected */ in ee1004_get_current_page()
70 /* Ack means page 0 is selected, returned value meaningless */ in ee1004_get_current_page()
81 /* Can't cross page boundaries */ in ee1004_eeprom_read()
99 int page; in ee1004_read() local
104 page = off >> EE1004_PAGE_SHIFT; in ee1004_read()
105 if (unlikely(page > 1)) in ee1004_read()
118 /* Select page */ in ee1004_read()
119 if (page ! in ee1004_read()
[all...]
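The ee1004 driver above handles SPD EEPROMs that expose 512 bytes as two 256-byte pages behind one I2C address, with a separate page-select mechanism, and its read path must not cross a page boundary. A minimal sketch of the offset-to-page math, assuming 256-byte pages as the excerpt's EE1004_PAGE_SHIFT implies (the constant values here are stated as an assumption):

#include <linux/minmax.h>
#include <linux/types.h>

#define EXAMPLE_PAGE_SHIFT	8			/* assumed: 256-byte pages */
#define EXAMPLE_PAGE_SIZE	(1 << EXAMPLE_PAGE_SHIFT)

/* Which page a byte offset falls in, and how many bytes may be transferred
 * before hitting the page boundary (one transfer never crosses it). */
static size_t example_clamp_to_page(loff_t off, size_t count, int *page)
{
	*page = off >> EXAMPLE_PAGE_SHIFT;		/* 0 or 1 */
	return min_t(size_t, count,
		     EXAMPLE_PAGE_SIZE - (off & (EXAMPLE_PAGE_SIZE - 1)));
}

With this, a 64-byte read at offset 240 would be clamped to 16 bytes from page 0, and the remaining 48 bytes issued as a second transfer after selecting page 1.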
/kernel/linux/linux-5.10/drivers/iommu/arm/arm-smmu/
arm-smmu-impl.c
28 static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page, in arm_smmu_read_ns() argument
31 if (page == ARM_SMMU_GR0) in arm_smmu_read_ns()
33 return readl_relaxed(arm_smmu_page(smmu, page) + offset); in arm_smmu_read_ns()
36 static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page, in arm_smmu_write_ns() argument
39 if (page == ARM_SMMU_GR0) in arm_smmu_write_ns()
41 writel_relaxed(val, arm_smmu_page(smmu, page) + offset); in arm_smmu_write_ns()
135 * Disable MMU-500's not-particularly-beneficial next-page in arm_mmu500_reset()
151 static u64 mrvl_mmu500_readq(struct arm_smmu_device *smmu, int page, int off) in mrvl_mmu500_readq() argument
157 return hi_lo_readq_relaxed(arm_smmu_page(smmu, page) + off); in mrvl_mmu500_readq()
160 static void mrvl_mmu500_writeq(struct arm_smmu_device *smmu, int page, in argument
[all...]
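The arm-smmu hits above are per-implementation register accessors: every register is addressed as a (page, offset) pair, and arm_smmu_page() turns the page index into an ioremapped window base. A generic sketch of that paged-MMIO pattern with hypothetical names (the real driver additionally rewrites GR0 offsets in the *_ns variants, which is omitted here):

#include <linux/io.h>
#include <linux/types.h>

struct example_dev {
	void __iomem *base;	/* start of the device's register space */
	unsigned int pgshift;	/* log2 of one register page */
};

static void __iomem *example_page(struct example_dev *dev, int page)
{
	return dev->base + ((unsigned long)page << dev->pgshift);
}

static u32 example_read(struct example_dev *dev, int page, int offset)
{
	return readl_relaxed(example_page(dev, page) + offset);
}

static void example_write(struct example_dev *dev, int page, int offset, u32 val)
{
	writel_relaxed(val, example_page(dev, page) + offset);
}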
/kernel/linux/linux-5.10/include/trace/events/
huge_memory.h
53 TP_PROTO(struct mm_struct *mm, struct page *page, bool writable,
56 TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped),
70 __entry->pfn = page ? page_to_pfn(page) : -1;
114 TP_PROTO(struct page *page, int none_or_zero,
117 TP_ARGS(page, none_or_zero, referenced, writable, status),
128 __entry->pfn = page ? page_to_pfn(page)
[all...]
/kernel/linux/linux-5.10/fs/btrfs/
compression.h
35 struct page **compressed_pages;
83 u64 start, struct page **pages,
87 int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
96 struct page **compressed_pages,
148 u64 start, struct page **pages, unsigned long *out_pages,
152 struct page *dest_page, unsigned long start_byte, size_t srclen,
159 u64 start, struct page **pages, unsigned long *out_pages,
163 struct page *dest_page, unsigned long start_byte, size_t srclen,
169 u64 start, struct page **pages, unsigned long *out_pages,
173 struct page *dest_pag
[all...]
/kernel/linux/linux-5.10/include/xen/
xen-ops.h
67 unsigned int domid, bool no_translate, struct page **pages);
72 bool no_translate, struct page **pages) in xen_remap_pfn()
87 struct page **pages);
89 int nr, struct page **pages);
100 struct page **pages) in xen_xlate_remap_gfn_array()
106 int nr, struct page **pages) in xen_xlate_unmap_gfn_range()
122 * @prot: page protection mask
137 struct page **pages) in xen_remap_domain_gfn_array()
159 * @prot: page protection mask
173 struct page **page in xen_remap_domain_mfn_array()
[all...]
/kernel/linux/linux-5.10/drivers/staging/media/atomisp/pci/hmm/
hmm_dynamic_pool.c
58 page_obj[i].page = hmm_page->page; in get_pages_from_dynamic_pool()
95 /* free page directly back to system */ in free_pages_to_dynamic_pool()
96 ret = set_pages_wb(page_obj->page, 1); in free_pages_to_dynamic_pool()
99 "set page to WB err ...ret=%d\n", ret); in free_pages_to_dynamic_pool()
102 indicate that address of page is not in valid in free_pages_to_dynamic_pool()
104 then, _free_pages would panic; Do not know why page in free_pages_to_dynamic_pool()
108 __free_pages(page_obj->page, 0); in free_pages_to_dynamic_pool()
116 /* free page directly */ in free_pages_to_dynamic_pool()
117 ret = set_pages_wb(page_obj->page, in free_pages_to_dynamic_pool()
[all...]
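free_pages_to_dynamic_pool() above has to undo the non-cacheable attribute on a page before handing it back to the allocator; the driver's comment warns that freeing a page whose attributes were not restored can panic. A minimal sketch of that release order, assuming the x86 set_pages_wb() helper used in the excerpt; on failure the page is simply leaked here rather than risking the bad free (the original's exact handling is not fully shown):

#include <linux/gfp.h>
#include <linux/printk.h>
#include <asm/set_memory.h>

static void example_release_uncached_page(struct page *page)
{
	/* Restore write-back caching on the single page first ... */
	if (set_pages_wb(page, 1)) {
		pr_warn("example: could not reset page attributes, leaking page\n");
		return;
	}
	/* ... only then is it safe to give the order-0 page back. */
	__free_pages(page, 0);
}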
/kernel/linux/linux-6.6/drivers/misc/eeprom/
ee1004.c
22 * in the 0x50-0x57 range for data. One of two 256-byte page is selected
42 * from page selection to end of read.
63 /* Nack means page 1 is selected */ in ee1004_get_current_page()
71 /* Ack means page 0 is selected, returned value meaningless */ in ee1004_get_current_page()
75 static int ee1004_set_current_page(struct device *dev, int page) in ee1004_set_current_page() argument
79 if (page == ee1004_current_page) in ee1004_set_current_page()
83 ret = i2c_smbus_write_byte(ee1004_set_page[page], 0x00); in ee1004_set_current_page()
85 * Don't give up just yet. Some memory modules will select the page in ee1004_set_current_page()
86 * but not ack the command. Check which page is selected now. in ee1004_set_current_page()
88 if (ret == -ENXIO && ee1004_get_current_page() == page) in ee1004_set_current_page()
104 int status, page; ee1004_eeprom_read() local
[all...]
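The 6.6 version of the driver adds the detail that some DIMMs execute the page-select dummy write but never acknowledge it, so a -ENXIO from the write is tolerated when a read-back confirms the expected page. A compressed sketch of that logic, with hypothetical names standing in for the driver's ee1004_set_page[] client array and ee1004_get_current_page():

#include <linux/i2c.h>

static int example_get_current_page(void);	/* stand-in for ee1004_get_current_page() */

static int example_set_current_page(struct i2c_client *page_select, int page, int current_page)
{
	int ret;

	if (page == current_page)
		return 0;			/* already selected */

	/* A dummy write to the page-select address switches pages. */
	ret = i2c_smbus_write_byte(page_select, 0x00);

	/* Some modules switch but do not ack: verify before treating it as an error. */
	if (ret == -ENXIO && example_get_current_page() == page)
		ret = 0;

	return ret;
}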
/kernel/linux/linux-6.6/fs/btrfs/
compression.h
40 struct page **compressed_pages;
81 u64 start, struct page **pages,
85 int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
91 struct page **compressed_pages,
142 u64 start, struct page **pages, unsigned long *out_pages,
146 struct page *dest_page, unsigned long start_byte, size_t srclen,
153 u64 start, struct page **pages, unsigned long *out_pages,
157 struct page *dest_page, unsigned long start_byte, size_t srclen,
163 u64 start, struct page **pages, unsigned long *out_pages,
167 struct page *dest_pag
[all...]
/kernel/linux/linux-6.6/drivers/hwmon/pmbus/
lt7182s.c
35 static int lt7182s_read_word_data(struct i2c_client *client, int page, int phase, int reg) in lt7182s_read_word_data() argument
41 if (page == 0 || page == 1) in lt7182s_read_word_data()
42 ret = pmbus_read_word_data(client, page, phase, MFR_READ_ITH); in lt7182s_read_word_data()
47 ret = pmbus_read_word_data(client, page, phase, MFR_IOUT_PEAK); in lt7182s_read_word_data()
50 ret = pmbus_read_word_data(client, page, phase, MFR_VOUT_PEAK); in lt7182s_read_word_data()
53 ret = pmbus_read_word_data(client, page, phase, MFR_VIN_PEAK); in lt7182s_read_word_data()
56 ret = pmbus_read_word_data(client, page, phase, MFR_TEMPERATURE_1_PEAK); in lt7182s_read_word_data()
59 ret = (page == 0) ? 0 : -ENODATA; in lt7182s_read_word_data()
68 static int lt7182s_write_word_data(struct i2c_client *client, int page, in argument
[all...]
/kernel/linux/linux-6.6/drivers/iommu/arm/arm-smmu/
arm-smmu-impl.c
28 static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page, in arm_smmu_read_ns() argument
31 if (page == ARM_SMMU_GR0) in arm_smmu_read_ns()
33 return readl_relaxed(arm_smmu_page(smmu, page) + offset); in arm_smmu_read_ns()
36 static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page, in arm_smmu_write_ns() argument
39 if (page == ARM_SMMU_GR0) in arm_smmu_write_ns()
41 writel_relaxed(val, arm_smmu_page(smmu, page) + offset); in arm_smmu_write_ns()
132 * Disable MMU-500's not-particularly-beneficial next-page in arm_mmu500_reset()
151 static u64 mrvl_mmu500_readq(struct arm_smmu_device *smmu, int page, int off) in mrvl_mmu500_readq() argument
157 return hi_lo_readq_relaxed(arm_smmu_page(smmu, page) + off); in mrvl_mmu500_readq()
160 static void mrvl_mmu500_writeq(struct arm_smmu_device *smmu, int page, in argument
[all...]
/kernel/linux/linux-5.10/fs/cifs/
file.c
2136 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) in cifs_partialpagewrite() argument
2138 struct address_space *mapping = page->mapping; in cifs_partialpagewrite()
2139 loff_t offset = (loff_t)page->index << PAGE_SHIFT; in cifs_partialpagewrite()
2149 inode = page->mapping->host; in cifs_partialpagewrite()
2152 write_data = kmap(page); in cifs_partialpagewrite()
2156 kunmap(page); in cifs_partialpagewrite()
2162 kunmap(page); in cifs_partialpagewrite()
2185 cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc); in cifs_partialpagewrite()
2190 kunmap(page); in cifs_partialpagewrite()
2218 struct page *page; wdata_prepare_pages() local
2483 cifs_writepage_locked(struct page *page, struct writeback_control *wbc) cifs_writepage_locked() argument
2523 cifs_writepage(struct page *page, struct writeback_control *wbc) cifs_writepage() argument
2530 cifs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) cifs_write_end() argument
3402 struct page *page; cifs_read_allocate_pages() local
3455 struct page *page = rdata->pages[i]; cifs_readdata_to_iov() local
3500 struct page *page = rdata->pages[i]; uncached_fill_pages() local
4141 struct page *page = vmf->page; cifs_page_mkwrite() local
4199 struct page *page = rdata->pages[i]; cifs_readv_complete() local
4243 struct page *page = rdata->pages[i]; readpages_fill_pages() local
4329 struct page *page, *tpage; readpages_get_pages() local
4436 struct page *page, *tpage; cifs_readpages() local
4542 cifs_readpage_worker(struct file *file, struct page *page, loff_t *poffset) cifs_readpage_worker() argument
4577 cifs_readpage_to_fscache(file_inode(file), page); cifs_readpage_worker() local
4589 cifs_readpage(struct file *file, struct page *page) cifs_readpage() argument
4666 struct page *page; cifs_write_begin() local
4734 cifs_release_page(struct page *page, gfp_t gfp) cifs_release_page() argument
4742 cifs_invalidate_page(struct page *page, unsigned int offset, unsigned int length) cifs_invalidate_page() argument
4751 cifs_launder_page(struct page *page) cifs_launder_page() argument
[all...]
/kernel/linux/linux-5.10/arch/arm/include/asm/
cacheflush.h
20 * This flag is used to indicate that the page pointed to by a pte is clean
59 * before a change of page tables.
64 * specified address space before a change of page tables.
65 * - start - user start address (inclusive, page aligned)
66 * - end - user end address (exclusive, page aligned)
87 * Ensure that the data held in page is written back.
88 * - kaddr - page address
164 * Copy user data from/to a page which is mapped into a different
168 extern void copy_to_user_page(struct vm_area_struct *, struct page *,
170 #define copy_from_user_page(vma, page, vadd
306 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) flush_anon_page() argument
[all...]
/kernel/linux/linux-6.6/scripts/gdb/linux/
page_owner.py
22 t = """Usage: lx-dump-page-owner [Option]
26 lx-dump-page-owner --pfn 655360\n"""
31 """Dump page owner"""
39 super(DumpPageOwner, self).__init__("lx-dump-page-owner", gdb.COMMAND_SUPPORT)
79 def lookup_page_ext(self, page):
80 pfn = self.p_ops.page_to_pfn(page)
87 def page_ext_get(self, page):
88 page_ext = self.lookup_page_ext(page)
99 page = gdb.Value(struct_page_addr).cast(utils.get_page_type().pointer())
100 pfn = self.p_ops.page_to_pfn(page)
[all...]
/kernel/linux/linux-6.6/mm/
secretmem.c
57 struct page *page; in secretmem_fault() local
68 page = find_lock_page(mapping, offset); in secretmem_fault()
69 if (!page) { in secretmem_fault()
76 page = &folio->page; in secretmem_fault()
77 err = set_direct_map_invalid_noflush(page); in secretmem_fault()
89 * If a split of large page was required, it in secretmem_fault()
90 * already happened when we marked the page invalid in secretmem_fault()
93 set_direct_map_default_noflush(page); in secretmem_fault()
[all...]
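secretmem_fault() above pairs the two hooks shown in the riscv pageattr.c result earlier: once the page is in the page cache, set_direct_map_invalid_noflush() removes it from the kernel direct map, and set_direct_map_default_noflush() puts it back if a later step fails. A minimal sketch of that pairing with a hypothetical follow-up step, not the actual fault handler:

#include <linux/mm.h>
#include <linux/set_memory.h>

static int example_follow_up_step(struct page *page);	/* hypothetical */

static int example_hide_from_direct_map(struct page *page)
{
	int err;

	/* Unmap the page from the kernel's linear mapping (no TLB flush here). */
	err = set_direct_map_invalid_noflush(page);
	if (err)
		return err;

	err = example_follow_up_step(page);
	if (err)
		/* Undo: restore the default direct-map entry on failure. */
		set_direct_map_default_noflush(page);

	return err;
}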
/kernel/linux/linux-5.10/fs/affs/
file.c
373 static int affs_writepage(struct page *page, struct writeback_control *wbc) in affs_writepage() argument
375 return block_write_full_page(page, affs_get_block, wbc); in affs_writepage()
378 static int affs_readpage(struct file *file, struct page *page) in affs_readpage() argument
380 return block_read_full_page(page, affs_get_block); in affs_readpage()
418 struct page **pagep, void **fsdata) in affs_write_begin()
434 struct page *page, void *fsdata) in affs_write_end()
439 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdat in affs_write_end()
432 affs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned int len, unsigned int copied, struct page *page, void *fsdata) affs_write_end() argument
522 affs_do_readpage_ofs(struct page *page, unsigned to, int create) affs_do_readpage_ofs() argument
630 affs_readpage_ofs(struct file *file, struct page *page) affs_readpage_ofs() argument
655 struct page *page; affs_write_begin_ofs() local
688 affs_write_end_ofs(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) affs_write_end_ofs() argument
881 struct page *page; affs_truncate() local
[all...]
/kernel/linux/linux-6.6/drivers/media/i2c/adv748x/
adv748x-core.c
118 int adv748x_read(struct adv748x_state *state, u8 page, u8 reg) in adv748x_read() argument
120 return adv748x_read_check(state, page, reg); in adv748x_read()
123 int adv748x_write(struct adv748x_state *state, u8 page, u8 reg, u8 value) in adv748x_write() argument
125 return regmap_write(state->regmap[page], reg, value); in adv748x_write()
128 static int adv748x_write_check(struct adv748x_state *state, u8 page, u8 reg, in adv748x_write_check() argument
134 *error = adv748x_write(state, page, reg, value); in adv748x_write_check()
206 * @page: Regmap page identifier
208 * @value: value to write to @page at @reg
211 u8 page; member
240 u8 page = is_txa(tx) ? ADV748X_PAGE_TXA : ADV748X_PAGE_TXB; adv748x_power_up_tx() local
287 u8 page = is_txa(tx) ? ADV748X_PAGE_TXA : ADV748X_PAGE_TXB; adv748x_power_down_tx() local
[all...]
/kernel/linux/linux-6.6/include/linux/
hugetlb.h
38 * For HugeTLB page, there are more metadata to save in the struct page. But
39 * the head struct page cannot meet our needs, so we have to abuse other tail
40 * struct page to store the metadata.
83 * instantiated within the map. The from and to elements are huge page
136 struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
140 unsigned long, unsigned long, struct page *,
145 struct page *ref_page, zap_flags_t zap_flags);
178 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
214 * high-level pgtable page, bu
859 arch_clear_hugepage_flags(struct page *page) arch_clear_hugepage_flags() argument
[all...]
/kernel/linux/linux-5.10/arch/openrisc/include/asm/
page.h
19 /* PAGE_SHIFT determines the page size */
39 #define clear_page(page) memset((page), 0, PAGE_SIZE)
42 #define clear_user_page(page, vaddr, pg) clear_page(page)
57 typedef struct page *pgtable_t;
81 #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
/kernel/linux/linux-5.10/arch/powerpc/include/asm/
cacheflush.h
15 * if it's accessed right after the pte is set. The page fault handler does
27 extern void flush_dcache_page(struct page *page);
32 void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
36 void flush_dcache_icache_page(struct page *page);
37 void __flush_dcache_icache(void *page);
/kernel/linux/linux-6.6/arch/arc/include/asm/
cacheflush.h
8 * -likewise for flush_cache_{range,page} (munmap, exit, COW-break)
30 void flush_dcache_page(struct page *page);
61 unsigned long user_addr, unsigned long page);
65 * get_user_pages() uses a kernel mapping to access the page
69 struct page *page, unsigned long u_vaddr);
74 * A new pagecache page has PG_arch_1 clear - thus dcache dirty by default
94 * checks if two addresses (after page aligning) index into same cache set
102 #define copy_to_user_page(vma, page, vadd
[all...]
