/kernel/linux/linux-6.6/arch/xtensa/include/asm/pgalloc.h
    26: #define pmd_populate(mm, pmdp, page) \
    27:         (pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
    56: struct page *page;  (in pte_alloc_one())
    58: page = __pte_alloc_one(mm, GFP_PGTABLE_USER);  (in pte_alloc_one())
    59: if (!page)  (in pte_alloc_one())
    61: ptes_clear(page_address(page));  (in pte_alloc_one())
    62: return page;  (in pte_alloc_one())

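The pgalloc.h hits above outline the whole allocation path for a user page-table page. A rough, hedged reconstruction of pte_alloc_one() from those lines follows; the early return value on allocation failure is an assumption based on the usual kernel idiom, it is not shown in the excerpt.

    static inline struct page *pte_alloc_one(struct mm_struct *mm)
    {
            struct page *page;

            page = __pte_alloc_one(mm, GFP_PGTABLE_USER);
            if (!page)
                    return NULL;                    /* assumed failure path */
            ptes_clear(page_address(page));         /* zero the fresh PTE table */
            return page;
    }
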
/kernel/linux/linux-6.6/fs/f2fs/compress.c
    29: unsigned int size = sizeof(struct page *) * nr;  (in page_array_alloc())
    40: unsigned int size = sizeof(struct page *) * nr;  (in page_array_free())
    76: bool f2fs_is_compressed_page(struct page *page)
    78: if (!PagePrivate(page))  (in f2fs_is_compressed_page())
    80: if (!page_private(page))  (in f2fs_is_compressed_page())
    82: if (page_private_nonpointer(page))  (in f2fs_is_compressed_page())
    85: f2fs_bug_on(F2FS_M_SB(page->mapping),  (in f2fs_is_compressed_page())
    86:         *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
    90: static void f2fs_set_compressed_page(struct page *page, …)
   138: f2fs_compress_control_page(struct page *page)
   163: f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
   590: struct page *page;  (in f2fs_compress_alloc_page())
   598: f2fs_compress_free_page(struct page *page)
   807: f2fs_end_read_compressed_page(struct page *page, bool failed, block_t blkaddr, bool in_task)
   883: struct page *page = cc->rpages[i];  (in cluster_has_invalid_data())
  1043: struct page *page;  (in prepare_compress_overwrite())
  1409: f2fs_compress_write_end_io(struct bio *bio, struct page *page)
  1660: struct page *page;  (in f2fs_alloc_dic())
  1801: f2fs_put_page_dic(struct page *page, bool in_task)
  1853: f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page, nid_t ino, block_t blkaddr)
  1896: f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page, block_t blkaddr)
    …

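Lines 76-86 above are most of the body of f2fs_is_compressed_page(). A hedged reconstruction is sketched below; the three early "return false" statements and the final "return true" are filled in from context rather than taken from the excerpt.

    bool f2fs_is_compressed_page(struct page *page)
    {
            if (!PagePrivate(page))
                    return false;
            if (!page_private(page))
                    return false;
            if (page_private_nonpointer(page))
                    return false;

            /* a compressed control page stores a magic value in its private data */
            f2fs_bug_on(F2FS_M_SB(page->mapping),
                    *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
            return true;
    }
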
/kernel/linux/linux-5.10/drivers/net/ethernet/cavium/liquidio/octeon_network.h
   263: struct page *page;  (in recv_buffer_alloc())
   267: page = alloc_page(GFP_ATOMIC);  (in recv_buffer_alloc())
   268: if (unlikely(!page))  (in recv_buffer_alloc())
   273: __free_page(page);  (in recv_buffer_alloc())
   274: pg_info->page = NULL;  (in recv_buffer_alloc())
   286: pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,  (in recv_buffer_alloc())
   291: __free_page(page);  (in recv_buffer_alloc())
   293: pg_info->page = NULL;  (in recv_buffer_alloc())
   297: pg_info->page …  (in recv_buffer_alloc())
    …

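The recv_buffer_alloc() hits show the usual two-stage rollback around a DMA-mapped receive page: one cleanup when allocation fails and one when the DMA mapping fails. A hedged sketch of that core follows; the pg_info and oct names come from the excerpt, while the mapping size, direction, and return value are assumptions.

    /* sketch only, not the full recv_buffer_alloc() */
    page = alloc_page(GFP_ATOMIC);
    if (unlikely(!page))
            return NULL;

    pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
                                PAGE_SIZE, DMA_FROM_DEVICE);
    if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
            __free_page(page);              /* undo the allocation */
            pg_info->page = NULL;           /* never leave stale state behind */
            return NULL;
    }
    pg_info->page = page;
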
/kernel/linux/linux-6.6/drivers/net/ethernet/cavium/liquidio/octeon_network.h
   263: struct page *page;  (in recv_buffer_alloc())
   267: page = alloc_page(GFP_ATOMIC);  (in recv_buffer_alloc())
   268: if (unlikely(!page))  (in recv_buffer_alloc())
   273: __free_page(page);  (in recv_buffer_alloc())
   274: pg_info->page = NULL;  (in recv_buffer_alloc())
   286: pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,  (in recv_buffer_alloc())
   291: __free_page(page);  (in recv_buffer_alloc())
   293: pg_info->page = NULL;  (in recv_buffer_alloc())
   297: pg_info->page …  (in recv_buffer_alloc())
    …

/kernel/linux/linux-6.6/drivers/net/vmxnet3/vmxnet3_xdp.c
   123: struct page *page;  (in vmxnet3_xdp_xmit_frame())
   148: } else { /* XDP buffer from page pool */  (in vmxnet3_xdp_xmit_frame())
   149: page = virt_to_page(xdpf->data);  (in vmxnet3_xdp_xmit_frame())
   150: tbi->dma_addr = page_pool_get_dma_addr(page) +  (in vmxnet3_xdp_xmit_frame())
   255: struct page *page;  (in vmxnet3_run_xdp())
   261: page = virt_to_page(xdp->data_hard_start);  (in vmxnet3_run_xdp())
   272: page_pool_recycle_direct(rq->page_pool, page);  (in vmxnet3_run_xdp())
   280: page_pool_recycle_direct(rq->page_pool, page);  (in vmxnet3_run_xdp())
   303: vmxnet3_build_skb(struct vmxnet3_rx_queue *rq, struct page *page, const struct xdp_buff *xdp)
   332: struct page *page;  (in vmxnet3_process_xdp_small())
   378: struct page *page;  (in vmxnet3_process_xdp())
    …

/kernel/linux/linux-6.6/mm/memremap.c
   113: struct page *first_page;  (in pageunmap_range())
   227: * allocate and initialize struct page for the device memory. More-  (in pagemap_range())
   440: * @pfn: page frame number to lookup page_map
   471: void free_zone_device_page(struct page *page)
   473: if (WARN_ON_ONCE(!page->pgmap->ops || !page->pgmap->ops->page_free))  (in free_zone_device_page())
   476: mem_cgroup_uncharge(page_folio(page));  (in free_zone_device_page())
   483: VM_BUG_ON_PAGE(PageAnon(page) && PageCompound(page), page);  (in free_zone_device_page())
   522: zone_device_page_init(struct page *page)
   535: __put_devmap_managed_page_refs(struct page *page, int refs)
    …

/kernel/linux/linux-6.6/lib/fault-inject.c
   252: static ssize_t fault_uint_attr_show(unsigned int val, char *page)
   254: return snprintf(page, PAGE_SIZE, "%u\n", val);  (in fault_uint_attr_show())
   257: static ssize_t fault_ulong_attr_show(unsigned long val, char *page)
   259: return snprintf(page, PAGE_SIZE, "%lu\n", val);  (in fault_ulong_attr_show())
   262: static ssize_t fault_bool_attr_show(bool val, char *page)
   264: return snprintf(page, PAGE_SIZE, "%u\n", val);  (in fault_bool_attr_show())
   267: static ssize_t fault_atomic_t_attr_show(atomic_t val, char *page)
   269: return snprintf(page, PAGE_SIZE, "%d\n", atomic_read(&val));  (in fault_atomic_t_attr_show())
   272: static ssize_t fault_uint_attr_store(unsigned int *val, const char *page, size_t count)
   277: result = kstrtouint(page, …  (in fault_uint_attr_store())
   285: fault_ulong_attr_store(unsigned long *val, const char *page, size_t count)
   298: fault_bool_attr_store(bool *val, const char *page, size_t count)
   311: fault_atomic_t_attr_store(atomic_t *val, const char *page, size_t count)
   366: fault_stacktrace_depth_show(struct config_item *item, char *page)
   371: fault_stacktrace_depth_store(struct config_item *item, const char *page, size_t count)
   389: fault_xul_attr_show(unsigned long val, char *page)
   395: fault_xul_attr_store(unsigned long *val, const char *page, size_t count)
    …

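The hits above are a family of tiny adapters between fault-injection attributes and their configfs text form: each show() formats one value into the supplied page buffer, and each store() parses it back with the matching kstrto*() helper. A hedged sketch of one such pair, reconstructed from the excerpt; the store-side variable names and the success return are assumptions.

    static ssize_t fault_uint_attr_show(unsigned int val, char *page)
    {
            /* one value, newline-terminated, never more than PAGE_SIZE bytes */
            return snprintf(page, PAGE_SIZE, "%u\n", val);
    }

    static ssize_t fault_uint_attr_store(unsigned int *val, const char *page,
                                         size_t count)
    {
            unsigned int result;
            int err;

            err = kstrtouint(page, 0, &result);     /* base 0: accepts 0x prefixes */
            if (err)
                    return err;

            *val = result;
            return count;                           /* whole write consumed */
    }
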
/kernel/linux/linux-5.10/arch/arm64/kernel/mte.c
    23: static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
    30: if (!non_swap_entry(entry) && mte_restore_tags(entry, page))  (in mte_sync_page_tags())
    34: mte_clear_page_tags(page_address(page));  (in mte_sync_page_tags())
    39: struct page *page = pte_page(pte);  (in mte_sync_tags())
    40: long i, nr_pages = compound_nr(page);  (in mte_sync_tags())
    44: for (i = 0; i < nr_pages; i++, page++) {  (in mte_sync_tags())
    45: if (!test_and_set_bit(PG_mte_tagged, &page->flags))  (in mte_sync_tags())
    46: mte_sync_page_tags(page, pte…  (in mte_sync_tags())
   231: struct page *page = NULL;  (in __access_remote_tags())
    …

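Lines 39-46 carry the core of mte_sync_tags(): every page of a (possibly compound) mapping has its MTE tags synchronised exactly once, guarded by the PG_mte_tagged page flag. A hedged sketch assembled from those lines; the function signature and the check_swap derivation are assumptions, not shown in the excerpt.

    void mte_sync_tags(pte_t *ptep, pte_t pte)
    {
            struct page *page = pte_page(pte);
            long i, nr_pages = compound_nr(page);
            bool check_swap = nr_pages == 1;        /* assumption: only order-0
                                                     * pages come back from swap */

            /* tag every subpage of the mapping once and only once */
            for (i = 0; i < nr_pages; i++, page++) {
                    if (!test_and_set_bit(PG_mte_tagged, &page->flags))
                            mte_sync_page_tags(page, ptep, check_swap);
            }
    }
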
/kernel/linux/linux-5.10/arch/powerpc/mm/book3s64/pgtable.c
    30: * This is called when relaxing access to a hugepage. It's also called in the page
    88: * lookup in page tables with local interrupts disabled. For huge pages
    90: * pmd_t we want to prevent transit from pmd pointing to page table
    91: * to pmd pointing to huge page (and back) while interrupts are disabled.
    92: * We clear pmd to possibly replace it with page table pointer in
   126: * this PMD pte entry to a regular level 0 PTE by a parallel page fault.  (in pmdp_huge_get_and_clear_full())
   147: pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
   149: return pfn_pmd(page_to_pfn(page), pgprot);  (in mk_pmd())
   279: * If we have taken up all the fragments mark PTE page NULL  (in get_pmd_from_cache())
   292: struct page *page;  (in __alloc_for_pmdcache())
   343: struct page *page = virt_to_page(pmd);  (in pmd_fragment_free())
    …

/kernel/linux/linux-5.10/include/linux/vmstat.h
   138: * Zone and node-based page accounting with per cpu differentials.
   264: void __inc_zone_page_state(struct page *, enum zone_stat_item);
   265: void __dec_zone_page_state(struct page *, enum zone_stat_item);
   268: void __inc_node_page_state(struct page *, enum node_stat_item);
   269: void __dec_node_page_state(struct page *, enum node_stat_item);
   272: void inc_zone_page_state(struct page *, enum zone_stat_item);
   273: void dec_zone_page_state(struct page *, enum zone_stat_item);
   276: void inc_node_page_state(struct page *, enum node_stat_item);
   277: void dec_node_page_state(struct page *, enum node_stat_item);
   347: static inline void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
   353: __inc_node_page_state(struct page *page, enum node_stat_item item)
   360: __dec_zone_page_state(struct page *page, enum zone_stat_item item)
   366: __dec_node_page_state(struct page *page, enum node_stat_item item)
    …

/kernel/linux/linux-5.10/include/linux/kasan.h
     8: struct page;
    50: void kasan_alloc_pages(struct page *page, unsigned int order);
    51: void kasan_free_pages(struct page *page, unsigned int order);
    56: void kasan_poison_slab(struct page *page);
   115: static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
   116: static inline void kasan_free_pages(struct page *page, unsigned int order) {}
   122: kasan_poison_slab(struct page *page)
    …

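vmstat.h and kasan.h both use the same compile-out pattern: when the feature is configured in, the header declares the real functions; otherwise it supplies empty static inline stubs so call sites need no #ifdef. A minimal, self-contained userspace illustration of the idea; the names are only loosely modelled on kasan.h.

    #include <stdio.h>

    /* pretend CONFIG_KASAN selects a real implementation defined elsewhere */
    #ifdef CONFIG_KASAN
    void kasan_alloc_pages(void *page, unsigned int order);
    #else
    /* feature disabled: the stub compiles away at the call site */
    static inline void kasan_alloc_pages(void *page, unsigned int order) { }
    #endif

    int main(void)
    {
            char page[4096];

            kasan_alloc_pages(page, 0);     /* same call either way */
            printf("page at %p handed to the allocation hook\n", (void *)page);
            return 0;
    }
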
/kernel/linux/linux-5.10/scripts/kconfig/lxdialog/textbox.c
    21: static char *page;
    57: page = buf; /* page is pointer to start of page to be displayed */  (in dialog_textbox())
   123: /* Print first page of text */  (in dialog_textbox())
   139: case 'g': /* First page */  (in dialog_textbox())
   143: page = buf;  (in dialog_textbox())
   149: case 'G': /* Last page */  (in dialog_textbox())
   154: page = buf + strlen(buf);  (in dialog_textbox())
   169: case 'B': /* Previous page */  (in dialog_textbox())
    …

/kernel/linux/linux-6.6/scripts/kconfig/lxdialog/textbox.c
    13: static const char *buf, *page;
    18: * 'page' will be updated to point to the desired line in 'buf'.
    27: if (*page == '\0') {  (in back_lines())
    33: if (page == buf) {  (in back_lines())
    37: page--;  (in back_lines())
    39: if (page == buf) {  (in back_lines())
    43: page--;  (in back_lines())
    44: } while (*page != '\n');  (in back_lines())
    45: page++;  (in back_lines())
    51: * 'page' should …
    …

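The back_lines() fragments above show how the text viewer walks the page cursor backwards through a NUL-terminated buffer one '\n' at a time. A self-contained userspace approximation of that walk; it is simplified, since the real dialog code also tracks begin- and end-reached flags.

    #include <stdio.h>

    /* move 'page' back n lines within 'buf'; both point into the same string */
    static const char *back_lines(const char *buf, const char *page, int n)
    {
            while (n-- > 0) {
                    if (page == buf)
                            break;                  /* already at the first line */
                    page--;                         /* step onto the previous '\n' */
                    while (page != buf && *(page - 1) != '\n')
                            page--;                 /* walk to the start of that line */
            }
            return page;
    }

    int main(void)
    {
            const char *buf = "line one\nline two\nline three\n";
            const char *page = buf + sizeof("line one\nline two\n") - 1;

            fputs(back_lines(buf, page, 1), stdout);   /* prints from "line two" on */
            return 0;
    }
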
/kernel/linux/linux-6.6/drivers/usb/gadget/configfs.c
   138: char *page) \
   140: return sprintf(page, "0x%02x\n", \
   146: char *page) \
   148: return sprintf(page, "0x%04x\n", \
   155: const char *page, size_t len) \
   159: ret = kstrtou8(page, 0, &val); \
   168: const char *page, size_t len) \
   172: ret = kstrtou16(page, 0, &val); \
   206: const char *page, size_t len)  (in gadget_dev_desc_bcdDevice_store())
   211: ret = kstrtou16(page, …  (in gadget_dev_desc_bcdDevice_store())

   The remaining hits are configfs attribute handlers with the usual signatures,
   (struct config_item *item, char *page) for show() and
   (struct config_item *item, const char *page, size_t len) for store():
   205: gadget_dev_desc_bcdDevice_store()       222: gadget_dev_desc_bcdUSB_store()
   239: gadget_dev_desc_UDC_show()              268: gadget_dev_desc_UDC_store()
   311: gadget_dev_desc_max_speed_show()        319: gadget_dev_desc_max_speed_store()
   521: gadget_config_desc_MaxPower_show()      529: gadget_config_desc_MaxPower_store()
   544: gadget_config_desc_bmAttributes_show()  552: gadget_config_desc_bmAttributes_store()
   800: gadget_string_id_show()                 810: gadget_string_s_show()
   819: gadget_string_s_store()                 970: webusb_use_show()
   976: webusb_use_store()                      994: webusb_bcdVersion_show()
  1000: webusb_bcdVersion_store()              1022: webusb_bVendorCode_show()
  1028: webusb_bVendorCode_store()             1046: webusb_landingPage_show()
  1051: webusb_landingPage_store()             1115: os_desc_use_show()
  1121: os_desc_use_store()                    1139: os_desc_b_vendor_code_show()
  1145: os_desc_b_vendor_code_store()          1163: os_desc_qw_sign_show()
  1175: os_desc_qw_sign_store()                1272: ext_prop_type_show()
  1277: ext_prop_type_store()                  1316: ext_prop_data_show()
  1330: ext_prop_data_store()                  1450: interf_grp_compatible_id_show()
  1457: interf_grp_compatible_id_store()       1476: interf_grp_sub_compatible_id_show()
  1483: interf_grp_sub_compatible_id_store()
    …

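Lines 138-172 are fragments of the macro that generates those show()/store() pairs. A hedged sketch of what the 16-bit store side plausibly expands to; the macro name, the to_gadget_info() accessor, and the descriptor field path are assumptions rather than text from the excerpt.

    /* sketch: one instantiation per 16-bit device-descriptor field */
    #define GI_DEVICE_DESC_SIMPLE_W_u16(__name)                                \
    static ssize_t gadget_dev_desc_##__name##_store(struct config_item *item, \
                    const char *page, size_t len)                              \
    {                                                                          \
            u16 val;                                                           \
            int ret;                                                           \
                                                                               \
            ret = kstrtou16(page, 0, &val);         /* "0x1234" or "4660" */   \
            if (ret)                                                           \
                    return ret;                                                \
            to_gadget_info(item)->cdev.desc.__name = cpu_to_le16(val);         \
            return len;                                                        \
    }
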
/kernel/linux/linux-5.10/drivers/md/md-bitmap.c
    41: * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
    43: * 1) check to see if this page is allocated, if it's not then try to alloc
    44: * 2) if the alloc fails, set the page's hijacked flag so we'll use the
    45: *    page pointer directly as a counter
    47: * if we find our page, we increment the page's refcount so that it stays
    51: unsigned long page, int create, int no_hijack)
    57: WARN_ON_ONCE(page >= bitmap->pages);
    58: if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
    61: if (bitmap->bp[page]…
   112: md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
   142: read_sb_page(struct mddev *mddev, loff_t offset, struct page *page, unsigned long index, int size)
   205: write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
   281: write_page(struct bitmap *bitmap, struct page *page, int wait)
   320: free_buffers(struct page *page)
   344: read_page(struct file *file, unsigned long index, struct bitmap *bitmap, unsigned long count, struct page *page)
   922: struct page *page;  (in md_bitmap_file_set_bit())
   951: struct page *page;  (in md_bitmap_file_clear_bit())
   979: struct page *page;  (in md_bitmap_file_test_bit())
  1052: struct page *page = NULL;  (in md_bitmap_init_from_disk())
  1196: unsigned long page = chunk >> PAGE_COUNTER_SHIFT;  (in md_bitmap_count_page())
  1204: unsigned long page = chunk >> PAGE_COUNTER_SHIFT;  (in md_bitmap_set_pending())
  1356: unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
  2164: unsigned long page;  (in md_bitmap_resize())
  2265: location_show(struct mddev *mddev, char *page)
  2386: space_show(struct mddev *mddev, char *page)
  2419: timeout_show(struct mddev *mddev, char *page)
  2469: backlog_show(struct mddev *mddev, char *page)
  2529: chunksize_show(struct mddev *mddev, char *page)
  2558: metadata_show(struct mddev *mddev, char *page)
  2585: can_clear_show(struct mddev *mddev, char *page)
  2617: behind_writes_used_show(struct mddev *mddev, char *page)
    …

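The header comment above (lines 41-47) describes md-bitmap's fallback when a counter page cannot be allocated: the page-pointer slot itself is "hijacked" and used directly as a small counter. A self-contained userspace illustration of that pointer-tagging idea; the struct and field names here are invented for the demo and do not match md-bitmap's real bitmap_page structure.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define HIJACKED 0x1UL          /* low bit tags "slot reused as counter" */

    struct counter_slot {
            uintptr_t map;          /* either a real page pointer or a tagged counter */
    };

    static void slot_inc(struct counter_slot *slot, int pretend_alloc_fails)
    {
            if (slot->map & HIJACKED) {
                    slot->map += 1UL << 1;                  /* bump embedded counter */
                    return;
            }
            if (!slot->map) {
                    void *page = pretend_alloc_fails ? NULL : malloc(4096);
                    if (!page) {                            /* hijack: count in the slot */
                            slot->map = HIJACKED | (1UL << 1);
                            return;
                    }
                    slot->map = (uintptr_t)page;
            }
            /* with a real page the counters live inside the page (not shown) */
    }

    int main(void)
    {
            struct counter_slot slot = { 0 };

            slot_inc(&slot, 1);
            slot_inc(&slot, 1);
            printf("hijacked count = %lu\n", (unsigned long)(slot.map >> 1));
            return 0;
    }
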
/kernel/linux/linux-5.10/arch/ia64/mm/init.c
    48: struct page *vmem_map;
    52: struct page *zero_page_memmap_ptr; /* map entry for zero page */
    59: struct page *page;  (in __ia64_sync_icache_dcache())
    61: page = pte_page(pte);  (in __ia64_sync_icache_dcache())
    62: addr = (unsigned long) page_address(page);  (in __ia64_sync_icache_dcache())
    64: if (test_bit(PG_arch_1, &page->flags))  (in __ia64_sync_icache_dcache())
    67: flush_icache_range(addr, addr + page_size(page));  (in __ia64_sync_icache_dcache())
    68: set_bit(PG_arch_1, &page->flags);  (in __ia64_sync_icache_dcache())
   207: put_kernel_page(struct page *page, unsigned long address, pgprot_t pgprot)
   242: struct page *page;  (in setup_gate())
   490: void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE,  (in create_mem_map_page_table())
    …

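Lines 59-68 are essentially the whole of __ia64_sync_icache_dcache(): the PG_arch_1 page flag records that a page's instruction cache is already coherent, so the flush happens at most once per page. A hedged reconstruction; only the early return on the flag test is filled in.

    void __ia64_sync_icache_dcache(pte_t pte)
    {
            unsigned long addr;
            struct page *page;

            page = pte_page(pte);
            addr = (unsigned long) page_address(page);

            if (test_bit(PG_arch_1, &page->flags))
                    return;                         /* i-cache already in sync */

            flush_icache_range(addr, addr + page_size(page));
            set_bit(PG_arch_1, &page->flags);       /* remember we flushed it */
    }
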
/kernel/linux/linux-5.10/drivers/media/i2c/adv748x/adv748x-core.c
   118: int adv748x_read(struct adv748x_state *state, u8 page, u8 reg)
   120: return adv748x_read_check(state, page, reg);  (in adv748x_read())
   123: int adv748x_write(struct adv748x_state *state, u8 page, u8 reg, u8 value)
   125: return regmap_write(state->regmap[page], reg, value);  (in adv748x_write())
   128: static int adv748x_write_check(struct adv748x_state *state, u8 page, u8 reg,
   134: *error = adv748x_write(state, page, reg, value);  (in adv748x_write_check())
   206: * @page: Regmap page identifier
   208: * @value: value to write to @page at @reg
   211: u8 page;  (struct member)
   240: u8 page = is_txa(tx) ? ADV748X_PAGE_TXA : ADV748X_PAGE_TXB;  (in adv748x_power_up_tx())
   287: u8 page = is_txa(tx) ? ADV748X_PAGE_TXA : ADV748X_PAGE_TXB;  (in adv748x_power_down_tx())
    …

/kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/nouveau_dmem.c
    47: * nouveau to be more page like (not necessarily with system page size but a
    48: * bigger page size) at lowest level and have some shim layer on top that would
    85: struct page *free_pages;
    89: static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
    91: return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);  (in nouveau_page_to_chunk())
    94: static struct nouveau_drm *page_to_drm(struct page *page)
    96: struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);  (in page_to_drm())
   101: nouveau_dmem_page_addr(struct page *page)
   110: nouveau_dmem_page_free(struct page *page)
   231: struct page *page;  (in nouveau_dmem_chunk_alloc())
   310: struct page *page = NULL;  (in nouveau_dmem_page_alloc_locked())
   332: nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
    …

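nouveau_page_to_chunk() above relies on container_of(): given a pointer to the pagemap member embedded in a chunk, it recovers the chunk itself. A self-contained userspace demonstration of the same pointer arithmetic; the struct layout here is a stand-in, not nouveau's real definition.

    #include <stddef.h>
    #include <stdio.h>

    /* same idea as the kernel macro: subtract the member's offset */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct dev_pagemap { int nr_range; };

    struct nouveau_dmem_chunk {
            unsigned long bo_offset;
            struct dev_pagemap pagemap;     /* embedded member */
    };

    static struct nouveau_dmem_chunk *chunk_of(struct dev_pagemap *pgmap)
    {
            return container_of(pgmap, struct nouveau_dmem_chunk, pagemap);
    }

    int main(void)
    {
            struct nouveau_dmem_chunk chunk = { .bo_offset = 0x1000 };

            /* recover the enclosing chunk from a pointer to its member */
            printf("bo_offset = 0x%lx\n", chunk_of(&chunk.pagemap)->bo_offset);
            return 0;
    }
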
/kernel/linux/linux-5.10/arch/xtensa/include/asm/page.h
     2: * include/asm-xtensa/page.h
    20: * PAGE_SHIFT determines the page size
    41: * If the cache size for one way is greater than the page size, we have to
    42: * deal with cache aliasing. The cache index is wider than the page size:
    53: * When the page number is translated to the physical page address, the lowest
    57: * The kernel does not provide a mechanism to ensure that the page color
    60: * the page might also change.
    97: typedef struct { unsigned long pte; } pte_t;  /* page table entry */
   100: typedef struct page *pgtable_t;
    …

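The page.h comment above describes cache aliasing: when one cache way is larger than a page, the cache index includes virtual-address bits above the page offset, so the same physical page mapped at two virtual addresses can land in different cache lines unless the mappings share a "color". A self-contained userspace illustration of that index arithmetic; the 16 KiB way size is an assumed example, not xtensa's actual configuration.

    #include <stdio.h>

    #define PAGE_SHIFT        12                    /* 4 KiB pages */
    #define DCACHE_WAY_SHIFT  14                    /* assumed 16 KiB per cache way */
    #define DCACHE_ALIAS_MASK (((1UL << DCACHE_WAY_SHIFT) - 1) & \
                               ~((1UL << PAGE_SHIFT) - 1))
    #define DCACHE_COLOR(a)   (((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)

    int main(void)
    {
            unsigned long va1 = 0x10002000UL;       /* two hypothetical mappings */
            unsigned long va2 = 0x10003000UL;       /* of the same physical page */

            printf("color(va1) = %lu, color(va2) = %lu: %s\n",
                   DCACHE_COLOR(va1), DCACHE_COLOR(va2),
                   DCACHE_COLOR(va1) == DCACHE_COLOR(va2)
                           ? "same color, no aliasing"
                           : "different colors, aliasing must be handled");
            return 0;
    }
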
/kernel/linux/linux-5.10/arch/m68k/include/asm/cacheflush_mm.h
   221: /* Push the page at kernel virtual address and clear the icache */
   252: #define flush_dcache_page(page) __flush_page_to_ram(page_address(page))
   255: #define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page))
   257: extern void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
   263: copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, void *src, int len)
   264: struct page *page, unsigned long vaddr,  (in copy_to_user_page())
   267: flush_cache_page(vma, vaddr, page_to_pfn(page));  (in copy_to_user_page())
   271: copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, void *src, int len)
    …

/kernel/linux/linux-6.6/arch/m68k/include/asm/cacheflush_mm.h
   222: /* Push the page at kernel virtual address and clear the icache */
   258: #define flush_dcache_page(page) __flush_pages_to_ram(page_address(page), 1)
   263: #define flush_icache_pages(vma, page, nr) \
   264:         __flush_pages_to_ram(page_address(page), nr)
   266: extern void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
   272: copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, void *src, int len)
   273: struct page *page, unsigned long vaddr,  (in copy_to_user_page())
   276: flush_cache_page(vma, vaddr, page_to_pfn(page));  (in copy_to_user_page())
   280: copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, void *src, int len)
    …

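Both m68k versions show only the first half of copy_to_user_page(): flush the user mapping's cache lines before touching the page. The usual remainder of this helper is the copy itself followed by an icache flush of the user page; a hedged sketch of the complete sequence follows, with the memcpy and the trailing flush treated as an assumed completion rather than text from the excerpt.

    static inline void copy_to_user_page(struct vm_area_struct *vma,
                                         struct page *page, unsigned long vaddr,
                                         void *dst, void *src, int len)
    {
            flush_cache_page(vma, vaddr, page_to_pfn(page));   /* from the excerpt */
            memcpy(dst, src, len);                             /* assumed */
            flush_icache_user_page(vma, page, vaddr, len);     /* assumed */
    }
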
/kernel/linux/linux-5.10/fs/iomap/seek.c
    14: * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
    18: page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
    24: loff_t poff = page_offset(page);  (in page_seek_hole_data())
    31: * Last offset smaller than the start of the page means we found  (in page_seek_hole_data())
    40: * Just check the page unless we can and should check block ranges:  (in page_seek_hole_data())
    43: return PageUptodate(page) == seek_data;  (in page_seek_hole_data())
    45: lock_page(page);  (in page_seek_hole_data())
    46: if (unlikely(page->mapping != inode->i_mapping))  (in page_seek_hole_data())
    52: if (ops->is_partially_uptodate(page, of…  (in page_seek_hole_data())
    96: struct page *page = pvec.pages[i];  (in page_cache_seek_hole_data())
    …

/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvif/vmm.c
    77: nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse, u8 page, u8 align, u64 size, struct nvif_vma *vma)
    78: u8 page, u8 align, u64 size, struct nvif_vma *vma)  (in nvif_vmm_get())
    85: args.page = page;  (in nvif_vmm_get())
   110: kfree(vmm->page);  (in nvif_vmm_dtor())
   123: vmm->page = NULL;  (in nvif_vmm_ctor())
   142: vmm->page = kmalloc_array(vmm->page_nr, sizeof(*vmm->page),  (in nvif_vmm_ctor())
   144: if (!vmm->page) {  (in nvif_vmm_ctor())
   157: vmm->page[i].shift = args.shift;  (in nvif_vmm_ctor())
   158: vmm->page[…  (in nvif_vmm_ctor())
    …

/kernel/linux/linux-5.10/drivers/staging/rtl8188eu/core/rtw_debug.c
    12: int proc_get_drv_version(char *page, char **start,
    18: len += scnprintf(page + len, count - len, "%s\n", DRIVERVERSION);  (in proc_get_drv_version())
    24: int proc_get_write_reg(char *page, char **start,
    73: int proc_get_read_reg(char *page, char **start,
    89: len += scnprintf(page + len, count - len, "usb_read8(0x%x)=0x%x\n", proc_get_read_addr, usb_read8(padapter, proc_get_read_addr));  (in proc_get_read_reg())
    92: len += scnprintf(page + len, count - len, "usb_read16(0x%x)=0x%x\n", proc_get_read_addr, usb_read16(padapter, proc_get_read_addr));  (in proc_get_read_reg())
    95: len += scnprintf(page + len, count - len, "usb_read32(0x%x)=0x%x\n", proc_get_read_addr, usb_read32(padapter, proc_get_read_addr));  (in proc_get_read_reg())
    98: len += scnprintf(page + len, count - len, "error read length=%d\n", proc_get_read_len);  (in proc_get_read_reg())
   133: int proc_get_adapter_state(char *page, char **start,
   141: len += scnprintf(page…  (in proc_get_adapter_state())
   148: proc_get_best_channel(char *page, char **start, off_t offset, int count, int *eof, void *data)
    …

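All of these proc handlers build their output with the same bounded-append idiom: len += scnprintf(page + len, count - len, ...). A self-contained userspace illustration follows; snprintf stands in for the kernel's scnprintf, and the important difference is that scnprintf returns the bytes actually written, which is what keeps chained appends safe even when the buffer fills up.

    #include <stdio.h>

    int main(void)
    {
            char page[64];
            int count = (int)sizeof(page);
            int len = 0;

            /* each append starts where the previous one ended and never
             * writes past the remaining space */
            len += snprintf(page + len, count - len, "drv version: %s\n", "v4.1.4");
            len += snprintf(page + len, count - len, "read addr:   0x%x\n", 0x100);

            fputs(page, stdout);
            return 0;
    }
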
/kernel/linux/linux-6.6/drivers/s390/char/sclp_con.c
    50: void *page;  (in sclp_conbuf_callback())
    53: page = sclp_unmake_buffer(buffer);  (in sclp_conbuf_callback())
    58: list_add_tail((struct list_head *) page, &sclp_con_pages);  (in sclp_conbuf_callback())
   139: void *page;  (in sclp_console_drop_buffer())
   151: page = sclp_unmake_buffer(buffer);  (in sclp_console_drop_buffer())
   152: list_add_tail((struct list_head *) page, &sclp_con_pages);  (in sclp_console_drop_buffer())
   164: void *page;  (in sclp_console_write())
   186: page = sclp_con_pages.next;  (in sclp_console_write())
   187: list_del((struct list_head *) page);  (in sclp_console_write())
   188: sclp_conbuf = sclp_make_buffer(page, SCLP_CON_COLUMN…  (in sclp_console_write())
   272: void *page;  (in sclp_console_init())
    …