/foundation/arkui/ace_engine/frameworks/bridge/declarative_frontend/engine/jsi/
  jsi_animator_model_ng_impl.cpp:
    Create():
        auto page = GetCurrentPage();
        CHECK_NULL_VOID(page);
        auto animatorInfo = page->GetJsAnimator(animatorId);
        ...
        page->GetPageUrl().c_str(), AceType::RawPtr(page));
        ...
        page->AddJsAnimator(animatorId, animatorInfo);
    GetAnimatorInfo():
        auto page = GetCurrentPage();
        if (!page) {
            TAG_LOGW(AceLogTag::ACE_ANIMATION,
                "look for animator component, but current page is null, id:%{public}s", ...
        ...
        auto animatorInfo = page ...
/kernel/linux/linux-5.10/arch/arm/mm/ |
  copypage-v6.c:
    /*
     * Copy the user page.  No aliasing to deal with so we can just ...
     */
    static void v6_copy_user_highpage_nonaliasing(struct page *to,
        struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
    ...
    /*
     * Clear the user page.  No aliasing to deal with so we can just
     * attack the kernel's existing mapping of this page.
     */
    static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
    {
        void *kaddr = kmap_atomic(page);
    ...
     * Discard data in the kernel mapping for the new page.
    ...
     * Copy the page, taking ...
    ...
    static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
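  The bodies of the non-aliasing helpers are cut off in the excerpt. A minimal sketch of what
  such a non-aliasing copy looks like, assuming the usual kmap_atomic()/copy_page() pattern
  (a reconstruction, not a verbatim quote of the file):

    static void v6_copy_user_highpage_nonaliasing(struct page *to,
        struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
    {
        void *kto, *kfrom;

        /* Map both pages into the kernel and copy through those mappings;
         * with no cache aliasing there is nothing further to flush. */
        kfrom = kmap_atomic(from);
        kto = kmap_atomic(to);
        copy_page(kto, kfrom);
        kunmap_atomic(kto);
        kunmap_atomic(kfrom);
    }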
/kernel/linux/linux-6.6/arch/m68k/include/asm/ |
  page_mm.h:
    static inline void clear_page(void *page)
    {
        unsigned long *sp = page;
        ...
        : "a" (page), "0" (sp),
        ...
    }
    ...
    #define clear_page(page)  memset((page), 0, PAGE_SIZE)
    ...
    #define clear_user_page(addr, vaddr, page) \
        ...
        flush_dcache_page(page); \
        ...
    #define copy_user_page(to, from, vaddr, page) \
        ...
        flush_dcache_page(page); \
    ...
    #define page_to_virt(page) ({ \
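  The first statement of each user-page macro is elided above. A sketch of the conventional
  definitions such macros follow (clear or copy through the kernel mapping, then flush the
  D-cache for the user-visible page; a reconstruction under that assumption, not a verbatim quote):

    #define clear_user_page(addr, vaddr, page)                              \
        do {                                                                \
            clear_page(addr);        /* zero via the kernel mapping */      \
            flush_dcache_page(page); /* make it coherent at vaddr */        \
        } while (0)

    #define copy_user_page(to, from, vaddr, page)                           \
        do {                                                                \
            copy_page(to, from);     /* copy via the kernel mapping */      \
            flush_dcache_page(page); /* flush the destination page */       \
        } while (0)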
/kernel/linux/linux-6.6/arch/arm/mm/ |
  copypage-v6.c:
    (Same hits as the linux-5.10 copy above: v6_copy_user_highpage_nonaliasing() and
     v6_clear_user_highpage_nonaliasing() built on kmap_atomic(page), the comments about
     attacking the kernel's existing mapping and discarding data in the kernel mapping for
     the new page, and v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr).)
/kernel/linux/linux-6.6/arch/parisc/include/asm/ |
  cacheflush.h:
    static inline void flush_dcache_page(struct page *page)
    {
        flush_dcache_folio(page_folio(page));
    }
    ...
    void flush_icache_pages(struct vm_area_struct *vma, struct page *page, ...
    ...
    void copy_to_user_page(struct vm_area_struct *vma, struct page *page, ...
    void copy_from_user_page(struct vm_area_struct *vma, struct page *page, ...
    ...
    void flush_anon_page(struct vm_area_struct *vma, struct page *page ...
/kernel/linux/linux-5.10/drivers/xen/ |
  mem-reservation.c:
     * Use one extent per PAGE_SIZE to avoid to break down the page into ...
    ...
    __xenmem_reservation_va_mapping_update(..., struct page **pages, ...):
        struct page *page = pages[i];
        unsigned long pfn = page_to_pfn(page);

        BUG_ON(!page);
        /*
         * ... different page granularity.
         */
        ...
        /* Link back into the page tables if not highmem. */
        if (!PageHighMem(page)) {
    ...
    __xenmem_reservation_va_mapping_reset(..., struct page **pages, ...):
        struct page *page = pages[i];
/kernel/linux/linux-5.10/fs/squashfs/ |
  symlink.c:
    static int squashfs_symlink_readpage(struct file *file, struct page *page)
    {
        struct inode *inode = page->mapping->host;
        ...
        int index = page->index << PAGE_SHIFT;
        ...
        TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
              "%llx, offset %x\n", page->index, block, offset);
        ...
         * kmap_atomic to map the page.  Instead call the underlying ...
        ...
        pageaddr = kmap_atomic(page);
        ...
        flush_dcache_page(page);
        SetPageUptodate(page);
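  The excerpt shows the usual shape of a ->readpage implementation that fills a page cache
  page by hand. A minimal generic sketch of that pattern (the target/length variables are
  illustrative stand-ins, not squashfs internals):

    static int example_symlink_readpage(struct file *file, struct page *page)
    {
        void *pageaddr = kmap_atomic(page);      /* temporary kernel mapping */

        memcpy(pageaddr, target, length);        /* fill in the symlink bytes */
        memset(pageaddr + length, 0, PAGE_SIZE - length);
        kunmap_atomic(pageaddr);

        flush_dcache_page(page);                 /* keep the D-cache coherent */
        SetPageUptodate(page);                   /* contents are now valid */
        unlock_page(page);                       /* ->readpage is entered with the page locked */
        return 0;
    }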
/kernel/linux/linux-6.6/arch/x86/kernel/cpu/sgx/ |
  sgx.h:
    "EREMOVE returned %d (0x%x) and an EPC page was leaked. SGX may become unusable. " \
    ...
    /* Pages, which are being tracked by the page reclaimer. */
    ...
     * the free page list local to the node is stored here.
    ...
    static inline unsigned long sgx_get_epc_phys_addr(struct sgx_epc_page *page)
    {
        struct sgx_epc_section *section = &sgx_epc_sections[page->section];
        ...
        index = ((unsigned long)page - (unsigned long)section->pages) / sizeof(*page);
        ...
    }

    static inline void *sgx_get_epc_virt_addr(struct sgx_epc_page *page)
    {
        struct sgx_epc_section *section = &sgx_epc_sections[page->section];
        ...
        index = ((unsigned long)page ...
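  Both helpers derive the page's index from its position in the section's metadata array;
  the return statements are cut off above. Assuming the obvious completion (section base plus
  index * PAGE_SIZE; a reconstruction, not a verbatim quote), the pair reads roughly:

    static inline unsigned long sgx_get_epc_phys_addr(struct sgx_epc_page *page)
    {
        struct sgx_epc_section *section = &sgx_epc_sections[page->section];
        unsigned long index;

        /* The page's slot in the section's metadata array ... */
        index = ((unsigned long)page - (unsigned long)section->pages) / sizeof(*page);

        /* ... maps 1:1 onto a 4 KiB slot in that section of the EPC. */
        return section->phys_addr + index * PAGE_SIZE;
    }

    static inline void *sgx_get_epc_virt_addr(struct sgx_epc_page *page)
    {
        struct sgx_epc_section *section = &sgx_epc_sections[page->section];
        unsigned long index;

        index = ((unsigned long)page - (unsigned long)section->pages) / sizeof(*page);
        return section->virt_addr + index * PAGE_SIZE;
    }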
/third_party/node/deps/v8/src/heap/ |
  conservative-stack-visitor.cc:
    bool ConservativeStackVisitor::CheckPage(Address address, MemoryChunk* page) {
      if (address < page->area_start() || address >= page->area_end()) return false;
      auto base_ptr = page->object_start_bitmap()->FindBasePtr(address);
      ...
      page->SetFlag(BasicMemoryChunk::Flag::PINNED);
    ...
    VisitConservativelyIfPointer():
      for (Page* page : *isolate_->heap()->old_space()) {
        if (CheckPage(address, page)) {
        ...
      for (LargePage* page : *isolate_->heap()->lo_space()) {
        if (address >= page->area_start() && address < page ...
/kernel/linux/linux-5.10/kernel/events/ |
  ring_buffer.c:
    __perf_output_begin():
        handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
        handle->addr = rb->data_pages[handle->page] + offset;
    ...
    static struct page *rb_alloc_aux_page(int node, int order)
    {
        struct page *page;
        ...
        do {
            page = alloc_pages_node(node, PERF_AUX_GFP, order);
        } while (!page && order--);

        if (page && order) {
            ...
             * set its first page's private to this order;
             * !PagePrivate(page) means ...
    ...
    rb_free_aux_page():
        struct page *page = virt_to_page(rb->aux_pages[idx]);
    ...
    Further hits in rb_alloc_aux(), perf_mmap_alloc_page(), perf_mmap_free_page() and
    perf_mmap_unmark_page(), each declaring a local struct page *page.
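  The branch cut off above records the allocation order so the free path can undo a high-order
  allocation. Assuming the usual split_page()-plus-page-private idiom (a reconstruction, not a
  verbatim quote), it continues roughly:

    if (page && order) {
        /*
         * Communicate the allocation size to the free path: split the
         * high-order block into order-0 pages, then stash the original
         * order in the first page's private field; !PagePrivate(page)
         * means an ordinary order-0 page.
         */
        split_page(page, order);
        SetPagePrivate(page);
        set_page_private(page, order);
    }

    return page;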
/kernel/linux/linux-6.6/kernel/events/ |
  ring_buffer.c:
    (Same hits as the linux-5.10 copy above: __perf_output_begin() indexing
     rb->data_pages[handle->page], rb_alloc_aux_page() retrying alloc_pages_node() at
     decreasing order, rb_free_aux_page(), rb_alloc_aux(), perf_mmap_alloc_page(),
     perf_mmap_free_page() and perf_mmap_unmark_page().)
/kernel/linux/linux-5.10/mm/ |
  usercopy.c:
    check_page_span(const void *ptr, unsigned long n,
                    struct page *page, bool to_user)
    {
        ...
        struct page *endpage;
        ...
        /* Is the object wholly within one base page? */
        ...
        /* Allow if fully inside the same compound (__GFP_COMP) page. */
        ...
        if (likely(endpage == page))
        ...
        is_reserved = PageReserved(page);
        is_cma = is_migrate_cma_page(page);
        ...
        page = virt_to_head_page(ptr);
        if (is_reserved && !PageReserved(page))
    ...
    check_heap_object():
        struct page *page;
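  The test under the first comment is elided above. A sketch of the conventional check
  (compare the page-aligned addresses of the object's first and last byte; a reconstruction
  under that assumption, not a verbatim quote):

    const void *end = ptr + n - 1;

    /* Is the object wholly within one base page? */
    if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
               ((unsigned long)end & (unsigned long)PAGE_MASK)))
        return;

    /* Otherwise allow it only if it stays inside one compound page. */
    endpage = virt_to_head_page(end);
    if (likely(endpage == page))
        return;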
  page_reporting.c:
    /* request page reporting */
    ...
    /* notify prdev of free page reporting request */
    ...
    page_reporting_drain():
         * free lists/areas.  We assume at least one page is populated.
        ...
        struct page *page = sg_page(sg);
        int mt = get_pageblock_migratetype(page);
        ...
        __putback_isolated_page(page, order, mt);
        ...
         * If page was not comingled with another page we can
         * consider the result to be "reported" since the page ...
    ...
    page_reporting_cycle():
        struct page *page, *next;
/kernel/linux/linux-5.10/arch/ia64/include/asm/ |
  page.h:
    #define RGN_GATE    5   /* Gate page, Kernel text, etc */
    ...
     * PAGE_SHIFT determines the actual kernel page size.
    ...
    # error Unsupported page size!
    ...
    extern void clear_page (void *page);
    ...
    #define clear_user_page(addr, vaddr, page) \
        ...
        flush_dcache_page(page); \
    ...
    #define copy_user_page(to, from, vaddr, page) \
        ...
        flush_dcache_page(page); \
    ...
        struct page *page ...
/kernel/linux/linux-5.10/drivers/usb/gadget/function/ |
  u_ether_configfs.h:
        char *page) \
    { \
        ...
        result = gether_get_dev_addr(opts->net, page, PAGE_SIZE); \
        ...
        const char *page, size_t len) \
        ...
        ret = gether_set_dev_addr(opts->net, page); \
        ...
        result = gether_get_host_addr(opts->net, page, PAGE_SIZE); \
        ...
        const char *page, size_t len) \
        ...
        ret = gether_set_host_addr(opts->net, page); \
        ...
        return sprintf(page, " ...
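  These macros expand to configfs ->show()/->store() handlers, where "page" is a PAGE_SIZE
  scratch buffer rather than a struct page. A minimal generic sketch of that convention (the
  qmult attribute and the example_opts() accessor are illustrative, not the macro's exact
  expansion):

    static ssize_t example_qmult_show(struct config_item *item, char *page)
    {
        /* Format the current value into the PAGE_SIZE buffer configfs hands us. */
        return sprintf(page, "%u\n", example_opts(item)->qmult);
    }

    static ssize_t example_qmult_store(struct config_item *item,
                                       const char *page, size_t len)
    {
        u8 val;
        int ret = kstrtou8(page, 0, &val);   /* parse the text the user wrote */

        if (ret)
            return ret;
        example_opts(item)->qmult = val;
        return len;                          /* consume the whole write */
    }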
/kernel/linux/linux-5.10/fs/erofs/ |
  namei.c:
    static struct page *find_target_block_classic(struct inode *dir, ...
    {
        ...
        struct page *candidate = ERR_PTR(-ENOENT);
        ...
        struct page *page = read_mapping_page(mapping, mid, NULL);

        if (!IS_ERR(page)) {
            struct erofs_dirent *de = kmap_atomic(page);
            ...
            put_page(page);
            ...
            page = ERR_PTR(-EFSCORRUPTED);
            ...
            candidate = page;
            ...
            put_page(page);
    ...
    erofs_namei():
        struct page *page;
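  find_target_block_classic() binary-searches the directory's blocks, keeping the best block
  seen so far as "candidate". A simplified sketch of that search shape (the first-dirent
  comparison helper is hypothetical, and the real function's corruption handling is omitted):

    static struct page *find_block_sketch(struct address_space *mapping,
                                          const char *name, int nr_blocks)
    {
        struct page *candidate = ERR_PTR(-ENOENT);
        int head = 0, back = nr_blocks - 1;

        while (head <= back) {
            int mid = head + (back - head) / 2;
            struct page *page = read_mapping_page(mapping, mid, NULL);

            if (IS_ERR(page)) {
                if (!IS_ERR(candidate))
                    put_page(candidate);
                return page;                      /* propagate the I/O error */
            }

            /* Compare "name" against the first dirent in this block
             * (hypothetical helper standing in for the kmap_atomic() walk). */
            if (first_dirent_compare(page, name) <= 0) {
                /* Target sorts at or after this block: remember it and
                 * keep looking for a later, tighter match. */
                if (!IS_ERR(candidate))
                    put_page(candidate);
                candidate = page;
                head = mid + 1;
            } else {
                /* Target sorts before this block entirely. */
                put_page(page);
                back = mid - 1;
            }
        }
        return candidate;
    }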
/kernel/linux/linux-6.6/mm/ |
  page_reporting.c:
    MODULE_PARM_DESC(page_reporting_order, "Set page reporting order");
    ...
    (Otherwise the same hits as the linux-5.10 copy above: the request/notify comments,
     page_reporting_drain() putting each sg_page() back with __putback_isolated_page(page,
     order, mt), and page_reporting_cycle() walking struct page *page, *next.)
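  The tail of the drain loop is cut off in both copies. Assuming the usual pattern (flag the
  page as reported only if it went back into the buddy free list at the same order; a
  reconstruction, not a verbatim quote), the loop body looks roughly like:

    struct page *page = sg_page(sg);
    int mt = get_pageblock_migratetype(page);
    unsigned int order = get_order(sg->length);

    /* Return the isolated block to the free list it came from. */
    __putback_isolated_page(page, order, mt);

    /* If the report failed, do not mark anything as reported. */
    if (!reported)
        continue;

    /*
     * Only flag the page if it went back as the same buddy block;
     * if it merged with a neighbour, the larger block will be
     * reported on a later pass.
     */
    if (PageBuddy(page) && buddy_order(page) == order)
        __SetPageReported(page);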
/kernel/linux/linux-6.6/arch/sh/mm/ |
  cache.c:
    void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                           ...
    {
        struct folio *folio = page_folio(page);
        ...
        void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
        ...
        flush_cache_page(vma, vaddr, page_to_pfn(page));
    }

    void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                             ...
    {
        struct folio *folio = page_folio(page);

        if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
            ...
            void *vfrom = kmap_coherent(page, vadd ...
    ...
    Further hits: clear_user_highpage(struct page *page, unsigned long vaddr),
    __flush_anon_page(struct page *page, unsigned long vmaddr) and
    flush_icache_pages(struct vm_area_struct *vma, struct page *page, unsigned int nr).
/kernel/linux/linux-5.10/tools/perf/ |
  builtin-help.c:
    static void exec_woman_emacs(const char *path, const char *page)
    {
        ...
        if (asprintf(&man_page, "(woman \"%s\")", page) > 0) {
        ...
    static void exec_man_konqueror(const char *path, const char *page)
    {
        ...
        if (asprintf(&man_page, "man:%s(1)", page) > 0) {
        ...
    static void exec_man_man(const char *path, const char *page)
    {
        ...
        execlp(path, "man", page, NULL);
        ...
    static void exec_man_cmd(const char *cmd, const char *page)
    {
        ...
        if (asprintf(&shell_cmd, "%s %s", cmd, page) > 0) {
    ...
    setup_man_path():
         * system-wide paths after ours to find the manual page.  If ...
    ...
    static void exec_viewer(const char *name, const char *page)
    ...
    Further hits: show_man_page(), show_info_page(), get_html_page_path(char **page_path,
    const char *page) and show_html_page(), each resolving
    const char *page = cmd_to_page(perf_cmd);
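  Each viewer helper follows the same build-a-command-then-exec pattern. A small
  self-contained sketch of that pattern (run_man_viewer is an illustrative name, not perf's
  API):

    #define _GNU_SOURCE          /* for asprintf() */
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* Compose "<viewer> <page>" and replace the current process with it. */
    static void run_man_viewer(const char *viewer, const char *page)
    {
        char *cmd;

        if (asprintf(&cmd, "%s %s", viewer, page) > 0) {
            execl("/bin/sh", "sh", "-c", cmd, (char *)NULL);
            free(cmd);               /* only reached if exec failed */
        }
        fprintf(stderr, "failed to exec '%s'\n", viewer);
    }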
/kernel/linux/linux-5.10/fs/btrfs/ |
  extent_io.h:
     * page->private values.  Every page that is controlled by the extent
     * map has page->private set to one.
    ...
    struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
    ...
             struct page *page, size_t pg_offset, ...
    ...
    int try_release_extent_mapping(struct page *page, gfp_t mask);
    int try_release_extent_buffer(struct page *page);
    ...
    struct page *page;    (struct member)
/kernel/linux/linux-6.6/include/linux/ |
  huge_mm.h:
     * - For file vma, check if the linear page offset of vma is ...
    ...
    int split_huge_page_to_list(struct page *page, struct list_head *list);
    static inline int split_huge_page(struct page *page)
    {
        return split_huge_page_to_list(page, NULL);
    }
    ...
    struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, ...
    struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, ...
    ...
    extern struct page *huge_zero_page;
    ...
    static inline bool is_huge_zero_page(struct page *pag ...
    ...
    !CONFIG_TRANSPARENT_HUGEPAGE stubs for split_huge_page_to_list(page, list),
    split_huge_page(page) and is_huge_zero_page(page).
/kernel/linux/linux-6.6/kernel/module/ |
  decompress.c:
    module_extend_max_pages():
        struct page **new_pages;
    ...
    static struct page *module_get_next_page(struct load_info *info)
    {
        struct page *page;
        ...
        page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
        if (!page)
            ...
        info->pages[info->used_pages++] = page;
        return page;
    }
    ...
    Further hits: module_gzip_decompress(), module_xz_decompress() and
    module_zstd_decompress(), each pulling struct page *page = module_get_next_page(info).
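  Only the local declaration of module_extend_max_pages() survives in the excerpt. A sketch
  of the grow-the-page-array step it performs, assuming the usual allocate-copy-swap idiom
  (a reconstruction, not a verbatim quote):

    static int module_extend_max_pages(struct load_info *info, unsigned int extent)
    {
        struct page **new_pages;

        /* Allocate a larger array and carry over the pages collected so far. */
        new_pages = kvmalloc_array(info->max_pages + extent,
                                   sizeof(info->pages), GFP_KERNEL);
        if (!new_pages)
            return -ENOMEM;

        memcpy(new_pages, info->pages, info->max_pages * sizeof(info->pages));
        kvfree(info->pages);
        info->pages = new_pages;
        info->max_pages += extent;

        return 0;
    }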
/kernel/linux/linux-6.6/tools/perf/ |
  builtin-help.c:
    (Same hits as the linux-5.10 copy above: exec_woman_emacs(), exec_man_konqueror(),
     exec_man_man(), exec_man_cmd(), setup_man_path(), exec_viewer(), and the
     show_*_page()/get_html_page_path() helpers resolving cmd_to_page(perf_cmd).)
/kernel/linux/linux-6.6/arch/xtensa/include/asm/ |
  page.h:
     * include/asm-xtensa/page.h
    ...
     * PAGE_SHIFT determines the page size
    ...
     * If the cache size for one way is greater than the page size, we have to
     * deal with cache aliasing.  The cache index is wider than the page size:
    ...
     * When the page number is translated to the physical page address, the lowest
    ...
     * The kernel does not provide a mechanism to ensure that the page color
    ...
     * the page might also change.
    ...
    typedef struct { unsigned long pte; } pte_t;        /* page table entry */
    ...
    typedef struct page *pgtable_ ...
/third_party/node/test/cctest/ |
  test_crypto_clienthello.cc:
    // catch the memory violation, so do not use a guard page.
    ...
    OverrunGuardedBuffer():
        size_t page = GetPageSize();
        CHECK_GE(page, N);
        ...
        // Place the packet right before a guard page, which, when accessed, causes
        // ...
        alloc_base = static_cast<uint8_t*>(aligned_alloc(page, 2 * page));
        ...
        uint8_t* second_page = alloc_base + page;
        CHECK_EQ(mprotect(second_page, page, PROT_NONE), 0);
        ...
        VirtualAlloc(nullptr, 2 * page, MEM_COMMIT, PAGE_READWRITE));
        ...
        uint8_t* second_page = alloc_base + page;
    ...
    ~OverrunGuardedBuffer():
        size_t page = GetPageSize();
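  The test places its buffer immediately before an inaccessible page so any overrun faults
  instead of silently corrupting memory. A minimal standalone sketch of that layout for a
  POSIX host (alloc_before_guard is an illustrative helper, not part of the test):

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Return a pointer to n bytes that end exactly at a PROT_NONE guard page. */
    static uint8_t *alloc_before_guard(size_t n, uint8_t **base_out)
    {
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        uint8_t *base;

        if (n > page)
            return NULL;
        base = aligned_alloc(page, 2 * page);         /* two adjacent pages */
        if (!base)
            return NULL;
        if (mprotect(base + page, page, PROT_NONE)) { /* second page = guard */
            free(base);
            return NULL;
        }
        *base_out = base;       /* caller restores PROT_READ|PROT_WRITE before free() */
        return base + page - n; /* the data ends right at the guard page */
    }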