/device/soc/rockchip/common/sdk_linux/kernel/power/

  snapshot.c
    Memory-bitmap documentation:
      313  "and each of them corresponds to one zone. For each zone bitmap"
      317  "struct memory_bitmap contains a pointer to the main list of zone"
      320  "zone bitmap objects and bitmap block objects."
      330  "PFNs that correspond to the start and end of the represented zone."
      338  "access to the bits. There is one radix tree for each zone (as returned"
      367  "populated memory zone."
      438  "linked list in order. This is guaranteed by the zone ..."
    Struct members:
      383  struct mem_zone_bm_rtree *zone;
      391  struct linked_page *p_list; /* list of pages used to store zone ...
    Functions taking a zone argument:
      441   add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask, int safe_needed, struct chain_allocator *ca)
      546   free_zone_bm_rtree(struct mem_zone_bm_rtree *zone, int clear_nosave_free)
      923   recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
      1198  snapshot_additional_pages(struct zone *zone)
      1236  saveable_highmem_page(struct zone *zone, unsigned long pfn)
      1309  saveable_page(struct zone *zone, unsigned long pfn)
      1402  page_is_saveable(struct zone *zone, unsigned long pfn)
    Local zone variables: 513 create_zone_bm_rtree(), 595 create_mem_extents(),
      670 memory_bm_create(), 699 memory_bm_free(), 719 memory_bm_find_bit(),
      934 memory_bm_recycle(), 1220 count_free_highmem_pages(), 1271 count_highmem_pages(),
      1348 count_data_pages(), 1446 copy_data_pages(), 1743 hibernate_preallocate_memory(),
      1928 enough_free_mem()
    (further matches elided in the original listing)

  power.h
    160  extern unsigned int snapshot_additional_pages(struct zone *zone);

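The snapshot.c comments above describe one bitmap object per populated memory zone: each object records the zone's start and end PFNs and keeps a radix tree of bitmap blocks so that memory_bm_find_bit() can map a PFN to its bit quickly. The sketch below is a deliberately simplified stand-in for that idea, with all names made up for the example and a flat bit array per zone in place of the kernel's radix tree.

/* Simplified, userspace-only sketch of a per-zone memory bitmap.
 * Hypothetical types; snapshot.c uses a radix tree of bitmap blocks
 * instead of one flat bit array per zone. */
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

struct zone_bitmap {
    unsigned long start_pfn;   /* first PFN covered by this zone */
    unsigned long end_pfn;     /* one past the last covered PFN  */
    unsigned long *bits;       /* one bit per PFN in the range   */
    struct zone_bitmap *next;  /* zones kept on a simple list    */
};

/* Locate the zone covering @pfn and return the word/bit position,
 * mirroring what memory_bm_find_bit() does with its radix tree. */
static int find_bit(struct zone_bitmap *zones, unsigned long pfn,
                    unsigned long **word, unsigned long *bit)
{
    struct zone_bitmap *z;

    for (z = zones; z; z = z->next) {
        if (pfn >= z->start_pfn && pfn < z->end_pfn) {
            unsigned long idx = pfn - z->start_pfn;

            *word = &z->bits[idx / BITS_PER_LONG];
            *bit  = idx % BITS_PER_LONG;
            return 0;
        }
    }
    return -1;  /* PFN not covered by any known zone */
}

int main(void)
{
    struct zone_bitmap z = { .start_pfn = 0x1000, .end_pfn = 0x2000 };
    unsigned long *word, bit;

    z.bits = calloc((z.end_pfn - z.start_pfn + BITS_PER_LONG - 1) / BITS_PER_LONG,
                    sizeof(unsigned long));
    z.next = NULL;

    if (find_bit(&z, 0x1234, &word, &bit) == 0) {
        *word |= 1UL << bit;   /* "mark this page for saving" */
        printf("pfn 0x1234 -> bit %lu set\n", bit);
    }
    free(z.bits);
    return 0;
}
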
/device/soc/hisilicon/hi3516dv300/sdk_linux/drv/osal/linux/mmz/

  hisi_allocator.c
    do_mmb_alloc(): 210 osal_list_for_each_entry(p, &mmb->zone->mmb_list, list) {,
      216 mmb->zone->name, __FUNCTION__, __LINE__);
    __mmb_alloc() / __mmb_alloc_v2(): 281, 374 mmb->zone = fixed_mmz;
    __allocator_init(): 500 hil_mmz_t *zone = NULL;
      515 zone = hil_mmz_create("null", 0, 0, 0);
      516 if (zone == NULL) {
      521 if (strncpy_s(zone->name, HIL_MMZ_NAME_LEN, argv[0], HIL_MMZ_NAME_LEN - 1) != EOK) {
      523 hil_mmz_destroy(zone);
      526 zone ...
    (further matches elided in the original listing)

  media_mem.c
    hil_mmz_destroy(): 186 int hil_mmz_destroy(hil_mmz_t *zone),
      188 if (zone == NULL) {, 192 if (zone->destructor) {, 193 zone->destructor(zone);
    _check_mmz(): 199 static int _check_mmz(hil_mmz_t *zone),
      203 unsigned long new_start = zone->phys_start;
      204 unsigned long new_end = zone->phys_start + zone->nbytes;
      206 if (zone ...
    Zone (un)registration API: 241 hil_mmz_register(hil_mmz_t *zone),
      270 hil_mmz_unregister(hil_mmz_t *zone), 828 _check_map_mmz(hil_mmz_t *zone),
      861 hil_map_mmz_register(hil_mmz_t *zone), 896 hil_map_mmz_unregister(hil_mmz_t *zone)
    Local zone variables: 791, 929 hil_mmz_t *zone = NULL;
    (further matches elided in the original listing)

  cma_allocator.c
    do_mmb_alloc(): 111 osal_list_for_each_entry(p, &mmb->zone->mmb_list, list),
      117 mmb->zone->name, __func__, __LINE__);
    __mmb_alloc() / __mmb_alloc_v2(): 198, 289 mmb->zone = fixed_mmz;
    __mmb_free(): 318 hil_mmz_t *mmz = mmb->zone;
    567 hil_mmz_t *zone = NULL;
    589 osal_trace(KERN_ERR"can't get cma zone info:%s\n", argv[0]);
    594 zone = hil_mmz_create("null", 0, 0, 0);
    595 if (zone == NULL) {
    599 if (strncpy_s(zone ...
    (further matches elided in the original listing)

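The hisi_allocator.c, media_mem.c and cma_allocator.c hits all follow the same lifecycle: hil_mmz_create() builds a zone descriptor, its name is filled in with strncpy_s(), _check_mmz() rejects ranges that collide with an already registered zone, and hil_mmz_register() links the zone into the global list (hil_mmz_destroy() on failure). Below is a minimal, self-contained re-creation of the overlap check and registration pattern; the struct layout and function names are illustrative stand-ins, not the HiSilicon MMZ API.

/* Minimal sketch of a media-memory-zone registry with overlap checking.
 * Illustrative only: the types and names are stand-ins for hil_mmz_t etc. */
#include <stdio.h>
#include <string.h>

#define MMZ_NAME_LEN 32
#define MAX_ZONES    8

struct mmz {
    char name[MMZ_NAME_LEN];
    unsigned long phys_start;
    unsigned long nbytes;
};

static struct mmz g_zones[MAX_ZONES];
static int g_nr_zones;

/* Reject a zone whose [phys_start, phys_start + nbytes) range overlaps a
 * registered zone, which is what _check_mmz() guards against. */
static int mmz_check(const struct mmz *zone)
{
    unsigned long new_start = zone->phys_start;
    unsigned long new_end = zone->phys_start + zone->nbytes;
    int i;

    for (i = 0; i < g_nr_zones; i++) {
        unsigned long start = g_zones[i].phys_start;
        unsigned long end = start + g_zones[i].nbytes;

        if (new_start < end && new_end > start) {
            return -1;  /* overlap with an existing zone */
        }
    }
    return 0;
}

static int mmz_register(const char *name, unsigned long phys_start,
                        unsigned long nbytes)
{
    struct mmz zone = { .phys_start = phys_start, .nbytes = nbytes };

    if (g_nr_zones >= MAX_ZONES) {
        return -1;
    }
    /* media_mem.c copies the name with strncpy_s(); plain snprintf here. */
    snprintf(zone.name, sizeof(zone.name), "%s", name);
    if (mmz_check(&zone) != 0) {
        fprintf(stderr, "mmz '%s' overlaps an existing zone\n", zone.name);
        return -1;
    }
    g_zones[g_nr_zones++] = zone;
    return 0;
}

int main(void)
{
    mmz_register("anonymous", 0x80000000UL, 0x04000000UL);  /* accepted  */
    mmz_register("overlap",   0x82000000UL, 0x01000000UL);  /* rejected  */
    printf("registered zones: %d\n", g_nr_zones);
    return 0;
}
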
/device/soc/hisilicon/hi3751v350/sdk_linux/source/common/drv/mmz/

  drv_media_mem.c
    hil_mmz_destroy(): 264 int hil_mmz_destroy(const hil_mmz_t *zone),
      266 if (zone == NULL) {, 270 if (zone->destructor != NULL) {,
      271 zone->destructor(zone);, 272 zone = NULL;
    hil_mmz_register(): 278 int hil_mmz_register(hil_mmz_t *zone),
      280 if (zone == NULL) {, 284 mmz_trace(HIL_MMZ_FMT, hil_mmz_fmt_arg(zone));,
      287 list_add(&zone ...
    293 hil_mmz_unregister(hil_mmz_t *zone)
    Local zone variables (hil_mmz_t *zone = NULL;): 438 hil_mmb_getby_phys(),
      494 hil_mmb_getby_sec_addr(), 1075 hil_mmb_free(), 1141 hil_mmb_getby_kvirt(),
      1182 hil_mmbinfo_getby_kvirt(), 1614 mem_source_query(), 1660 sec_mem_source_query(),
      1720 media_mem_parse_cmdline(), 1987 iommu_zone_init()
    (further matches elided in the original listing)

  drv_media_mem.h
    135  struct hil_media_memory_zone *zone;   (struct member)
    193  extern int hil_mmz_destroy(const hil_mmz_t *zone);
    194  extern int hil_mmz_register(hil_mmz_t *zone);
    195  extern int hil_mmz_unregister(hil_mmz_t *zone);

  drv_mmz_userdev.c
    get_mmbinfo_byusraddr(): 699 hil_mmz_t *zone = NULL;
      707 list_for_each_entry(zone, &g_mmz_list, list) {
      709 for (n = rb_first(&zone->root); n; n = rb_next(n)) {
      711 if (zone->iommu) {
    mmz_userdev_release(): 1268 hil_mmz_t *zone = NULL;
      1281 list_for_each_entry_safe(zone, z, &g_mmz_list, list) {
      1283 for (n = rb_first(&zone->root); n; n = rb_next(n)) {
      1285 if (zone->iommu) {

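drv_mmz_userdev.c walks every registered zone and, inside each zone, every allocated block kept in that zone's red-black tree (list_for_each_entry over g_mmz_list, then rb_first()/rb_next() over zone->root). The kernel-style fragment below sketches that double traversal; the structure layouts are assumptions invented for the example, and only the <linux/list.h> / <linux/rbtree.h> iteration calls are the real API.

/* Sketch of the "for each zone, for each block in its rb-tree" walk used by
 * get_mmbinfo_byusraddr() and mmz_userdev_release(). Struct layouts here are
 * hypothetical. */
#include <linux/list.h>
#include <linux/rbtree.h>

struct demo_mmz {                 /* stand-in for hil_mmz_t */
    struct list_head list;        /* node in the global zone list        */
    struct rb_root root;          /* allocated blocks within this zone   */
    int iommu;                    /* zone managed through an IOMMU?      */
};

struct demo_mmb {                 /* stand-in for an allocated block */
    struct rb_node node;
    unsigned long phys_addr;
    unsigned long length;
};

static LIST_HEAD(demo_mmz_list);

static void demo_walk_all_blocks(void)
{
    struct demo_mmz *zone;
    struct rb_node *n;

    list_for_each_entry(zone, &demo_mmz_list, list) {
        for (n = rb_first(&zone->root); n; n = rb_next(n)) {
            struct demo_mmb *mmb = rb_entry(n, struct demo_mmb, node);

            if (zone->iommu) {
                /* IOMMU-backed zones would take a different unmap path. */
            }
            (void)mmb;            /* inspect or release the block here */
        }
    }
}
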
/device/soc/hisilicon/hi3861v100/sdk_liteos/third_party/lwip_sack/include/lwip/

  ip6_zone.h (scope/zone documentation, lines 10-24)
    10  "zone." For unicast addresses, only link-local addresses have a scope; in
    15  zone). That link may be attached to one or more local interfaces. The
    20  In lwIP, each IPv6 address has an associated zone index. This zone index may
    21  be set to "no zone" (IP6_NO_ZONE, 0) or an actual zone. We say that an
    22  address "has a zone" or "is zoned" when its zone index is *not* set to "no
    23  zone." In lwIP, in principle, each address should be "properly zoned," which
    24  means that if the address has a zone i...
    (further matches elided in the original listing)

  ip6_addr.h
    63   u8_t zone;   (struct member)
    139  /** Copy packed IPv6 address to unpacked IPv6 address; zone is not set */
    146  /** Copy unpacked IPv6 address to packed IPv6 address; zone is lost */
    179  /** Compare IPv6 networks, ignoring zone information. To be used sparingly! */
    200  /** Compare IPv6 addresses, ignoring zone information. To be used sparingly! */
    208  * part of both must be the same, and the zone must be compatible.
    217  /** Compare IPv6 address to packed address and zone */
    293  * interface), the following functions are not assigning or comparing zone ...

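The ip6_zone.h excerpt explains that a link-local IPv6 address only identifies a host relative to a particular link, so lwIP pairs every address with a zone index (the u8_t zone member shown in ip6_addr.h) and uses 0 (IP6_NO_ZONE) to mean "no zone". The snippet below is a self-contained illustration of the "same address words, compatible zone" comparison rule described above; the struct and helper names are made up for the example and are not lwIP's real macros.

/* Self-contained illustration of zone-aware IPv6 address comparison.
 * The struct mirrors the idea of four address words plus a zone index;
 * the helper names are invented for this example. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NO_ZONE 0u   /* corresponds to IP6_NO_ZONE */

struct ip6_addr_demo {
    uint32_t addr[4];
    uint8_t zone;    /* 0 = "no zone", otherwise an interface-derived index */
};

/* Treat zones as compatible when they are equal or one side carries no zone. */
static int zone_compatible(const struct ip6_addr_demo *a,
                           const struct ip6_addr_demo *b)
{
    return a->zone == b->zone || a->zone == NO_ZONE || b->zone == NO_ZONE;
}

static int addr_eq(const struct ip6_addr_demo *a, const struct ip6_addr_demo *b)
{
    return memcmp(a->addr, b->addr, sizeof(a->addr)) == 0 && zone_compatible(a, b);
}

int main(void)
{
    /* Same link-local words on two different zones: not the same destination. */
    struct ip6_addr_demo a = { { 0xfe800000u, 0, 0, 1 }, 1 };
    struct ip6_addr_demo b = { { 0xfe800000u, 0, 0, 1 }, 2 };

    printf("equal: %d\n", addr_eq(&a, &b));   /* prints 0 */
    return 0;
}
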
/device/soc/rockchip/common/sdk_linux/include/linux/

  cpuset.h
    78, 233   static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
    83, 238   static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)

  memblock.h
    254  void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, unsigned long *out_spfn, unsigned long *out_epfn);
    256  * for_each_free_mem_range_in_zone - iterate through zone specific free ...
    259  * @zone: zone in which all of the memory blocks reside
    264  * zone. Available once memblock and an empty zone is initialized. The main
    265  * assumption is that the zone start, end, and pgdat have been associated.
    266  * This way we can use the zone to determine NUMA node, and if a given part
    267  * of the memblock is valid for the zone ...
    (further matches elided in the original listing)

  mm.h
    1082  * The zone field is never updated after free_area_init_core()
    1104  /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
    1133  extern void memmap_init_zone_device(struct zone *, unsigned long, unsigned long, struct dev_pagemap *);
    1320  * the zone since we could be using the section number id if we do not have
    1323  * pages in a zone.
    1494  static inline struct zone *page_zone(const struct page *page)
    1517  static inline void set_page_zone(struct page *page, enum zone_type zone)
    1520  page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
    1529  static inline void set_page_links(struct page *page, enum zone_type zone, unsigned long node, unsigned long pfn)
    1531  set_page_zone(page, zone);
    (further matches elided in the original listing)

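mm.h packs the zone (alongside the node or section) identifier into the upper bits of page->flags, which is why set_page_zone() is just a mask-and-shift (line 1520 above) and page_zone() can recover the zone from those bits. The fragment below reproduces that encoding in isolation; the 2-bit width and the shift are placeholders, since the kernel derives the real values from the build configuration.

/* Stand-alone illustration of packing a zone id into a page-flags word.
 * Widths and shifts are placeholders, not the kernel's configured values. */
#include <stdio.h>

#define ZONES_WIDTH    2UL                               /* enough for 4 zones */
#define ZONES_PGSHIFT  (sizeof(unsigned long) * 8 - ZONES_WIDTH)
#define ZONES_MASK     ((1UL << ZONES_WIDTH) - 1)

enum zone_type_demo { ZONE_DMA_DEMO, ZONE_NORMAL_DEMO, ZONE_HIGHMEM_DEMO };

static void set_page_zone_demo(unsigned long *flags, enum zone_type_demo zone)
{
    *flags &= ~(ZONES_MASK << ZONES_PGSHIFT);            /* clear old value */
    *flags |= ((unsigned long)zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static enum zone_type_demo page_zonenum_demo(unsigned long flags)
{
    return (enum zone_type_demo)((flags >> ZONES_PGSHIFT) & ZONES_MASK);
}

int main(void)
{
    unsigned long flags = 0;

    set_page_zone_demo(&flags, ZONE_HIGHMEM_DEMO);
    printf("zone id read back: %d\n", page_zonenum_demo(flags));  /* prints 2 */
    return 0;
}
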
/device/soc/hisilicon/hi3516dv300/sdk_linux/drv/osal/include/

  osal_mmz.h
    58   struct hil_media_memory_zone *zone;   (struct member)
    78   #define hil_mmb_zone(p) ({hil_mmb_t *__mmb=(p); OSAL_BUG_ON(__mmb==NULL); __mmb->zone; })
    239  extern int hil_mmz_destroy(hil_mmz_t *zone);
    241  extern int hil_mmz_register(hil_mmz_t *zone);
    242  extern int hil_mmz_unregister(hil_mmz_t *zone);

/device/soc/hisilicon/hi3516dv300/sdk_linux/include/

  osal_mmz.h
    55   struct hil_media_memory_zone *zone;   (struct member)
    75   #define hil_mmb_zone(p) ({hil_mmb_t *__mmb=(p); OSAL_BUG_ON(__mmb==NULL); __mmb->zone; })
    236  extern int hil_mmz_destroy(hil_mmz_t *zone);
    238  extern int hil_mmz_register(hil_mmz_t *zone);
    239  extern int hil_mmz_unregister(hil_mmz_t *zone);

/device/soc/hisilicon/hi3516dv300/sdk_liteos/include/

  osal_mmz.h
    56   struct hil_media_memory_zone *zone;   (struct member)
    76   #define hil_mmb_zone(p) ({hil_mmb_t *__mmb=(p); OSAL_BUG_ON(__mmb==NULL); __mmb->zone; })
    222  extern int hil_mmz_destroy(hil_mmz_t *zone);
    224  extern int hil_mmz_register(hil_mmz_t *zone);
    225  extern int hil_mmz_unregister(hil_mmz_t *zone);

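All three osal_mmz.h copies define hil_mmb_zone() as a GNU statement expression: the argument is evaluated once into a temporary, checked against NULL with OSAL_BUG_ON(), and the block's zone pointer becomes the value of the whole macro. The same pattern in plain user-space C, with made-up types and assert() standing in for OSAL_BUG_ON(), looks like this:

/* User-space rendering of the hil_mmb_zone() statement-expression pattern.
 * Requires GCC/Clang (statement expressions are a GNU extension); the types
 * are stand-ins for hil_mmb_t / hil_mmz_t. */
#include <assert.h>
#include <stdio.h>

struct demo_zone { const char *name; };
struct demo_mmb  { struct demo_zone *zone; };

#define demo_mmb_zone(p) ({                  \
    struct demo_mmb *__mmb = (p);            \
    assert(__mmb != NULL);                   \
    __mmb->zone; /* value of the expression */ \
})

int main(void)
{
    struct demo_zone z = { "ddr" };
    struct demo_mmb  b = { &z };

    printf("block lives in zone '%s'\n", demo_mmb_zone(&b)->name);
    return 0;
}
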
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/

  mali_kbase_mem.h
    284   * @rblink: Node in a red-black tree of memory regions within the same zone of ...
    461   /* The starting address and size of the GPU-executable zone are dynamic ...
    518   * Return: True if the zone for @zone_bits is a context zone, False otherwise
    1134  u64 start_pfn, size_t nr_pages, int zone);
    1792  * kbase_has_exec_va_zone - EXEC_VA zone predicate
    1794  * Determine whether an EXEC_VA zone has been created for the GPU address space
    1799  * Return: True if the kbase context has an EXEC_VA zone.
    2057  * kbase_reg_zone_end_pfn - return the end Page Frame Number of @zone
    2058  * @zone ...
    2062  kbase_reg_zone_end_pfn(struct kbase_reg_zone *zone)
    2079  struct kbase_reg_zone *zone;   (kbase_ctx_reg_zone_init() local)
    (further matches elided in the original listing)

  mali_kbase_mem_linux.c
    kbase_mem_alloc(): 299 int zone;
      367 /* find out which VA zone to use */
      370 zone = KBASE_REG_ZONE_SAME_VA;
      373 zone = KBASE_REG_ZONE_EXEC_VA;
      376 zone = KBASE_REG_ZONE_CUSTOM_VA;
      380 ... va_pages, zone);
    kbase_mem_from_user_buffer(): 1521 int zone = KBASE_REG_ZONE_CUSTOM_VA;
      1581 zone = KBASE_REG_ZONE_SAME_VA;
      1586 reg = kbase_alloc_free_region(rbtree, 0, *va_pages, zone);

  mali_kbase_mem.c
    kbase_gpu_va_to_rbtree(): 101 * to based on the memory zone the pfn refers to,
      111 * zone if this has been initialized.
    kbase_add_va_region(): 570 /* The executable allocation from the SAME_VA zone would already have an ...,
      572 * Also the executable allocation from EXEC_VA zone doesn't need the ...,
      590 * If the allocation is not from the same zone as JIT ...
    kbase_region_tracker_init(): 830 /* EXEC_VA zone's codepaths are slightly easier when its base_pfn is ...
    881  * any of its zones. This check should be done before resizing a zone, e.g. to
    882  * make space to add a second zone.
    884  * Whilst a zone without allocations can be resized whilst other zones have ...
    890  * Return: true if any allocs exist on any zone, fals...
    899  struct kbase_reg_zone *zone;   (kbase_region_tracker_has_allocs() local)
    1345 kbase_alloc_free_region(struct rb_root *rbtree, u64 start_pfn, size_t nr_pages, int zone)
    (further matches elided in the original listing)

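In kbase_mem_alloc() the driver picks the GPU VA zone for a new region from the allocation flags (lines 367 to 376 above): same-VA mappings go to KBASE_REG_ZONE_SAME_VA, GPU-executable ones to KBASE_REG_ZONE_EXEC_VA, everything else to KBASE_REG_ZONE_CUSTOM_VA, and the result is passed to kbase_alloc_free_region(). The sketch below mirrors that selection with invented flag names, since the exact flag tests are not visible in the listing.

/* Sketch of the VA-zone selection in kbase_mem_alloc(); the flag names are
 * made up for the example, only the three-zone split follows the listing. */
#include <stdio.h>

#define DEMO_FLAG_SAME_VA  (1u << 0)   /* map GPU VA equal to CPU VA        */
#define DEMO_FLAG_GPU_EXEC (1u << 1)   /* region holds GPU-executable code  */

enum demo_zone { DEMO_ZONE_SAME_VA, DEMO_ZONE_EXEC_VA, DEMO_ZONE_CUSTOM_VA };

static enum demo_zone pick_va_zone(unsigned int flags)
{
    /* find out which VA zone to use */
    if (flags & DEMO_FLAG_SAME_VA) {
        return DEMO_ZONE_SAME_VA;
    } else if (flags & DEMO_FLAG_GPU_EXEC) {
        return DEMO_ZONE_EXEC_VA;
    }
    return DEMO_ZONE_CUSTOM_VA;
}

int main(void)
{
    printf("zone for an executable allocation: %d\n",
           pick_va_zone(DEMO_FLAG_GPU_EXEC));
    return 0;
}
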
/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/thirdparty/

  mali_kbase_mmap.c
    kbase_context_get_unmapped_area(): 255 * - the base_pfn of the SAME_VA zone does not change,
      260 * another new zone is being setup in a different thread (e.g. to,
      261 * borrow part of the SAME_VA zone). In the worst case, this path may,
      263 * zone.,
      270 struct kbase_reg_zone *zone =,
      272 u64 same_va_end_addr = kbase_reg_zone_end_pfn(zone) << PAGE_SHIFT;

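mali_kbase_mmap.c converts the SAME_VA zone's end PFN into an end address with kbase_reg_zone_end_pfn(zone) << PAGE_SHIFT (line 272 above). Assuming the zone descriptor stores a base PFN and a size in pages, which the helper's name suggests but the listing does not confirm, the computation reduces to the following sketch.

/* Assumed layout of the zone descriptor; field names are an assumption made
 * for this example, not taken from the listing. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12

struct demo_reg_zone {
    uint64_t base_pfn;        /* first page frame of the zone */
    uint64_t va_size_pages;   /* zone length in pages         */
};

static uint64_t demo_zone_end_pfn(const struct demo_reg_zone *zone)
{
    return zone->base_pfn + zone->va_size_pages;
}

int main(void)
{
    struct demo_reg_zone same_va = { .base_pfn = 0x1000, .va_size_pages = 0x4000 };
    uint64_t end_addr = demo_zone_end_pfn(&same_va) << DEMO_PAGE_SHIFT;

    printf("SAME_VA zone ends at 0x%llx\n", (unsigned long long)end_addr);
    return 0;
}
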
/device/soc/rockchip/common/kernel/drivers/gpu/arm/midgard/

  mali_kbase_mem_linux.c
    kbase_mem_alloc(): 88 int zone;
      141 /* find out which VA zone to use */
      143 zone = KBASE_REG_ZONE_SAME_VA;
      145 zone = KBASE_REG_ZONE_EXEC;
      147 zone = KBASE_REG_ZONE_CUSTOM_VA;
      149 reg = kbase_alloc_free_region(kctx, 0, va_pages, zone);
    kbase_mem_from_user_buffer(): 926 int zone = KBASE_REG_ZONE_CUSTOM_VA;
      975 zone = KBASE_REG_ZONE_SAME_VA;
      978 reg = kbase_alloc_free_region(kctx, 0, *va_pages, zone);

  mali_kbase_mem.h
    631  struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone);

/device/soc/rockchip/common/vendor/drivers/gpu/arm/midgard/

  mali_kbase_mem_linux.c
    kbase_mem_alloc(): 80 int zone;
      131 /* find out which VA zone to use */
      133 zone = KBASE_REG_ZONE_SAME_VA;
      135 zone = KBASE_REG_ZONE_EXEC;
      137 zone = KBASE_REG_ZONE_CUSTOM_VA;
      140 reg = kbase_alloc_free_region(kctx, 0, va_pages, zone);
    kbase_mem_from_user_buffer(): 936 int zone = KBASE_REG_ZONE_CUSTOM_VA;
      984 zone = KBASE_REG_ZONE_SAME_VA;
      987 reg = kbase_alloc_free_region(kctx, 0, *va_pages, zone);

  mali_kbase_mem.h
    627  struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone);

/device/soc/rockchip/common/vendor/drivers/gpu/arm/bifrost/

  mali_kbase_mem_linux.c
    kbase_mem_alloc(): 311 int zone;
      382 /* find out which VA zone to use */
      385 zone = KBASE_REG_ZONE_SAME_VA;
      389 zone = KBASE_REG_ZONE_EXEC_VA;
      392 zone = KBASE_REG_ZONE_CUSTOM_VA;
      395 reg = kbase_alloc_free_region(rbtree, PFN_DOWN(*gpu_va), va_pages, zone);
    kbase_mem_from_user_buffer(): 1572 int zone = KBASE_REG_ZONE_CUSTOM_VA;
      1635 zone = KBASE_REG_ZONE_SAME_VA;
      1641 reg = kbase_alloc_free_region(rbtree, 0, *va_pages, zone);