Lines Matching defs:zone
313 * and each of them corresponds to one zone. For each zone bitmap
317 * struct memory_bitmap contains a pointer to the main list of zone
320 * zone bitmap objects and bitmap block objects.
330 * PFNs that correspond to the start and end of the represented zone.
338 * access to the bits. There is one radix tree for each zone (as returned
367 * populated memory zone.
383 struct mem_zone_bm_rtree *zone;
391 struct linked_page *p_list; /* list of pages used to store zone
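
Taken together, the matches above outline the data structures involved. A minimal sketch of that layout, assuming the kernel list primitives from <linux/list.h>; field order and the bm_position/memory_bitmap members not visible in the matches are assumptions:

#include <linux/list.h>

struct linked_page;                     /* page-sized allocation chunk (defined in snapshot.c) */

struct rtree_node {                     /* one radix-tree node or leaf: a single page of data  */
	struct list_head list;
	unsigned long *data;
};

struct mem_zone_bm_rtree {              /* per-zone bitmap: one radix tree per populated zone  */
	struct list_head list;          /* links all zone bitmaps of a memory_bitmap          */
	struct list_head nodes;         /* interior nodes                                     */
	struct list_head leaves;        /* leaf nodes holding the actual bits                 */
	unsigned long start_pfn;        /* first PFN covered by this zone bitmap              */
	unsigned long end_pfn;          /* one past the last covered PFN                      */
	struct rtree_node *rtree;       /* radix-tree root                                    */
	int levels;                     /* current tree depth                                 */
	unsigned int blocks;            /* number of leaf (bitmap block) pages                */
};

struct bm_position {                    /* cached cursor used by the find/next helpers        */
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;         /* main list of zone bitmap objects                   */
	struct linked_page *p_list;     /* list of pages used to store the objects above      */
	struct bm_position cur;
};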
438 * linked list in order. This is guaranteed by the zone->blocks
441 static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask, int safe_needed, struct chain_allocator *ca)
447 block_nr = zone->blocks;
457 for (i = zone->levels; i < levels_needed; i++) {
458 node = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->nodes);
463 node->data[0] = (unsigned long)zone->rtree;
464 zone->rtree = node;
465 zone->levels += 1;
469 block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
475 node = zone->rtree;
476 dst = &zone->rtree;
477 block_nr = zone->blocks;
478 for (i = zone->levels; i > 0; i--) {
482 node = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->nodes);
495 zone->blocks += 1;
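
The descent in add_rtree_block() (and again in memory_bm_find_bit() below) peels one group of index bits off the block number per tree level. A small stand-alone illustration of that split; the 512-slot node width is an assumption for a 64-bit, 4 KiB-page configuration and the constant names are illustrative:

#include <stdio.h>

#define LEVEL_SHIFT 9                                   /* log2(512) slots per node (assumed) */
#define LEVEL_MASK  ((1UL << LEVEL_SHIFT) - 1)

int main(void)
{
	unsigned long block_nr = 1234567;               /* leaf index to look up */
	int levels = 3;                                 /* zone->levels          */

	/* The highest-order bit group selects the slot at the root, the next
	 * group one level down, and so on, mirroring the loops at 478/763. */
	for (int i = levels; i > 0; i--) {
		unsigned long index =
			(block_nr >> ((i - 1) * LEVEL_SHIFT)) & LEVEL_MASK;
		printf("level %d: slot %lu\n", i, index);
	}
	return 0;
}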
501 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone, int clear_nosave_free);
504 * create_zone_bm_rtree - Create a radix tree for one zone.
508 * zone.
513 struct mem_zone_bm_rtree *zone;
518 zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
519 if (!zone) {
523 INIT_LIST_HEAD(&zone->nodes);
524 INIT_LIST_HEAD(&zone->leaves);
525 zone->start_pfn = start;
526 zone->end_pfn = end;
530 if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
531 free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
536 return zone;
546 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone, int clear_nosave_free)
550 list_for_each_entry(node, &zone->nodes, list) free_image_page(node->data, clear_nosave_free);
552 list_for_each_entry(node, &zone->leaves, list) free_image_page(node->data, clear_nosave_free);
557 bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree, list);
558 bm->cur.node = list_entry(bm->cur.zone->leaves.next, struct rtree_node, list);
595 struct zone *zone;
599 for_each_populated_zone(zone)
604 zone_start = zone->zone_start_pfn;
605 zone_end = zone_end_pfn(zone);
624 /* Merge this zone's range of PFNs with the existing one */
670 struct mem_zone_bm_rtree *zone;
672 zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca, ext->start, ext->end);
673 if (!zone) {
677 list_add_tail(&zone->list, &bm->zones);
699 struct mem_zone_bm_rtree *zone;
701 list_for_each_entry(zone, &bm->zones, list) free_zone_bm_rtree(zone, clear_nosave_free);
712 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
719 struct mem_zone_bm_rtree *curr, *zone;
723 zone = bm->cur.zone;
725 if (pfn >= zone->start_pfn && pfn < zone->end_pfn) {
729 zone = NULL;
731 /* Find the right zone */
735 zone = curr;
740 if (!zone) {
746 * We have found the zone. Now walk the radix tree to find the leaf node
751 * If the zone we wish to scan is the current zone and the
756 if (zone == bm->cur.zone && ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn) {
760 node = zone->rtree;
761 block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
763 for (i = zone->levels; i > 0; i--) {
774 bm->cur.zone = zone;
776 bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
780 *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
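
Putting the three expressions at 761, 776 and 780 together: the PFN's offset within the zone is split into a leaf number, the first PFN covered by that leaf, and a bit index inside it. A stand-alone example, assuming 4 KiB pages so that one leaf page holds 32768 bits (BM_BLOCK_SHIFT == 15); both values are assumptions about the configuration:

#include <stdio.h>

#define BLOCK_SHIFT 15                                  /* assumed: PAGE_SHIFT + 3 for 4 KiB pages */
#define BLOCK_MASK  ((1UL << BLOCK_SHIFT) - 1)

int main(void)
{
	unsigned long start_pfn = 0x100000;             /* hypothetical zone->start_pfn */
	unsigned long pfn       = 0x123456;             /* PFN to look up               */
	unsigned long off       = pfn - start_pfn;

	printf("block_nr (leaf index):  %lu\n", off >> BLOCK_SHIFT);
	printf("node_pfn (leaf base):   %#lx\n", off & ~BLOCK_MASK);
	printf("bit_nr inside the leaf: %lu\n", off & BLOCK_MASK);
	return 0;
}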
853 * zone's radix tree or the first node in the radix tree of the
854 * next zone.
860 if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
869 /* No more nodes, go to next zone */
869 if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
870 bm->cur.zone = list_entry(bm->cur.zone->list.next, struct mem_zone_bm_rtree, list);
871 bm->cur.node = list_entry(bm->cur.zone->leaves.next, struct rtree_node, list);
898 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
902 pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
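
The rtree_next_node()/memory_bm_next_pfn() pair above advances a cursor across all leaves of all zones. A hedged sketch of how a caller typically walks every set bit, assuming the position-reset helper and the BM_END_OF_MAP sentinel from the same file; treat the exact names as assumptions rather than a verbatim quote:

	/* Sketch only: relies on the snapshot.c helpers named in the matches. */
	unsigned long pfn;

	memory_bm_position_reset(bm);           /* cur.zone/cur.node back to the first leaf */
	for (pfn = memory_bm_next_pfn(bm);
	     pfn != BM_END_OF_MAP;
	     pfn = memory_bm_next_pfn(bm)) {
		/* pfn = cur.zone->start_pfn + cur.node_pfn + bit, as computed at 902 */
		do_something_with(pfn);         /* hypothetical consumer */
	}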
923 static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
927 list_for_each_entry(node, &zone->nodes, list) recycle_safe_page(node->data);
929 list_for_each_entry(node, &zone->leaves, list) recycle_safe_page(node->data);
934 struct mem_zone_bm_rtree *zone;
937 list_for_each_entry(zone, &bm->zones, list) recycle_zone_bm_rtree(zone);
1192 * @zone: Memory zone to carry out the computation for.
1195 * image data structures for @zone (usually, the returned value is greater than
1198 unsigned int snapshot_additional_pages(struct zone *zone)
1202 rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
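
The line at 1202 computes how many leaf pages one zone needs; the rest of the function (not matched here) adds the interior nodes on top. A stand-alone version of that arithmetic, with 32768 bits per leaf and 512 slots per interior node assumed for a 64-bit, 4 KiB-page system:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)   (((n) + (d) - 1) / (d))
#define BITS_PER_BLOCK       32768UL                    /* assumed: PAGE_SIZE * 8       */
#define ENTRIES_PER_LEVEL    512UL                      /* assumed: PAGE_SIZE / 8 slots */

int main(void)
{
	unsigned long spanned_pages = 4UL << 20;        /* a zone spanning 16 GiB of 4 KiB pages */
	unsigned long nodes = DIV_ROUND_UP(spanned_pages, BITS_PER_BLOCK);
	unsigned long total = nodes;                    /* leaf pages                     */

	while (nodes > 1) {                             /* interior nodes, level by level */
		nodes = DIV_ROUND_UP(nodes, ENTRIES_PER_LEVEL);
		total += nodes;
	}
	printf("pages needed for one bitmap of this zone: %lu\n", total);
	return 0;
}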
1220 struct zone *zone;
1223 for_each_populated_zone(zone) if (is_highmem(zone)) cnt += zone_page_state(zone, NR_FREE_PAGES);
1236 static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
1245 if (!page || page_zone(page) != zone) {
1271 struct zone *zone;
1274 for_each_populated_zone(zone)
1278 if (!is_highmem(zone)) {
1282 mark_free_pages(zone);
1283 max_zone_pfn = zone_end_pfn(zone);
1284 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
1285 if (saveable_highmem_page(zone, pfn)) {
1293 static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
1309 static struct page *saveable_page(struct zone *zone, unsigned long pfn)
1318 if (!page || page_zone(page) != zone) {
1348 struct zone *zone;
1352 for_each_populated_zone(zone)
1354 if (is_highmem(zone)) {
1358 mark_free_pages(zone);
1359 max_zone_pfn = zone_end_pfn(zone);
1360 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
1361 if (saveable_page(zone, pfn)) {
1402 static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
1404 return is_highmem(zone) ? saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
1436 #define page_is_saveable(zone, pfn) saveable_page(zone, pfn)
1446 struct zone *zone;
1449 for_each_populated_zone(zone)
1453 mark_free_pages(zone);
1454 max_zone_pfn = zone_end_pfn(zone);
1455 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
1456 if (page_is_saveable(zone, pfn)) {
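
The matches at 1449-1456 show the scan that feeds the copy step: every populated zone is walked PFN by PFN and each saveable page is recorded. A hedged reconstruction of that loop; the memory_bm_set_bit() call and the orig_bm name are assumptions about the unmatched body, not a verbatim quote:

	for_each_populated_zone(zone) {
		mark_free_pages(zone);          /* mark free pages so the saveable check can skip them */
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);        /* assumed bitmap name */
	}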
1743 struct zone *zone;
1779 for_each_populated_zone(zone)
1781 size += snapshot_additional_pages(zone);
1782 if (is_highmem(zone)) {
1783 highmem += zone_page_state(zone, NR_FREE_PAGES);
1785 count += zone_page_state(zone, NR_FREE_PAGES);
1813 * To avoid excessive pressure on the normal zone, leave room in it to
1928 struct zone *zone;
1931 for_each_populated_zone(zone) if (!is_highmem(zone)) free += zone_page_state(zone, NR_FREE_PAGES);