162306a36Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0 262306a36Sopenharmony_ci/* Copyright(c) 2015 Intel Corporation. All rights reserved. */ 362306a36Sopenharmony_ci#include <linux/device.h> 462306a36Sopenharmony_ci#include <linux/io.h> 562306a36Sopenharmony_ci#include <linux/kasan.h> 662306a36Sopenharmony_ci#include <linux/memory_hotplug.h> 762306a36Sopenharmony_ci#include <linux/memremap.h> 862306a36Sopenharmony_ci#include <linux/pfn_t.h> 962306a36Sopenharmony_ci#include <linux/swap.h> 1062306a36Sopenharmony_ci#include <linux/mmzone.h> 1162306a36Sopenharmony_ci#include <linux/swapops.h> 1262306a36Sopenharmony_ci#include <linux/types.h> 1362306a36Sopenharmony_ci#include <linux/wait_bit.h> 1462306a36Sopenharmony_ci#include <linux/xarray.h> 1562306a36Sopenharmony_ci#include "internal.h" 1662306a36Sopenharmony_ci 1762306a36Sopenharmony_cistatic DEFINE_XARRAY(pgmap_array); 1862306a36Sopenharmony_ci 1962306a36Sopenharmony_ci/* 2062306a36Sopenharmony_ci * The memremap() and memremap_pages() interfaces are alternately used 2162306a36Sopenharmony_ci * to map persistent memory namespaces. These interfaces place different 2262306a36Sopenharmony_ci * constraints on the alignment and size of the mapping (namespace). 2362306a36Sopenharmony_ci * memremap() can map individual PAGE_SIZE pages. memremap_pages() can 2462306a36Sopenharmony_ci * only map subsections (2MB), and at least one architecture (PowerPC) 2562306a36Sopenharmony_ci * the minimum mapping granularity of memremap_pages() is 16MB. 2662306a36Sopenharmony_ci * 2762306a36Sopenharmony_ci * The role of memremap_compat_align() is to communicate the minimum 2862306a36Sopenharmony_ci * arch supported alignment of a namespace such that it can freely 2962306a36Sopenharmony_ci * switch modes without violating the arch constraint. 
 * Namely, do not
 * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 */
#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
/*
 * Generic fallback: the strictest alignment any mode needs is one
 * sparse-vmemmap subsection.  Architectures with a larger minimum
 * mapping granularity select CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
 * and provide their own implementation.
 */
unsigned long memremap_compat_align(void)
{
	return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif

#ifdef CONFIG_FS_DAX
/*
 * Static key enabled only while at least one MEMORY_DEVICE_FS_DAX
 * pagemap is live; gates the extra fsdax page-put accounting.
 */
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);

/* Drop the static-branch reference taken by devmap_managed_enable_get(). */
static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_dec(&devmap_managed_key);
}

/* Take a static-branch reference for fsdax pagemaps at map time. */
static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_inc(&devmap_managed_key);
}
#else
/* !CONFIG_FS_DAX: no fsdax accounting, these hooks compile away. */
static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
}
static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
}
#endif /* CONFIG_FS_DAX */

/*
 * Remove @range's pfns from the pgmap lookup array, then wait out any
 * RCU readers (see the xa_load() path in get_dev_pagemap()) before the
 * caller tears the mapping down.
 */
static void pgmap_array_delete(struct range *range)
{
	xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
			NULL, GFP_KERNEL);
	synchronize_rcu();
}

/*
 * First pfn of @range_id that is backed by an initialized struct page.
 * Only range 0 may carry an altmap (enforced in pagemap_range()), so
 * only there are the altmap's reserved/free pfns skipped.
 */
static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
{
	struct range *range = &pgmap->ranges[range_id];
	unsigned long pfn = PHYS_PFN(range->start);

	if (range_id)
		return pfn;
	return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
}

/*
 * Return true if @pfn lies in one of @pgmap's ranges and has a valid
 * memmap entry, i.e. is not part of an altmap reservation at the start
 * of the first range.
 */
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
	int i;

	for (i = 0; i < pgmap->nr_range; i++) {
		struct range *range = &pgmap->ranges[i];

		if (pfn >= PHYS_PFN(range->start) &&
		    pfn <= PHYS_PFN(range->end))
			return pfn >= pfn_first(pgmap, i);
	}

	return false;
}

/* One past the last pfn covered by @range_id. */
static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
{
	const struct range *range = &pgmap->ranges[range_id];

	return (range->start + range_len(range)) >> PAGE_SHIFT;
}

/*
 * Number of device pages in @range_id; with a non-zero vmemmap_shift
 * (compound pages) this counts heads only, matching the percpu_ref
 * accounting in pagemap_range()/memunmap_pages().
 */
static unsigned long pfn_len(struct dev_pagemap *pgmap, unsigned long range_id)
{
	return (pfn_end(pgmap, range_id) -
		pfn_first(pgmap, range_id)) >> pgmap->vmemmap_shift;
}

/*
 * Tear down one previously mapped range: pull its pfns out of their
 * zone, undo the memmap/linear mapping, and remove the pgmap_array
 * entries.  Inverse of pagemap_range().
 */
static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
{
	struct range *range = &pgmap->ranges[range_id];
	struct page *first_page;

	/* make sure to access a memmap that was actually initialized */
	first_page = pfn_to_page(pfn_first(pgmap, range_id));

	/* pages are dead and unused, undo the arch mapping */
	mem_hotplug_begin();
	remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
				   PHYS_PFN(range_len(range)));
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		/* private memory never had a linear mapping, see pagemap_range() */
		__remove_pages(PHYS_PFN(range->start),
			       PHYS_PFN(range_len(range)), NULL);
	} else {
		arch_remove_memory(range->start, range_len(range),
				   pgmap_altmap(pgmap));
		kasan_remove_zero_shadow(__va(range->start), range_len(range));
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range), true);
	pgmap_array_delete(range);
}

/*
 * Undo memremap_pages(): kill the percpu ref, wait for all outstanding
 * page references to drain, then unmap every range.  Must only be
 * called once the device pages are no longer in use.
 */
void memunmap_pages(struct dev_pagemap *pgmap)
{
	int i;

	percpu_ref_kill(&pgmap->ref);
	/*
	 * Drop the per-page references taken in pagemap_range() for types
	 * that do not put them one by one in free_zone_device_page().
	 */
	if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
	    pgmap->type != MEMORY_DEVICE_COHERENT)
		for (i = 0; i < pgmap->nr_range; i++)
			percpu_ref_put_many(&pgmap->ref, pfn_len(pgmap, i));

	/* completed by dev_pagemap_percpu_release() when the ref hits zero */
	wait_for_completion(&pgmap->done);

	for (i = 0; i < pgmap->nr_range; i++)
		pageunmap_range(pgmap, i);
	percpu_ref_exit(&pgmap->ref);

	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
	devmap_managed_enable_put(pgmap);
}
EXPORT_SYMBOL_GPL(memunmap_pages);

/* devm action callback: forwards to memunmap_pages(). */
static void devm_memremap_pages_release(void *data)
{
	memunmap_pages(data);
}

/* percpu_ref release callback: wakes the waiter in memunmap_pages(). */
static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);

	complete(&pgmap->done);
}

/*
 * Map one range of @pgmap: register it in pgmap_array, hotplug the
 * memory (memmap only for device-private ranges), move the pfns into
 * ZONE_DEVICE and initialize their struct pages.  Returns 0 or a
 * negative errno; on failure all partial setup for this range is
 * unwound via the goto chain at the bottom.
 */
static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
		int range_id, int nid)
{
	const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
	struct range *range = &pgmap->ranges[range_id];
	struct dev_pagemap *conflict_pgmap;
	int error, is_ram;

	if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
				"altmap not supported for multiple ranges\n"))
		return -EINVAL;

	/* reject overlap with an already-registered pagemap at either end */
	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return -ENOMEM;
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return -ENOMEM;
	}

	/* refuse to shadow anything already claimed as System RAM */
	is_ram = region_intersects(range->start, range_len(range),
			IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
				is_ram == REGION_MIXED ? "mixed" : "ram",
				range->start, range->end);
		return -ENXIO;
	}

	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
				PHYS_PFN(range->end), pgmap, GFP_KERNEL));
	if (error)
		return error;

	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
			range_len(range));
	if (error)
		goto err_pfn_remap;

	if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
		error = -EINVAL;
		goto err_kasan;
	}

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory. More-
	 * over the device memory is un-accessible thus we do not want to
	 * create a linear mapping for the memory like arch_add_memory()
	 * would do.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (is_private) {
		error = add_pages(nid, PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), params);
	} else {
		error = kasan_add_zero_shadow(__va(range->start), range_len(range));
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, range->start, range_len(range),
					params);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), params->altmap,
				MIGRATE_MOVABLE);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), pgmap);
	/*
	 * Hold one ref per page so the percpu_ref only drains at teardown
	 * (put back in memunmap_pages()).  Private/coherent pages instead
	 * drop their ref individually, see free_zone_device_page().
	 */
	if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
	    pgmap->type != MEMORY_DEVICE_COHERENT)
		percpu_ref_get_many(&pgmap->ref, pfn_len(pgmap, range_id));
	return 0;

err_add_memory:
	if (!is_private)
		kasan_remove_zero_shadow(__va(range->start), range_len(range));
err_kasan:
	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range), true);
err_pfn_remap:
	pgmap_array_delete(range);
	return error;
}


/*
 * Not device managed version of devm_memremap_pages, undone by
 * memunmap_pages(). Please use devm_memremap_pages if you have a struct
 * device available.
29062306a36Sopenharmony_ci */ 29162306a36Sopenharmony_civoid *memremap_pages(struct dev_pagemap *pgmap, int nid) 29262306a36Sopenharmony_ci{ 29362306a36Sopenharmony_ci struct mhp_params params = { 29462306a36Sopenharmony_ci .altmap = pgmap_altmap(pgmap), 29562306a36Sopenharmony_ci .pgmap = pgmap, 29662306a36Sopenharmony_ci .pgprot = PAGE_KERNEL, 29762306a36Sopenharmony_ci }; 29862306a36Sopenharmony_ci const int nr_range = pgmap->nr_range; 29962306a36Sopenharmony_ci int error, i; 30062306a36Sopenharmony_ci 30162306a36Sopenharmony_ci if (WARN_ONCE(!nr_range, "nr_range must be specified\n")) 30262306a36Sopenharmony_ci return ERR_PTR(-EINVAL); 30362306a36Sopenharmony_ci 30462306a36Sopenharmony_ci switch (pgmap->type) { 30562306a36Sopenharmony_ci case MEMORY_DEVICE_PRIVATE: 30662306a36Sopenharmony_ci if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) { 30762306a36Sopenharmony_ci WARN(1, "Device private memory not supported\n"); 30862306a36Sopenharmony_ci return ERR_PTR(-EINVAL); 30962306a36Sopenharmony_ci } 31062306a36Sopenharmony_ci if (!pgmap->ops || !pgmap->ops->migrate_to_ram) { 31162306a36Sopenharmony_ci WARN(1, "Missing migrate_to_ram method\n"); 31262306a36Sopenharmony_ci return ERR_PTR(-EINVAL); 31362306a36Sopenharmony_ci } 31462306a36Sopenharmony_ci if (!pgmap->ops->page_free) { 31562306a36Sopenharmony_ci WARN(1, "Missing page_free method\n"); 31662306a36Sopenharmony_ci return ERR_PTR(-EINVAL); 31762306a36Sopenharmony_ci } 31862306a36Sopenharmony_ci if (!pgmap->owner) { 31962306a36Sopenharmony_ci WARN(1, "Missing owner\n"); 32062306a36Sopenharmony_ci return ERR_PTR(-EINVAL); 32162306a36Sopenharmony_ci } 32262306a36Sopenharmony_ci break; 32362306a36Sopenharmony_ci case MEMORY_DEVICE_COHERENT: 32462306a36Sopenharmony_ci if (!pgmap->ops->page_free) { 32562306a36Sopenharmony_ci WARN(1, "Missing page_free method\n"); 32662306a36Sopenharmony_ci return ERR_PTR(-EINVAL); 32762306a36Sopenharmony_ci } 32862306a36Sopenharmony_ci if (!pgmap->owner) { 32962306a36Sopenharmony_ci 
WARN(1, "Missing owner\n"); 33062306a36Sopenharmony_ci return ERR_PTR(-EINVAL); 33162306a36Sopenharmony_ci } 33262306a36Sopenharmony_ci break; 33362306a36Sopenharmony_ci case MEMORY_DEVICE_FS_DAX: 33462306a36Sopenharmony_ci if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) { 33562306a36Sopenharmony_ci WARN(1, "File system DAX not supported\n"); 33662306a36Sopenharmony_ci return ERR_PTR(-EINVAL); 33762306a36Sopenharmony_ci } 33862306a36Sopenharmony_ci params.pgprot = pgprot_decrypted(params.pgprot); 33962306a36Sopenharmony_ci break; 34062306a36Sopenharmony_ci case MEMORY_DEVICE_GENERIC: 34162306a36Sopenharmony_ci break; 34262306a36Sopenharmony_ci case MEMORY_DEVICE_PCI_P2PDMA: 34362306a36Sopenharmony_ci params.pgprot = pgprot_noncached(params.pgprot); 34462306a36Sopenharmony_ci break; 34562306a36Sopenharmony_ci default: 34662306a36Sopenharmony_ci WARN(1, "Invalid pgmap type %d\n", pgmap->type); 34762306a36Sopenharmony_ci break; 34862306a36Sopenharmony_ci } 34962306a36Sopenharmony_ci 35062306a36Sopenharmony_ci init_completion(&pgmap->done); 35162306a36Sopenharmony_ci error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0, 35262306a36Sopenharmony_ci GFP_KERNEL); 35362306a36Sopenharmony_ci if (error) 35462306a36Sopenharmony_ci return ERR_PTR(error); 35562306a36Sopenharmony_ci 35662306a36Sopenharmony_ci devmap_managed_enable_get(pgmap); 35762306a36Sopenharmony_ci 35862306a36Sopenharmony_ci /* 35962306a36Sopenharmony_ci * Clear the pgmap nr_range as it will be incremented for each 36062306a36Sopenharmony_ci * successfully processed range. This communicates how many 36162306a36Sopenharmony_ci * regions to unwind in the abort case. 
36262306a36Sopenharmony_ci */ 36362306a36Sopenharmony_ci pgmap->nr_range = 0; 36462306a36Sopenharmony_ci error = 0; 36562306a36Sopenharmony_ci for (i = 0; i < nr_range; i++) { 36662306a36Sopenharmony_ci error = pagemap_range(pgmap, ¶ms, i, nid); 36762306a36Sopenharmony_ci if (error) 36862306a36Sopenharmony_ci break; 36962306a36Sopenharmony_ci pgmap->nr_range++; 37062306a36Sopenharmony_ci } 37162306a36Sopenharmony_ci 37262306a36Sopenharmony_ci if (i < nr_range) { 37362306a36Sopenharmony_ci memunmap_pages(pgmap); 37462306a36Sopenharmony_ci pgmap->nr_range = nr_range; 37562306a36Sopenharmony_ci return ERR_PTR(error); 37662306a36Sopenharmony_ci } 37762306a36Sopenharmony_ci 37862306a36Sopenharmony_ci return __va(pgmap->ranges[0].start); 37962306a36Sopenharmony_ci} 38062306a36Sopenharmony_ciEXPORT_SYMBOL_GPL(memremap_pages); 38162306a36Sopenharmony_ci 38262306a36Sopenharmony_ci/** 38362306a36Sopenharmony_ci * devm_memremap_pages - remap and provide memmap backing for the given resource 38462306a36Sopenharmony_ci * @dev: hosting device for @res 38562306a36Sopenharmony_ci * @pgmap: pointer to a struct dev_pagemap 38662306a36Sopenharmony_ci * 38762306a36Sopenharmony_ci * Notes: 38862306a36Sopenharmony_ci * 1/ At a minimum the range and type members of @pgmap must be initialized 38962306a36Sopenharmony_ci * by the caller before passing it to this function 39062306a36Sopenharmony_ci * 39162306a36Sopenharmony_ci * 2/ The altmap field may optionally be initialized, in which case 39262306a36Sopenharmony_ci * PGMAP_ALTMAP_VALID must be set in pgmap->flags. 39362306a36Sopenharmony_ci * 39462306a36Sopenharmony_ci * 3/ The ref field may optionally be provided, in which pgmap->ref must be 39562306a36Sopenharmony_ci * 'live' on entry and will be killed and reaped at 39662306a36Sopenharmony_ci * devm_memremap_pages_release() time, or if this routine fails. 
 *
 * 4/ range is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	void *ret;

	ret = memremap_pages(pgmap, dev_to_node(dev));
	if (IS_ERR(ret))
		return ret;

	/* arrange for memunmap_pages() to run at device teardown */
	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);

/* Run the devm release action (and thus memunmap_pages()) early. */
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	if (altmap)
		return altmap->reserve + altmap->free;
	return 0;
}

/* Return @nr_pfns previously taken from @altmap to its allocation count. */
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to lookup page_map
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->range.start && phys <= pgmap->range.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	/* do not hand out a pgmap whose ref has already been killed */
	if (pgmap && !percpu_ref_tryget_live_rcu(&pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);

/*
 * Called when the last reference on a ZONE_DEVICE page is dropped: hand
 * the page back to the driver via ops->page_free and settle the pgmap
 * refcounting for the page's memory type.
 */
void free_zone_device_page(struct page *page)
{
	if (WARN_ON_ONCE(!page->pgmap->ops || !page->pgmap->ops->page_free))
		return;

	mem_cgroup_uncharge(page_folio(page));

	/*
	 * Note: we don't expect anonymous compound pages yet. Once supported
	 * and we could PTE-map them similar to THP, we'd have to clear
	 * PG_anon_exclusive on all tail pages.
	 */
	VM_BUG_ON_PAGE(PageAnon(page) && PageCompound(page), page);
	if (PageAnon(page))
		__ClearPageAnonExclusive(page);

	/*
	 * When a device managed page is freed, the page->mapping field
	 * may still contain a (stale) mapping value. For example, the
	 * lower bits of page->mapping may still identify the page as an
	 * anonymous page. Ultimately, this entire field is just stale
	 * and wrong, and it will cause errors if not cleared. One
	 * example is:
	 *
	 *  migrate_vma_pages()
	 *    migrate_vma_insert_page()
	 *      page_add_new_anon_rmap()
	 *        __page_set_anon_rmap()
	 *          ...checks page->mapping, via PageAnon(page) call,
	 *            and incorrectly concludes that the page is an
	 *            anonymous page. Therefore, it incorrectly,
	 *            silently fails to set up the new anon rmap.
	 *
	 * For other types of ZONE_DEVICE pages, migration is either
	 * handled differently or not done at all, so there is no need
	 * to clear page->mapping.
	 */
	page->mapping = NULL;
	page->pgmap->ops->page_free(page);

	if (page->pgmap->type != MEMORY_DEVICE_PRIVATE &&
	    page->pgmap->type != MEMORY_DEVICE_COHERENT)
		/*
		 * Reset the page count to 1 to prepare for handing out the page
		 * again.
		 */
		set_page_count(page, 1);
	else
		/* these types take a pgmap ref per page, drop it here */
		put_dev_pagemap(page->pgmap);
}

/* Prepare a free ZONE_DEVICE page for hand-out: refcount 1 and locked. */
void zone_device_page_init(struct page *page)
{
	/*
	 * Drivers shouldn't be allocating pages after calling
	 * memunmap_pages().
	 */
	WARN_ON_ONCE(!percpu_ref_tryget_live(&page->pgmap->ref));
	set_page_count(page, 1);
	lock_page(page);
}
EXPORT_SYMBOL_GPL(zone_device_page_init);

#ifdef CONFIG_FS_DAX
/*
 * Drop @refs references on a devmap-managed page.  Returns false for
 * non-fsdax pagemaps (caller does the put itself); otherwise performs
 * the put and wakes anyone waiting on the page becoming free.
 */
bool __put_devmap_managed_page_refs(struct page *page, int refs)
{
	if (page->pgmap->type != MEMORY_DEVICE_FS_DAX)
		return false;

	/*
	 * fsdax page refcounts are 1-based, rather than 0-based: if
	 * refcount is 1, then the page is free and the refcount is
	 * stable because nobody holds a reference on the page.
	 */
	if (page_ref_sub_return(page, refs) == 1)
		wake_up_var(&page->_refcount);
	return true;
}
EXPORT_SYMBOL(__put_devmap_managed_page_refs);
#endif /* CONFIG_FS_DAX */