Lines Matching refs:pgmap (mm/memremap.c; only source lines containing "pgmap" are listed, each prefixed with its line number in the file)
44 static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
46 if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
47 pgmap->type == MEMORY_DEVICE_FS_DAX)
51 static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
53 if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
54 pgmap->type == MEMORY_DEVICE_FS_DAX)
58 static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
61 static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
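These two helpers gate on the pgmap type: only MEMORY_DEVICE_PRIVATE and MEMORY_DEVICE_FS_DAX pages need the managed page-free path (lines 58 and 61 are the no-op stubs for builds without that support). The bodies at lines 48 and 55 are not listed because they do not mention pgmap; a minimal sketch of what they presumably do, assuming the static key is named devmap_managed_key:

#include <linux/jump_label.h>
#include <linux/memremap.h>

DEFINE_STATIC_KEY_FALSE(devmap_managed_key);

static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
	    pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_inc(&devmap_managed_key);
}
/* devmap_managed_enable_put() would do the matching static_branch_dec() */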
73 static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
75 struct range *range = &pgmap->ranges[range_id];
80 return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
83 bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
87 for (i = 0; i < pgmap->nr_range; i++) {
88 struct range *range = &pgmap->ranges[i];
92 return pfn >= pfn_first(pgmap, i);
98 static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
100 const struct range *range = &pgmap->ranges[range_id];
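pfn_first() skips any altmap-reserved pages at the start of a range (line 80) and pfn_end() derives from the range end, so the two bound a walk over one range's device pfns; for_each_device_pfn() at line 175 relies on exactly this. An illustrative walk as it might appear inside this file (pfn_first() and pfn_end() are static helpers local to it), with a placeholder body:

static void walk_device_pfns(struct dev_pagemap *pgmap, int range_id)
{
	unsigned long pfn;

	for (pfn = pfn_first(pgmap, range_id);
	     pfn < pfn_end(pgmap, range_id); pfn++) {
		struct page *page = pfn_to_page(pfn);

		(void)page;	/* ... operate on the device page ... */
	}
}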
115 static void dev_pagemap_kill(struct dev_pagemap *pgmap)
117 if (pgmap->ops && pgmap->ops->kill)
118 pgmap->ops->kill(pgmap);
120 percpu_ref_kill(pgmap->ref);
123 static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
125 if (pgmap->ops && pgmap->ops->cleanup) {
126 pgmap->ops->cleanup(pgmap);
128 wait_for_completion(&pgmap->done);
129 percpu_ref_exit(pgmap->ref);
132 * Undo the pgmap ref assignment for the internal case as the
133 * caller may re-enable the same pgmap.
135 if (pgmap->ref == &pgmap->internal_ref)
136 pgmap->ref = NULL;
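When the caller supplies its own pgmap->ref, memremap_pages() insists on kill and cleanup ops (lines 363-374), and the two functions above then delegate to those ops instead of driving the internal ref themselves. A hedged sketch of such a caller-owned pair; all my_* names are hypothetical:

#include <linux/completion.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>

static struct percpu_ref my_ref;
static DECLARE_COMPLETION(my_ref_done);

static void my_ref_release(struct percpu_ref *ref)
{
	complete(&my_ref_done);
}

static void my_kill(struct dev_pagemap *pgmap)
{
	percpu_ref_kill(pgmap->ref);		/* stand-in for line 120 */
}

static void my_cleanup(struct dev_pagemap *pgmap)
{
	wait_for_completion(&my_ref_done);	/* mirrors lines 128-129 */
	percpu_ref_exit(pgmap->ref);
}

static const struct dev_pagemap_ops my_ops = {
	.kill		= my_kill,
	.cleanup	= my_cleanup,
};

static int my_ref_setup(struct dev_pagemap *pgmap)
{
	int rc = percpu_ref_init(&my_ref, my_ref_release, 0, GFP_KERNEL);

	if (rc)
		return rc;
	pgmap->ref = &my_ref;
	pgmap->ops = &my_ops;
	return 0;
}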
139 static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
141 struct range *range = &pgmap->ranges[range_id];
146 first_page = pfn_to_page(pfn_first(pgmap, range_id));
154 if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
159 pgmap_altmap(pgmap));
168 void memunmap_pages(struct dev_pagemap *pgmap)
173 dev_pagemap_kill(pgmap);
174 for (i = 0; i < pgmap->nr_range; i++)
175 for_each_device_pfn(pfn, pgmap, i)
177 dev_pagemap_cleanup(pgmap);
179 for (i = 0; i < pgmap->nr_range; i++)
180 pageunmap_range(pgmap, i);
182 WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
183 devmap_managed_enable_put(pgmap);
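Note the teardown order in memunmap_pages(): kill the ref (line 173), drop the per-pfn references taken at lines 295-296 (the loop at lines 174-175), wait in dev_pagemap_cleanup() for the ref to drain (line 177), and only then unmap the ranges. The comment at lines 132-133 also promises that a pgmap torn down this way can be mapped again; a hedged sketch of that re-use, assuming the internal ref (my_pgmap is hypothetical and must have its range, nr_range, and type set elsewhere):

#include <linux/err.h>
#include <linux/memremap.h>
#include <linux/numa.h>

static struct dev_pagemap my_pgmap;

static int my_remap_cycle(void)
{
	void *addr = memremap_pages(&my_pgmap, NUMA_NO_NODE);

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	memunmap_pages(&my_pgmap);	/* line 136 resets my_pgmap.ref to NULL */

	addr = memremap_pages(&my_pgmap, NUMA_NO_NODE);	/* re-enabling is safe */
	return PTR_ERR_OR_ZERO(addr);
}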
194 struct dev_pagemap *pgmap =
197 complete(&pgmap->done);
200 static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
203 struct range *range = &pgmap->ranges[range_id];
207 if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
236 PHYS_PFN(range->end), pgmap, GFP_KERNEL));
261 if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
294 PHYS_PFN(range_len(range)), pgmap);
295 percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
296 - pfn_first(pgmap, range_id));
314 void *memremap_pages(struct dev_pagemap *pgmap, int nid)
317 .altmap = pgmap_altmap(pgmap),
320 const int nr_range = pgmap->nr_range;
326 switch (pgmap->type) {
332 if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
336 if (!pgmap->ops->page_free) {
340 if (!pgmap->owner) {
359 WARN(1, "Invalid pgmap type %d\n", pgmap->type);
363 if (!pgmap->ref) {
364 if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
367 init_completion(&pgmap->done);
368 error = percpu_ref_init(&pgmap->internal_ref,
372 pgmap->ref = &pgmap->internal_ref;
374 if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
380 devmap_managed_enable_get(pgmap);
383 * Clear the pgmap nr_range as it will be incremented for each successfully processed range.
387 pgmap->nr_range = 0;
390 error = pagemap_range(pgmap, &params, i, nid);
393 pgmap->nr_range++;
397 memunmap_pages(pgmap);
398 pgmap->nr_range = nr_range;
402 return __va(pgmap->ranges[0].start);
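memremap_pages() zeroes nr_range before the mapping loop (line 387) and increments it only after each pagemap_range() succeeds (line 393), so on failure the memunmap_pages() call at line 397 unwinds exactly the ranges already live, before nr_range is restored for the caller (line 398). A hedged single-range setup, the simplest way to drive it (my_map and its arguments are hypothetical; MEMORY_DEVICE_GENERIC is used because it requires no ops):

#include <linux/memremap.h>
#include <linux/numa.h>

static void *my_map(phys_addr_t start, phys_addr_t size,
		    struct dev_pagemap *pgmap)
{
	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range = (struct range) {
		.start	= start,
		.end	= start + size - 1,	/* struct range ends are inclusive */
	};
	pgmap->nr_range = 1;

	return memremap_pages(pgmap, NUMA_NO_NODE);	/* __va() of the range, or ERR_PTR */
}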
409 * @pgmap: pointer to a struct dev_pagemap
412 * 1/ At a minimum the range (or ranges plus nr_range) and type members of @pgmap must be initialized
416 * PGMAP_ALTMAP_VALID must be set in pgmap->flags.
418 * 3/ The ref field may optionally be provided, in which case pgmap->ref must be
426 void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
431 ret = memremap_pages(pgmap, dev_to_node(dev));
436 pgmap);
443 void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
445 devm_release_action(dev, devm_memremap_pages_release, pgmap);
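Per the notes above: fill in the range(s) and type, set PGMAP_ALTMAP_VALID in pgmap->flags only when an altmap is passed, and either leave ref NULL or supply a live one together with kill/cleanup ops. A hedged devm-managed sketch tied to a platform device (my_probe and the resource layout are hypothetical):

#include <linux/err.h>
#include <linux/memremap.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct dev_pagemap *pgmap;
	struct resource *res;
	void *addr;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	pgmap = devm_kzalloc(&pdev->dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range.start = res->start;
	pgmap->range.end = res->end;		/* both ends inclusive */
	pgmap->nr_range = 1;

	addr = devm_memremap_pages(&pdev->dev, pgmap);
	return PTR_ERR_OR_ZERO(addr);	/* torn down automatically on unbind,
					 * or earlier via devm_memunmap_pages() */
}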
465 * @pgmap: optional known pgmap that already has a reference
467 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap does not cover @pfn, the reference to it is dropped before the new lookup.
471 struct dev_pagemap *pgmap)
478 if (pgmap) {
479 if (phys >= pgmap->range.start && phys <= pgmap->range.end)
480 return pgmap;
481 put_dev_pagemap(pgmap);
486 pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
487 if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
488 pgmap = NULL;
491 return pgmap;
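The @pgmap parameter exists so a caller scanning consecutive pfns can hand back the pagemap it already holds: it is returned as-is while it still covers the pfn (lines 479-480), and only otherwise released and re-looked-up in pgmap_array (lines 481-487). An illustrative walk using that caching pattern (my_walk and its bounds are hypothetical):

#include <linux/memremap.h>
#include <linux/mm.h>

static void my_walk(unsigned long first_pfn, unsigned long nr)
{
	struct dev_pagemap *pgmap = NULL;
	unsigned long pfn;

	for (pfn = first_pfn; pfn < first_pfn + nr; pfn++) {
		/* reuse the held reference, or swap it for a new one */
		pgmap = get_dev_pagemap(pfn, pgmap);
		if (!pgmap)
			break;			/* not device memory */
		/* ... operate on pfn_to_page(pfn) ... */
	}
	if (pgmap)
		put_dev_pagemap(pgmap);
}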
530 page->pgmap->ops->page_free(page);
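Line 530 is where a managed page's final put is handed to the driver. For MEMORY_DEVICE_PRIVATE, memremap_pages() requires both this callback and migrate_to_ram (lines 332-336); a hedged sketch of the pair, with placeholder bodies:

#include <linux/memremap.h>
#include <linux/mm.h>

static void my_page_free(struct page *page)
{
	/* last reference dropped: driver reclaims the backing block here */
}

static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
{
	/* driver migrates the page back to system RAM here */
	return VM_FAULT_SIGBUS;		/* placeholder result */
}

static const struct dev_pagemap_ops my_pagemap_ops = {
	.page_free	= my_page_free,
	.migrate_to_ram	= my_migrate_to_ram,
};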