Lines matching refs: pgmap
All hits below appear to come from mm/memremap.c; the leading number on each hit is the source line, the trailing "in func()" names the enclosing function, and "argument"/"local" mark how the identifier is used there.

44 static void devmap_managed_enable_put(struct dev_pagemap *pgmap) in devmap_managed_enable_put() argument
46 if (pgmap->type == MEMORY_DEVICE_PRIVATE || in devmap_managed_enable_put()
47 pgmap->type == MEMORY_DEVICE_FS_DAX) in devmap_managed_enable_put()
51 static void devmap_managed_enable_get(struct dev_pagemap *pgmap) in devmap_managed_enable_get() argument
53 if (pgmap->type == MEMORY_DEVICE_PRIVATE || in devmap_managed_enable_get()
54 pgmap->type == MEMORY_DEVICE_FS_DAX) in devmap_managed_enable_get()
58 static void devmap_managed_enable_get(struct dev_pagemap *pgmap) in devmap_managed_enable_get() argument
61 static void devmap_managed_enable_put(struct dev_pagemap *pgmap) in devmap_managed_enable_put() argument
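The four hits above are the two sides of a CONFIG_DEV_PAGEMAP_OPS guard: real helpers at lines 44-54, empty stubs at lines 58-61. A plausible reconstruction of the elided bodies, assuming the static-key scheme this file is known to use (the key gates the put_page() side channel for managed device pages):

#include <linux/memremap.h>
#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(devmap_managed_key);

static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
	/* Only device-private and fs-dax pages hook the final put_page(). */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
	    pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_dec(&devmap_managed_key);
}

static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
	    pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_inc(&devmap_managed_key);
}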
73 static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id) in pfn_first() argument
75 struct range *range = &pgmap->ranges[range_id]; in pfn_first()
80 return pfn + vmem_altmap_offset(pgmap_altmap(pgmap)); in pfn_first()
83 bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn) in pgmap_pfn_valid() argument
87 for (i = 0; i < pgmap->nr_range; i++) { in pgmap_pfn_valid()
88 struct range *range = &pgmap->ranges[i]; in pgmap_pfn_valid()
92 return pfn >= pfn_first(pgmap, i); in pgmap_pfn_valid()
98 static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id) in pfn_end() argument
100 const struct range *range = &pgmap->ranges[range_id]; in pfn_end()
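Lines 73-100 are the pfn helpers that translate a pgmap range into page-frame numbers; the vmem_altmap_offset() term skips the struct-page reservation an altmap may carve out of the first range. A sketch of the full bodies, reconstructed from the visible fragments against the v5.10-era layout:

static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
{
	struct range *range = &pgmap->ranges[range_id];
	unsigned long pfn = PHYS_PFN(range->start);

	/* Only the first range may carry an altmap reservation. */
	if (range_id)
		return pfn;
	return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
}

bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
	int i;

	for (i = 0; i < pgmap->nr_range; i++) {
		struct range *range = &pgmap->ranges[i];

		if (pfn >= PHYS_PFN(range->start) &&
		    pfn <= PHYS_PFN(range->end))
			return pfn >= pfn_first(pgmap, i);
	}
	return false;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
{
	const struct range *range = &pgmap->ranges[range_id];

	return (range->start + range_len(range)) >> PAGE_SHIFT;
}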
115 static void dev_pagemap_kill(struct dev_pagemap *pgmap) in dev_pagemap_kill() argument
117 if (pgmap->ops && pgmap->ops->kill) in dev_pagemap_kill()
118 pgmap->ops->kill(pgmap); in dev_pagemap_kill()
120 percpu_ref_kill(pgmap->ref); in dev_pagemap_kill()
123 static void dev_pagemap_cleanup(struct dev_pagemap *pgmap) in dev_pagemap_cleanup() argument
125 if (pgmap->ops && pgmap->ops->cleanup) { in dev_pagemap_cleanup()
126 pgmap->ops->cleanup(pgmap); in dev_pagemap_cleanup()
128 wait_for_completion(&pgmap->done); in dev_pagemap_cleanup()
129 percpu_ref_exit(pgmap->ref); in dev_pagemap_cleanup()
135 if (pgmap->ref == &pgmap->internal_ref) in dev_pagemap_cleanup()
136 pgmap->ref = NULL; in dev_pagemap_cleanup()
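Lines 115-136 are the two halves of the teardown handshake: kill drives the refcount toward zero (through the driver's callback when one is registered), cleanup waits until every page reference is gone, then resets the fallback to the internal ref so a later re-map starts clean. Reconstructed bodies, hedged to the v5.10-era layout:

static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->kill)
		pgmap->ops->kill(pgmap);
	else
		percpu_ref_kill(pgmap->ref);
}

static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->cleanup) {
		pgmap->ops->cleanup(pgmap);
	} else {
		/* dev_pagemap_percpu_release() signals this completion. */
		wait_for_completion(&pgmap->done);
		percpu_ref_exit(pgmap->ref);
	}
	/* Undo the fallback so memremap_pages() can re-arm it next time. */
	if (pgmap->ref == &pgmap->internal_ref)
		pgmap->ref = NULL;
}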
139 static void pageunmap_range(struct dev_pagemap *pgmap, int range_id) in pageunmap_range() argument
141 struct range *range = &pgmap->ranges[range_id]; in pageunmap_range()
145 first_page = pfn_to_page(pfn_first(pgmap, range_id)); in pageunmap_range()
151 if (pgmap->type == MEMORY_DEVICE_PRIVATE) { in pageunmap_range()
156 pgmap_altmap(pgmap)); in pageunmap_range()
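pageunmap_range() (lines 139-156) undoes the arch-level mapping once the pages are dead; the two branches differ because MEMORY_DEVICE_PRIVATE memory was never put in the linear map. A condensed sketch, assuming the v5.10-era hotplug signatures (they have drifted across versions):

static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
{
	struct range *range = &pgmap->ranges[range_id];
	struct page *first_page;

	/* Make sure to access a memmap that was actually initialized. */
	first_page = pfn_to_page(pfn_first(pgmap, range_id));

	/* Pages are dead and unused, undo the arch mapping. */
	mem_hotplug_begin();
	remove_pfn_range_from_zone(page_zone(first_page),
				   PHYS_PFN(range->start),
				   PHYS_PFN(range_len(range)));
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		/* Private memory has struct pages but no linear mapping. */
		__remove_pages(PHYS_PFN(range->start),
			       PHYS_PFN(range_len(range)), NULL);
	} else {
		arch_remove_memory(page_to_nid(first_page), range->start,
				   range_len(range), pgmap_altmap(pgmap));
		kasan_remove_zero_shadow(__va(range->start),
					 range_len(range));
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
	pgmap_array_delete(range);	/* drop the xarray entries */
}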
165 void memunmap_pages(struct dev_pagemap *pgmap) in memunmap_pages() argument
170 dev_pagemap_kill(pgmap); in memunmap_pages()
171 for (i = 0; i < pgmap->nr_range; i++) in memunmap_pages()
172 for_each_device_pfn(pfn, pgmap, i) in memunmap_pages()
174 dev_pagemap_cleanup(pgmap); in memunmap_pages()
176 for (i = 0; i < pgmap->nr_range; i++) in memunmap_pages()
177 pageunmap_range(pgmap, i); in memunmap_pages()
179 WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n"); in memunmap_pages()
180 devmap_managed_enable_put(pgmap); in memunmap_pages()
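memunmap_pages() (lines 165-180) strings the pieces together: kill the refcount, drop the one reference each page was born with, wait for the count to reach zero, then unmap every range. Reconstructed from the fragments:

void memunmap_pages(struct dev_pagemap *pgmap)
{
	unsigned long pfn;
	int i;

	dev_pagemap_kill(pgmap);
	for (i = 0; i < pgmap->nr_range; i++)
		/* local macro: iterates pfn_first()..pfn_end() of range i */
		for_each_device_pfn(pfn, pgmap, i)
			put_page(pfn_to_page(pfn));
	dev_pagemap_cleanup(pgmap);

	for (i = 0; i < pgmap->nr_range; i++)
		pageunmap_range(pgmap, i);

	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
	devmap_managed_enable_put(pgmap);
}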
191 struct dev_pagemap *pgmap = in dev_pagemap_percpu_release() local
194 complete(&pgmap->done); in dev_pagemap_percpu_release()
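The release callback for the built-in refcount (lines 191-194, note the "local" annotation) only signals the completion that dev_pagemap_cleanup() waits on:

static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap =
		container_of(ref, struct dev_pagemap, internal_ref);

	complete(&pgmap->done);
}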
197 static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params, in pagemap_range() argument
200 const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE; in pagemap_range()
201 struct range *range = &pgmap->ranges[range_id]; in pagemap_range()
205 if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0, in pagemap_range()
234 PHYS_PFN(range->end), pgmap, GFP_KERNEL)); in pagemap_range()
297 PHYS_PFN(range_len(range)), pgmap); in pagemap_range()
298 percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id) in pagemap_range()
299 - pfn_first(pgmap, range_id)); in pagemap_range()
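pagemap_range() (lines 197-299) is the per-range setup path. A condensed sketch of the visible steps; the track_pfn_remap()/hotplug/arch_add_memory() middle and all error unwinding are elided, and the layout assumed is the v5.10 era:

static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
		int range_id, int nid)
{
	const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
	struct range *range = &pgmap->ranges[range_id];
	int error;

	/* An altmap reservation only makes sense for the first range. */
	if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
				"altmap not supported for multiple ranges\n"))
		return -EINVAL;

	/* Publish the pfn range so get_dev_pagemap() can find it. */
	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
				PHYS_PFN(range->end), pgmap, GFP_KERNEL));
	if (error)
		return error;

	/* ... mem_hotplug_begin(); add_pages() when is_private, else
	 * arch_add_memory(); zone setup; mem_hotplug_done() ... */

	/* Initialize the ZONE_DEVICE memmap and pin one ref per page. */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), pgmap);
	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
			- pfn_first(pgmap, range_id));
	return 0;
}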
317 void *memremap_pages(struct dev_pagemap *pgmap, int nid) in memremap_pages() argument
320 .altmap = pgmap_altmap(pgmap), in memremap_pages()
323 const int nr_range = pgmap->nr_range; in memremap_pages()
329 switch (pgmap->type) { in memremap_pages()
335 if (!pgmap->ops || !pgmap->ops->migrate_to_ram) { in memremap_pages()
339 if (!pgmap->ops->page_free) { in memremap_pages()
343 if (!pgmap->owner) { in memremap_pages()
361 WARN(1, "Invalid pgmap type %d\n", pgmap->type); in memremap_pages()
365 if (!pgmap->ref) { in memremap_pages()
366 if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup)) in memremap_pages()
369 init_completion(&pgmap->done); in memremap_pages()
370 error = percpu_ref_init(&pgmap->internal_ref, in memremap_pages()
374 pgmap->ref = &pgmap->internal_ref; in memremap_pages()
376 if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) { in memremap_pages()
382 devmap_managed_enable_get(pgmap); in memremap_pages()
389 pgmap->nr_range = 0; in memremap_pages()
392 error = pagemap_range(pgmap, &params, i, nid); in memremap_pages()
395 pgmap->nr_range++; in memremap_pages()
399 memunmap_pages(pgmap); in memremap_pages()
400 pgmap->nr_range = nr_range; in memremap_pages()
404 return __va(pgmap->ranges[0].start); in memremap_pages()
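memremap_pages() itself (lines 317-404) validates the pgmap type, falls back to the internal percpu ref when the caller supplies none, then maps each range, unwinding via memunmap_pages() on failure. A hypothetical caller-side sketch showing what a MEMORY_DEVICE_PRIVATE driver must fill in before the call; all my_* names are illustrative, not from the source:

#include <linux/memremap.h>
#include <linux/numa.h>

static void my_page_free(struct page *page)
{
	/* Return the backing device block for this page to the driver pool. */
}

static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
{
	/* Migrate the faulting device-private page back to system RAM. */
	return VM_FAULT_SIGBUS;	/* placeholder */
}

static const struct dev_pagemap_ops my_pgmap_ops = {
	.page_free	= my_page_free,		/* required, see line 339 */
	.migrate_to_ram	= my_migrate_to_ram,	/* required, see line 335 */
};

static struct dev_pagemap my_pgmap;	/* must outlive the mapping */

static int my_map_device_memory(resource_size_t start, resource_size_t len)
{
	void *ret;

	/* start/len would normally come from request_free_mem_region(). */
	my_pgmap.type = MEMORY_DEVICE_PRIVATE;
	my_pgmap.range.start = start;
	my_pgmap.range.end = start + len - 1;
	my_pgmap.nr_range = 1;
	my_pgmap.ops = &my_pgmap_ops;
	my_pgmap.owner = &my_pgmap;	/* any unique cookie, see line 343 */

	ret = memremap_pages(&my_pgmap, NUMA_NO_NODE);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	/*
	 * For device-private memory the returned __va() is only a success
	 * token; the CPU cannot address these pages directly.
	 */
	return 0;
}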
428 void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) in devm_memremap_pages() argument
433 ret = memremap_pages(pgmap, dev_to_node(dev)); in devm_memremap_pages()
438 pgmap); in devm_memremap_pages()
445 void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap) in devm_memunmap_pages() argument
447 devm_release_action(dev, devm_memremap_pages_release, pgmap); in devm_memunmap_pages()
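The devm wrappers (lines 428-447) just bind memremap_pages() to the device lifetime through the usual devm action API. Reconstruction from the fragments:

void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	void *ret;

	ret = memremap_pages(pgmap, dev_to_node(dev));
	if (IS_ERR(ret))
		return ret;

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
					 pgmap);
	if (error)
		return ERR_PTR(error);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);

void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);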
473 struct dev_pagemap *pgmap) in get_dev_pagemap() argument
480 if (pgmap) { in get_dev_pagemap()
481 if (phys >= pgmap->range.start && phys <= pgmap->range.end) in get_dev_pagemap()
482 return pgmap; in get_dev_pagemap()
483 put_dev_pagemap(pgmap); in get_dev_pagemap()
488 pgmap = xa_load(&pgmap_array, PHYS_PFN(phys)); in get_dev_pagemap()
489 if (pgmap && !percpu_ref_tryget_live(pgmap->ref)) in get_dev_pagemap()
490 pgmap = NULL; in get_dev_pagemap()
493 return pgmap; in get_dev_pagemap()
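get_dev_pagemap() (lines 473-493) resolves a pfn to its pgmap, reusing the caller-supplied hint when it still covers the pfn and otherwise falling back to the xarray while taking a live reference. Reconstructed body:

struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/* Fast path: the caller's hint may already cover this pfn. */
	if (pgmap) {
		if (phys >= pgmap->range.start && phys <= pgmap->range.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* Fall back to the slow-path xarray lookup. */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}

Callers typically thread the returned pointer through a loop as the hint for the next lookup and call put_dev_pagemap() on the final value.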
532 page->pgmap->ops->page_free(page); in free_devmap_managed_page()
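The last hit (line 532) is the tail of free_devmap_managed_page(), where a managed page whose refcount has dropped to its final reference is handed back to the driver. A sketch of the surrounding body in the v5.10-era file:

void free_devmap_managed_page(struct page *page)
{
	/* Notify page idle for dax. */
	if (!is_device_private_page(page)) {
		wake_up_var(&page->_refcount);
		return;
	}

	__ClearPageWaiters(page);
	mem_cgroup_uncharge(page);

	/*
	 * A stale page->mapping (e.g. anon-page bits) must not leak into
	 * the driver's free path, so clear it before handing the page back.
	 */
	page->mapping = NULL;
	page->pgmap->ops->page_free(page);
}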