// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/pseudo_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/resource_ext.h>
#include <uapi/linux/magic.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

/* constraints to be met while allocating resources */
struct resource_constraint {
	resource_size_t min, max, align;
	resource_size_t (*alignf)(void *, const struct resource *,
			resource_size_t, resource_size_t);
	void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);

/*
 * For memory hotplug, there is no way to free resource entries allocated
 * by boot mem after the system is up. So, to reuse such entries, we keep
 * the freed resources on a free list of their own.
 */
static struct resource *bootmem_resource_free;
static DEFINE_SPINLOCK(bootmem_resource_lock);

static struct resource *next_resource(struct resource *p)
{
	if (p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

static struct resource *next_resource_skip_children(struct resource *p)
{
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

#define for_each_resource(_root, _p, _skip_children) \
	for ((_p) = (_root)->child; (_p); \
	     (_p) = (_skip_children) ? next_resource_skip_children(_p) : \
				       next_resource(_p))
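
/*
 * Example (illustrative only, not part of the original file): walking the
 * whole iomem tree while skipping the children of System RAM entries, the
 * same pattern iomem_is_exclusive() uses below. The caller must hold
 * resource_lock for reading, and the skip flag is re-evaluated on every
 * iteration:
 *
 *	struct resource *p;
 *	bool skip_children = false;
 *
 *	for_each_resource(&iomem_resource, p, skip_children)
 *		skip_children = (p->flags & IORESOURCE_SYSTEM_RAM) ==
 *				IORESOURCE_SYSTEM_RAM;
 */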

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;
	(*pos)++;
	return (void *)next_resource(p);
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *p = PDE_DATA(file_inode(m->file));
	loff_t l = 0;
	read_lock(&resource_lock);
	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
		;
	return p;
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = PDE_DATA(file_inode(m->file));
	struct resource *r = v, *p;
	unsigned long long start, end;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;

	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
		start = r->start;
		end = r->end;
	} else {
		start = end = 0;
	}

	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, start,
			width, end,
			r->name ? r->name : "<BAD>");
	return 0;
}

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int __init ioresources_init(void)
{
	proc_create_seq_data("ioports", 0, NULL, &resource_op,
			&ioport_resource);
	proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

static void free_resource(struct resource *res)
{
	if (!res)
		return;

	if (!PageSlab(virt_to_head_page(res))) {
		spin_lock(&bootmem_resource_lock);
		res->sibling = bootmem_resource_free;
		bootmem_resource_free = res;
		spin_unlock(&bootmem_resource_lock);
	} else {
		kfree(res);
	}
}

static struct resource *alloc_resource(gfp_t flags)
{
	struct resource *res = NULL;

	spin_lock(&bootmem_resource_lock);
	if (bootmem_resource_free) {
		res = bootmem_resource_free;
		bootmem_resource_free = res->sibling;
	}
	spin_unlock(&bootmem_resource_lock);

	if (res)
		memset(res, 0, sizeof(struct resource));
	else
		res = kzalloc(sizeof(struct resource), flags);

	return res;
}

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old, bool release_child)
{
	struct resource *tmp, **p, *chd;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			if (release_child || !(tmp->child)) {
				*p = tmp->sibling;
			} else {
				for (chd = tmp->child;; chd = chd->sibling) {
					chd->parent = tmp->parent;
					if (!(chd->sibling))
						break;
				}
				*p = tmp->child;
				chd->sibling = tmp->sibling;
			}
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL on success, or a pointer to the conflicting resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);
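
/*
 * Example (illustrative sketch, not part of this file): a platform driver
 * claiming a fixed MMIO window directly below the iomem root. The "foo"
 * name and the addresses are made up for the example.
 *
 *	static struct resource foo_mmio = {
 *		.name	= "foo-mmio",
 *		.start	= 0xfed40000,
 *		.end	= 0xfed40fff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (request_resource(&iomem_resource, &foo_mmio))
 *		return -EBUSY;		(window already claimed)
 *	...
 *	release_resource(&foo_mmio);	(on teardown)
 */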

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, true);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);

/**
 * find_next_iomem_res - Finds the lowest iomem resource that covers part of
 *			 [@start..@end].
 *
 * If a resource is found, returns 0 and @*res is overwritten with the part
 * of the resource that's within [@start..@end]; if none is found, returns
 * -ENODEV.  Returns -EINVAL for invalid parameters.
 *
 * @start:	start address of the resource searched for
 * @end:	end address of same resource
 * @flags:	flags which the resource must have
 * @desc:	descriptor the resource must have
 * @res:	return ptr, if resource found
 *
 * The caller must specify @start, @end, @flags, and @desc
 * (which may be IORES_DESC_NONE).
 */
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
			       unsigned long flags, unsigned long desc,
			       struct resource *res)
{
	struct resource *p;

	if (!res)
		return -EINVAL;

	if (start >= end)
		return -EINVAL;

	read_lock(&resource_lock);

	for (p = iomem_resource.child; p; p = next_resource(p)) {
		/* If we passed the resource we are looking for, stop */
		if (p->start > end) {
			p = NULL;
			break;
		}

		/* Skip until we find a range that matches what we look for */
		if (p->end < start)
			continue;

		if ((p->flags & flags) != flags)
			continue;
		if ((desc != IORES_DESC_NONE) && (desc != p->desc))
			continue;

		/* Found a match, break */
		break;
	}

	if (p) {
		/* copy data */
		*res = (struct resource) {
			.start = max(start, p->start),
			.end = min(end, p->end),
			.flags = p->flags,
			.desc = p->desc,
			.parent = p->parent,
		};
	}

	read_unlock(&resource_lock);
	return p ? 0 : -ENODEV;
}

static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
				 unsigned long flags, unsigned long desc,
				 void *arg,
				 int (*func)(struct resource *, void *))
{
	struct resource res;
	int ret = -EINVAL;

	while (start < end &&
	       !find_next_iomem_res(start, end, flags, desc, &res)) {
		ret = (*func)(&res, arg);
		if (ret)
			break;

		start = res.end + 1;
	}

	return ret;
}

/**
 * walk_iomem_res_desc - Walks through iomem resources and calls func()
 *			 with matching resource ranges.
 *
 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 * @flags: I/O resource flags
 * @start: start addr
 * @end: end addr
 * @arg: function argument for the callback @func
 * @func: callback function that is called for each qualifying resource area
 *
 * All the memory ranges which overlap start,end and also match flags and
 * desc are valid candidates.
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 */
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
		u64 end, void *arg, int (*func)(struct resource *, void *))
{
	return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
}
EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
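
/*
 * Example (illustrative only): counting ACPI NVS ranges that overlap the
 * first 4 GiB. The callback and its argument are made up for the example;
 * any IORES_DESC_* value from <linux/ioport.h> can be used the same way.
 *
 *	static int count_range(struct resource *res, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;		(nonzero would stop the walk)
 *	}
 *
 *	unsigned int n = 0;
 *
 *	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, IORESOURCE_MEM,
 *			    0, SZ_4G - 1, &n, count_range);
 */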

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * Note that this function is only for System RAM; it deals with full ranges
 * and not PFNs. If resources are not PFN-aligned, dealing with PFNs can
 * truncate ranges.
 */
int walk_system_ram_res(u64 start, u64 end, void *arg,
			int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function calls the @func callback against all memory ranges, which
 * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 */
int walk_mem_res(u64 start, u64 end, void *arg,
		 int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is to be used only for System RAM.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
			  void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	resource_size_t start, end;
	unsigned long flags;
	struct resource res;
	unsigned long pfn, end_pfn;
	int ret = -EINVAL;

	start = (u64) start_pfn << PAGE_SHIFT;
	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	while (start < end &&
	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
		pfn = PFN_UP(res.start);
		end_pfn = PFN_DOWN(res.end + 1);
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		start = res.end + 1;
	}
	return ret;
}

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}

/*
 * This generic page_is_ram() returns true if the specified address is
 * registered as System RAM in the iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);

static int __region_intersects(resource_size_t start, size_t size,
			unsigned long flags, unsigned long desc)
{
	struct resource res;
	int type = 0, other = 0;
	struct resource *p;

	res.start = start;
	res.end = start + size - 1;

	for (p = iomem_resource.child; p; p = p->sibling) {
		bool is_type = (((p->flags & flags) == flags) &&
				((desc == IORES_DESC_NONE) ||
				 (desc == p->desc)));

		if (resource_overlaps(p, &res))
			is_type ? type++ : other++;
	}

	if (type == 0)
		return REGION_DISJOINT;

	if (other == 0)
		return REGION_INTERSECTS;

	return REGION_MIXED;
}

/**
 * region_intersects() - determine intersection of region with known resources
 * @start: region start address
 * @size: size of region
 * @flags: flags of resource (in iomem_resource)
 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 *
 * Check if the specified region partially overlaps or fully eclipses a
 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 * return REGION_MIXED if the region overlaps @flags/@desc and another
 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 * and no other defined resource. Note that REGION_INTERSECTS is also
 * returned in the case when the specified region overlaps RAM and undefined
 * memory holes.
 *
 * region_intersects() is used by memory remapping functions to ensure
 * the user is not remapping RAM; it is a vast speed-up over walking
 * through the resource table page by page.
 */
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
		      unsigned long desc)
{
	int ret;

	read_lock(&resource_lock);
	ret = __region_intersects(start, size, flags, desc);
	read_unlock(&resource_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(region_intersects);
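
/*
 * Example (illustrative only): refusing to remap System RAM, roughly the
 * check remapping paths perform. The variables are made up for the example.
 *
 *	if (region_intersects(phys_addr, size, IORESOURCE_SYSTEM_RAM,
 *			      IORES_DESC_NONE) != REGION_DISJOINT)
 *		return -EINVAL;	(region touches RAM, do not remap it)
 */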

void __weak arch_remove_reservations(struct resource *avail)
{
}

static resource_size_t simple_align_resource(void *data,
					     const struct resource *avail,
					     resource_size_t size,
					     resource_size_t align)
{
	return avail->start;
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

/*
 * Find empty slot in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource(struct resource *root, struct resource *old,
			 struct resource *new,
			 resource_size_t size,
			 struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;

	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to tmp->end below would cause an underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		avail.flags = new->flags & ~IORESOURCE_UNSET;
		if (avail.start >= tmp.start) {
			alloc.flags = avail.flags;
			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
					size, constraint->align);
			alloc.end = alloc.start + size - 1;
			if (alloc.start <= alloc.end &&
			    resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			resource_size_t size,
			struct resource_constraint *constraint)
{
	return __find_resource(root, NULL, new, size, constraint);
}

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be reallocated in the
 *	current location.
 *
 * @root: root resource descriptor
 * @old:  resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the size and alignment constraints to be met.
 */
static int reallocate_resource(struct resource *root, struct resource *old,
			       resource_size_t newsize,
			       struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	err = __find_resource(root, old, &new, newsize, constraint);
	if (err)
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old, true);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}


/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was already allocated.
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_size_t (*alignf)(void *,
						const struct resource *,
						resource_size_t,
						resource_size_t),
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	if (!alignf)
		alignf = simple_align_resource;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/*
		 * The resource is already allocated; try reallocating with
		 * the new constraints.
		 */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);
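
/*
 * Example (illustrative only): carving a 64 KiB, 64 KiB-aligned window for
 * a hypothetical device out of a bus range already in the tree. With a NULL
 * @alignf, simple_align_resource() is used and the lowest suitable gap
 * within [min, max] is chosen. Names and addresses are made up.
 *
 *	static struct resource foo_win = {
 *		.name	= "foo-window",
 *		.flags	= IORESOURCE_MEM,
 *	};
 *	int err;
 *
 *	err = allocate_resource(&iomem_resource, &foo_win, SZ_64K,
 *				0x80000000, 0x8fffffff, SZ_64K, NULL, NULL);
 */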

/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, or the conflicting resource if @new can't be
 * inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL_GPL(insert_resource);
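
/*
 * Example (illustrative only): firmware glue publishing a reserved range
 * that may already contain busy children. Unlike request_resource(),
 * existing resources that fit entirely inside the new range are
 * re-parented under it rather than treated as conflicts. The name and
 * addresses are made up.
 *
 *	static struct resource fw_tables = {
 *		.name	= "fw-tables",
 *		.start	= 0xdf000000,
 *		.end	= 0xdfffffff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (insert_resource(&iomem_resource, &fw_tables))
 *		pr_warn("fw-tables range partially overlaps a neighbour\n");
 */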

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}

/**
 * remove_resource - Remove a resource in the resource tree
 * @old: resource to remove
 *
 * Returns 0 on success, -EINVAL if the resource is not valid.
 *
 * This function removes a resource previously inserted by insert_resource()
 * or insert_resource_conflict(), and moves the children (if any) up to
 * where they were before.  insert_resource() and insert_resource_conflict()
 * insert a new resource, and move any conflicting resources down to the
 * children of the new resource.
 *
 * insert_resource(), insert_resource_conflict() and remove_resource() are
 * intended for producers of resources, such as FW modules and bus drivers.
 */
int remove_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, false);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(remove_resource);

static int __adjust_resource(struct resource *res, resource_size_t start,
				resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	if (!parent)
		goto skip;

	if ((start < parent->start) || (end > parent->end))
		goto out;

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

skip:
	for (tmp = res->child; tmp; tmp = tmp->sibling)
		if ((tmp->start < start) || (tmp->end > end))
			goto out;

	res->start = start;
	res->end = end;
	result = 0;

out:
	return result;
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments.  Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
		    resource_size_t size)
{
	int result;

	write_lock(&resource_lock);
	result = __adjust_resource(res, start, size);
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);
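
/*
 * Example (illustrative only): growing a previously requested region in
 * place. The resource keeps its position in the tree; the call fails with
 * -EBUSY if a sibling already occupies part of the new range.
 *
 *	(res currently covers [0x1000, 0x1fff])
 *	err = adjust_resource(res, 0x1000, SZ_8K);
 *	(on success, res now covers [0x1000, 0x2fff])
 */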

static void __init
__reserve_region_with_split(struct resource *root, resource_size_t start,
			    resource_size_t end, const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = alloc_resource(GFP_ATOMIC);
	struct resource *next_res = NULL;
	int type = resource_type(root);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = type | IORESOURCE_BUSY;
	res->desc = IORES_DESC_NONE;

	while (1) {
		conflict = __request_resource(parent, res);
		if (!conflict) {
			if (!next_res)
				break;
			res = next_res;
			next_res = NULL;
			continue;
		}

		/* conflict covered whole area */
		if (conflict->start <= res->start &&
				conflict->end >= res->end) {
			free_resource(res);
			WARN_ON(next_res);
			break;
		}

		/* failed, split and try again */
		if (conflict->start > res->start) {
			end = res->end;
			res->end = conflict->start - 1;
			if (conflict->end < end) {
				next_res = alloc_resource(GFP_ATOMIC);
				if (!next_res) {
					free_resource(res);
					break;
				}
				next_res->name = name;
				next_res->start = conflict->end + 1;
				next_res->end = end;
				next_res->flags = type | IORESOURCE_BUSY;
				next_res->desc = IORES_DESC_NONE;
			}
		} else {
			res->start = conflict->end + 1;
		}
	}
}

void __init
reserve_region_with_split(struct resource *root, resource_size_t start,
			  resource_size_t end, const char *name)
{
	int abort = 0;

	write_lock(&resource_lock);
	if (root->start > start || root->end < end) {
		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
		       (unsigned long long)start, (unsigned long long)end,
		       root);
		if (start > root->end || end < root->start)
			abort = 1;
		else {
			if (end > root->end)
				end = root->end;
			if (start < root->start)
				start = root->start;
			pr_err("fixing request to [0x%llx-0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		dump_stack();
	}
	if (!abort)
		__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

static struct inode *iomem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
static void revoke_iomem(struct resource *res)
{
	/* pairs with smp_store_release() in iomem_init_inode() */
	struct inode *inode = smp_load_acquire(&iomem_inode);

	/*
	 * Check that the initialization has completed. Losing the race
	 * is ok because it means drivers are claiming resources before
	 * the fs_initcall level of init, preventing iomem_get_mapping users
	 * from establishing mappings.
	 */
	if (!inode)
		return;

	/*
	 * The expectation is that the driver has successfully marked
	 * the resource busy by this point, so devmem_is_allowed()
	 * should start returning false; however, for performance this
	 * does not iterate the entire resource range.
	 */
	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
	    devmem_is_allowed(PHYS_PFN(res->end))) {
		/*
		 * *cringe* iomem=relaxed says "go ahead, what's the
		 * worst that can happen?"
		 */
		return;
	}

	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#else
static void revoke_iomem(struct resource *res) {}
#endif

struct address_space *iomem_get_mapping(void)
{
	/*
	 * This function is only called from file open paths, hence guaranteed
	 * that fs_initcalls have completed and no need to check for NULL. But
	 * since revoke_iomem can be called before the initcall we still need
	 * the barrier to appease checkers.
	 */
	return smp_load_acquire(&iomem_inode)->i_mapping;
}

static int __request_region_locked(struct resource *res, struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	res->name = name;
	res->start = start;
	res->end = start + n - 1;

	for (;;) {
		struct resource *conflict;

		res->flags = resource_type(parent) | resource_ext_type(parent);
		res->flags |= IORESOURCE_BUSY | flags;
		res->desc = parent->desc;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		/*
		 * mm/hmm.c reserves physical addresses which then
		 * become unavailable to other users.  Conflicts are
		 * not expected.  Warn to aid debugging if encountered.
		 */
		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
			pr_warn("Unaddressable device %s %pR conflicts with %pR",
				conflict->name, conflict, res);
		}
		if (conflict != parent) {
			if (!(conflict->flags & IORESOURCE_BUSY)) {
				parent = conflict;
				continue;
			}
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		return -EBUSY;
	}

	return 0;
}

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource *__request_region(struct resource *parent,
				  resource_size_t start, resource_size_t n,
				  const char *name, int flags)
{
	struct resource *res = alloc_resource(GFP_KERNEL);
	int ret;

	if (!res)
		return NULL;

	write_lock(&resource_lock);
	ret = __request_region_locked(res, parent, start, n, name, flags);
	write_unlock(&resource_lock);

	if (ret) {
		free_resource(res);
		return NULL;
	}

	if (parent == &iomem_resource)
		revoke_iomem(res);

	return res;
}
EXPORT_SYMBOL(__request_region);
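
/*
 * Example (illustrative only): drivers normally reach this through the
 * request_region()/request_mem_region() wrappers from <linux/ioport.h>,
 * which pass ioport_resource or iomem_resource as the parent. The address
 * and name are made up.
 *
 *	if (!request_mem_region(0xfeb00000, SZ_4K, "foo-regs"))
 *		return -EBUSY;
 *	...
 *	release_mem_region(0xfeb00000, SZ_4K);
 */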

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			free_resource(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource <%016llx-%016llx>\n",
	       (unsigned long long)start, (unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);

#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete.  The requested region
 * is released from a currently busy memory resource.  The requested region
 * must either match exactly or fit into a single busy resource entry.  In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * request.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 *   assumes that all children remain in the lower address entry for
 *   simplicity.  Enhance this logic when necessary.
 */
void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
{
	struct resource *parent = &iomem_resource;
	struct resource *new_res = NULL;
	bool alloc_nofail = false;
	struct resource **p;
	struct resource *res;
	resource_size_t end;

	end = start + size - 1;
	if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
		return;

	/*
	 * We free up quite a lot of memory on memory hotunplug (esp., memmap),
	 * just before releasing the region. This is highly unlikely to
	 * fail - let's play safe and make it never fail as the caller cannot
	 * perform any error handling (e.g., trying to re-add memory will fail
	 * similarly).
	 */
retry:
	new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));

	p = &parent->child;
	write_lock(&resource_lock);

	while ((res = *p)) {
		if (res->start >= end)
			break;

		/* look at the next resource if this one does not fit */
		if (res->start > start || res->end < end) {
			p = &res->sibling;
			continue;
		}

		/*
		 * All memory regions added from memory-hotplug path have the
		 * flag IORESOURCE_SYSTEM_RAM. If the resource does not have
		 * this flag, we know that we are dealing with a resource coming
		 * from HMM/devm. HMM/devm use another mechanism to add/release
		 * a resource. This goes via devm_request_mem_region and
		 * devm_release_mem_region.
		 * HMM/devm take care to release their resources when they want,
		 * so if we are dealing with them, let us just back off here.
		 */
		if (!(res->flags & IORESOURCE_SYSRAM))
			break;

		if (!(res->flags & IORESOURCE_MEM))
			break;

		if (!(res->flags & IORESOURCE_BUSY)) {
			p = &res->child;
			continue;
		}

		/* found the target resource; let's adjust accordingly */
		if (res->start == start && res->end == end) {
			/* free the whole entry */
			*p = res->sibling;
			free_resource(res);
		} else if (res->start == start && res->end != end) {
			/* adjust the start */
			WARN_ON_ONCE(__adjust_resource(res, end + 1,
						       res->end - end));
		} else if (res->start != start && res->end == end) {
			/* adjust the end */
			WARN_ON_ONCE(__adjust_resource(res, res->start,
						       start - res->start));
		} else {
			/* split into two entries - we need a new resource */
			if (!new_res) {
				new_res = alloc_resource(GFP_ATOMIC);
				if (!new_res) {
					alloc_nofail = true;
					write_unlock(&resource_lock);
					goto retry;
				}
			}
			new_res->name = res->name;
			new_res->start = end + 1;
			new_res->end = res->end;
			new_res->flags = res->flags;
			new_res->desc = res->desc;
			new_res->parent = res->parent;
			new_res->sibling = res->sibling;
			new_res->child = NULL;

			if (WARN_ON_ONCE(__adjust_resource(res, res->start,
							   start - res->start)))
				break;
			res->sibling = new_res;
			new_res = NULL;
		}

		break;
	}

	write_unlock(&resource_lock);
	free_resource(new_res);
}
#endif	/* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_MEMORY_HOTPLUG
static bool system_ram_resources_mergeable(struct resource *r1,
					   struct resource *r2)
{
	/* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
	return r1->flags == r2->flags && r1->end + 1 == r2->start &&
	       r1->name == r2->name && r1->desc == r2->desc &&
	       !r1->child && !r2->child;
}

/**
 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
 *	merge it with adjacent, mergeable resources
 * @res: resource descriptor
 *
 * This interface is intended for memory hotplug, whereby lots of contiguous
 * system ram resources are added (e.g., via add_memory*()) by a driver, and
 * the actual resource boundaries are not of interest (e.g., it might be
 * relevant for DIMMs). Only resources that are marked mergeable, that have the
 * same parent, and that don't have any children are considered. All mergeable
 * resources must be immutable during the request.
 *
 * Note:
 * - The caller has to make sure that no pointers to resources that are
 *   marked mergeable are used anymore after this call - the resource might
 *   be freed and the pointer might be stale!
 * - release_mem_region_adjustable() will split on demand on memory hotunplug
 */
void merge_system_ram_resource(struct resource *res)
{
	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	struct resource *cur;

	if (WARN_ON_ONCE((res->flags & flags) != flags))
		return;

	write_lock(&resource_lock);
	res->flags |= IORESOURCE_SYSRAM_MERGEABLE;

	/* Try to merge with next item in the list. */
	cur = res->sibling;
	if (cur && system_ram_resources_mergeable(res, cur)) {
		res->end = cur->end;
		res->sibling = cur->sibling;
		free_resource(cur);
	}

	/* Try to merge with previous item in the list. */
	cur = res->parent->child;
	while (cur && cur->sibling != res)
		cur = cur->sibling;
	if (cur && system_ram_resources_mergeable(cur, res)) {
		cur->end = res->end;
		cur->sibling = res->sibling;
		free_resource(res);
	}
	write_unlock(&resource_lock);
}
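
/*
 * Example (illustrative only): a memory hotplug driver that onlines many
 * contiguous blocks can hand each newly created System RAM resource to
 * merge_system_ram_resource() so that /proc/iomem shows one combined range.
 * "res" is assumed to point at the resource the driver just added; per the
 * note above, the pointer must not be used afterwards, as the entry may
 * have been freed by the merge.
 *
 *	merge_system_ram_resource(res);
 *	res = NULL;		(may be stale now)
 */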
#endif	/* CONFIG_MEMORY_HOTPLUG */

/*
 * Managed region resource
 */
static void devm_resource_release(struct device *dev, void *ptr)
{
	struct resource **r = ptr;

	release_resource(*r);
}

/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
			  struct resource *new)
{
	struct resource *conflict, **ptr;

	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = new;

	conflict = request_resource_conflict(root, new);
	if (conflict) {
		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
			new, conflict->name, conflict);
		devres_free(ptr);
		return -EBUSY;
	}

	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL(devm_request_resource);
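
/*
 * Example (illustrative only): requesting a resource from probe() with no
 * matching cleanup on the remove() side; the devres core releases it when
 * the device is unbound. "foo_mmio" is the hypothetical resource from the
 * request_resource() example above.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int err;
 *
 *		err = devm_request_resource(&pdev->dev, &iomem_resource,
 *					    &foo_mmio);
 *		if (err)
 *			return err;
 *		...
 *	}
 */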

static int devm_resource_match(struct device *dev, void *res, void *data)
{
	struct resource **ptr = res;

	return *ptr == data;
}

/**
 * devm_release_resource() - release a previously requested resource
 * @dev: device for which to release the resource
 * @new: descriptor of the resource to release
 *
 * Releases a resource previously requested using devm_request_resource().
 */
void devm_release_resource(struct device *dev, struct resource *new)
{
	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
			       new));
}
EXPORT_SYMBOL(devm_release_resource);

struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}

struct resource *
__devm_request_region(struct device *dev, struct resource *parent,
		      resource_size_t start, resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);
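
/*
 * Example (illustrative only): drivers use the devm_request_region() and
 * devm_request_mem_region() macros from <linux/ioport.h>, which wrap this
 * function with ioport_resource or iomem_resource as the parent:
 *
 *	if (!devm_request_mem_region(&pdev->dev, res->start,
 *				     resource_size(res), "foo-regs"))
 *		return -EBUSY;
 */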

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

/*
 * Reserve I/O ports or memory based on "reserve=" kernel parameter.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;
		struct resource *parent;

		if (get_option(&str, &io_start) != 2)
			break;
		if (get_option(&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;

			/*
			 * If the region starts below 0x10000, we assume it's
			 * I/O port space; otherwise assume it's memory.
			 */
			if (io_start < 0x10000) {
				res->flags = IORESOURCE_IO;
				parent = &ioport_resource;
			} else {
				res->flags = IORESOURCE_MEM;
				parent = &iomem_resource;
			}
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags |= IORESOURCE_BUSY;
			res->desc = IORES_DESC_NONE;
			res->child = NULL;
			if (request_resource(parent, res) == 0)
				reserved = x + 1;
		}
	}
	return 1;
}
__setup("reserve=", reserve_setup);

/*
 * Check if the requested addr and size span more than any slot in the
 * iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		printk(KERN_WARNING "resource sanity check: requesting [mem %#010llx-%#010llx], which spans more than %s %pR\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size - 1),
		       p->name, p);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is exclusive to the kernel and must not be mapped to
 * user space, for example, via /dev/mem.
 *
 * Returns true if exclusive to the kernel, otherwise returns false.
 */
bool iomem_is_exclusive(u64 addr)
{
	const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
						  IORESOURCE_EXCLUSIVE;
	bool skip_children = false, err = false;
	int size = PAGE_SIZE;
	struct resource *p;

	addr = addr & PAGE_MASK;

	read_lock(&resource_lock);
	for_each_resource(&iomem_resource, p, skip_children) {
		if (p->start >= addr + size)
			break;
		if (p->end < addr) {
			skip_children = true;
			continue;
		}
		skip_children = false;

		/*
		 * IORESOURCE_SYSTEM_RAM resources are exclusive if
		 * IORESOURCE_EXCLUSIVE is set, even if they
		 * are not busy and even if "iomem=relaxed" is set. The
		 * responsible driver dynamically adds/removes system RAM within
		 * such an area and uncontrolled access is dangerous.
		 */
		if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
			err = true;
			break;
		}

		/*
		 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
		 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
		 * resource is busy.
		 */
		if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
			continue;
		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
				|| p->flags & IORESOURCE_EXCLUSIVE) {
			err = true;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

struct resource_entry *resource_list_create_entry(struct resource *res,
						  size_t extra_size)
{
	struct resource_entry *entry;

	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
	if (entry) {
		INIT_LIST_HEAD(&entry->node);
		entry->res = res ? res : &entry->__res;
	}

	return entry;
}
EXPORT_SYMBOL(resource_list_create_entry);

void resource_list_free(struct list_head *head)
{
	struct resource_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, node)
		resource_list_destroy_entry(entry);
}
EXPORT_SYMBOL(resource_list_free);

#ifdef CONFIG_DEVICE_PRIVATE
static struct resource *__request_free_mem_region(struct device *dev,
		struct resource *base, unsigned long size, const char *name)
{
	resource_size_t end, addr;
	struct resource *res;
	struct region_devres *dr = NULL;

	size = ALIGN(size, 1UL << PA_SECTION_SHIFT);
	end = min_t(unsigned long, base->end, (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = end - size + 1UL;

	res = alloc_resource(GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	if (dev) {
		dr = devres_alloc(devm_region_release,
				sizeof(struct region_devres), GFP_KERNEL);
		if (!dr) {
			free_resource(res);
			return ERR_PTR(-ENOMEM);
		}
	}

	write_lock(&resource_lock);
	for (; addr > size && addr >= base->start; addr -= size) {
		if (__region_intersects(addr, size, 0, IORES_DESC_NONE) !=
				REGION_DISJOINT)
			continue;

		if (__request_region_locked(res, &iomem_resource, addr, size,
						name, 0))
			break;

		if (dev) {
			dr->parent = &iomem_resource;
			dr->start = addr;
			dr->n = size;
			devres_add(dev, dr);
		}

		res->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
		write_unlock(&resource_lock);

		/*
		 * A driver is claiming this region so revoke any mappings.
		 */
		revoke_iomem(res);
		return res;
	}
	write_unlock(&resource_lock);

	free_resource(res);
	if (dr)
		devres_free(dr);

	return ERR_PTR(-ERANGE);
}

/**
 * devm_request_free_mem_region - find free region for device private memory
 *
 * @dev: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * @base: resource tree to look in
 *
 * This function tries to find an empty range of physical address space big
 * enough to contain the new resource, so that it can later be hotplugged as
 * ZONE_DEVICE memory, which in turn allocates struct pages.
 */
struct resource *devm_request_free_mem_region(struct device *dev,
		struct resource *base, unsigned long size)
{
	return __request_free_mem_region(dev, base, size, dev_name(dev));
}
EXPORT_SYMBOL_GPL(devm_request_free_mem_region);
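
/*
 * Example (illustrative only): roughly how a device-private (HMM-style)
 * driver obtains a range before hotplugging it as ZONE_DEVICE memory. The
 * pagemap setup is an abbreviated sketch, not a complete recipe.
 *
 *	res = devm_request_free_mem_region(dev, &iomem_resource, size);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	pagemap.type = MEMORY_DEVICE_PRIVATE;
 *	pagemap.range.start = res->start;
 *	pagemap.range.end = res->end;
 *	... followed by devm_memremap_pages(dev, &pagemap) ...
 */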

struct resource *request_free_mem_region(struct resource *base,
		unsigned long size, const char *name)
{
	return __request_free_mem_region(NULL, base, size, name);
}
EXPORT_SYMBOL_GPL(request_free_mem_region);

#endif /* CONFIG_DEVICE_PRIVATE */

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

static int iomem_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type iomem_fs_type = {
	.name		= "iomem",
	.owner		= THIS_MODULE,
	.init_fs_context = iomem_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init iomem_init_inode(void)
{
	static struct vfsmount *iomem_vfs_mount;
	static int iomem_fs_cnt;
	struct inode *inode;
	int rc;

	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
		return rc;
	}

	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		pr_err("Cannot allocate inode for iomem: %d\n", rc);
		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
		return rc;
	}

	/*
	 * Publish the iomem revocation inode as initialized.
	 * Pairs with smp_load_acquire() in revoke_iomem().
	 */
	smp_store_release(&iomem_inode, inode);

	return 0;
}

fs_initcall(iomem_init_inode);

__setup("iomem=", strict_iomem);