// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Google, Inc
 * Written by Simon Glass <sjg@chromium.org>
 * Copyright (c) 2016, NVIDIA CORPORATION.
 * Copyright (c) 2018, Theobroma Systems Design und Consulting GmbH
 */

#include <common.h>
#include <clk.h>
#include <clk-uclass.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <dm/device-internal.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <linux/bug.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <asm/global_data.h>

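/* Fetch the clk_ops provided by a clock device's driver */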
static inline const struct clk_ops *clk_dev_ops(struct udevice *dev)
{
	return (const struct clk_ops *)dev->driver->ops;
}

struct clk *dev_get_clk_ptr(struct udevice *dev)
{
	return (struct clk *)dev_get_uclass_priv(dev);
}

#if CONFIG_IS_ENABLED(OF_CONTROL)
# if CONFIG_IS_ENABLED(OF_PLATDATA)
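/*
 * OF_PLATDATA build: resolve the clock from the dtoc-generated phandle info
 * instead of parsing the device tree at run time.
 */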
int clk_get_by_driver_info(struct udevice *dev, struct phandle_1_arg *cells,
			   struct clk *clk)
{
	int ret;

	ret = device_get_by_driver_info_idx(cells->idx, &clk->dev);
	if (ret)
		return ret;
	clk->id = cells->arg[0];

	return 0;
}
# else
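/*
 * Default translation of "clocks" phandle arguments: the first cell (if any)
 * becomes the clock ID and no provider data is set. Providers with more
 * complex cell layouts supply their own of_xlate op instead.
 */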
static int clk_of_xlate_default(struct clk *clk,
				struct ofnode_phandle_args *args)
{
	debug("%s(clk=%p)\n", __func__, clk);

	if (args->args_count > 1) {
		debug("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (args->args_count)
		clk->id = args->args[0];
	else
		clk->id = 0;

	clk->data = 0;

	return 0;
}

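/*
 * Common tail for the clk_get_by_*() lookups: map the phandle target node to
 * its UCLASS_CLK device, translate the phandle arguments into clk->id (via
 * the provider's of_xlate op, or the default one) and finally request the
 * clock from the provider.
 */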
static int clk_get_by_index_tail(int ret, ofnode node,
				 struct ofnode_phandle_args *args,
				 const char *list_name, int index,
				 struct clk *clk)
{
	struct udevice *dev_clk;
	const struct clk_ops *ops;

	assert(clk);
	clk->dev = NULL;
	if (ret)
		goto err;

	ret = uclass_get_device_by_ofnode(UCLASS_CLK, args->node, &dev_clk);
	if (ret) {
		debug("%s: uclass_get_device_by_ofnode failed: err=%d\n",
		      __func__, ret);
		return log_msg_ret("get", ret);
	}

	clk->dev = dev_clk;

	ops = clk_dev_ops(dev_clk);

	if (ops->of_xlate)
		ret = ops->of_xlate(clk, args);
	else
		ret = clk_of_xlate_default(clk, args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		return log_msg_ret("xlate", ret);
	}

	return clk_request(dev_clk, clk);
err:
	debug("%s: Node '%s', property '%s', failed to request CLK index %d: %d\n",
	      __func__, ofnode_get_name(node), list_name, index, ret);

	return log_msg_ret("prop", ret);
}

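/* Look up entry @index of the clock list named @prop_name on @dev */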
static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name,
				   int index, struct clk *clk)
{
	int ret;
	struct ofnode_phandle_args args;

	debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk);

	assert(clk);
	clk->dev = NULL;

	ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0,
					 index, &args);
	if (ret) {
		debug("%s: dev_read_phandle_with_args failed: err=%d\n",
		      __func__, ret);
		return log_ret(ret);
	}

	return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
				     index, clk);
}

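/* Get clock number @index from the "clocks" property of @dev */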
int clk_get_by_index(struct udevice *dev, int index, struct clk *clk)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = dev_read_phandle_with_args(dev, "clocks", "#clock-cells", 0,
					 index, &args);

	return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
				     index, clk);
}

int clk_get_by_index_nodev(ofnode node, int index, struct clk *clk)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = ofnode_parse_phandle_with_args(node, "clocks", "#clock-cells", 0,
					     index, &args);

	return clk_get_by_index_tail(ret, node, &args, "clocks",
				     index, clk);
}

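/*
 * Request every clock listed in the "clocks" property of @dev. If any entry
 * fails, the clocks already obtained are released again before returning.
 */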
int clk_get_bulk(struct udevice *dev, struct clk_bulk *bulk)
{
	int i, ret, err, count;

	bulk->count = 0;

	count = dev_count_phandle_with_args(dev, "clocks", "#clock-cells", 0);
	if (count < 1)
		return count;

	bulk->clks = devm_kcalloc(dev, count, sizeof(struct clk), GFP_KERNEL);
	if (!bulk->clks)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = clk_get_by_index(dev, i, &bulk->clks[i]);
		if (ret < 0)
			goto bulk_get_err;

		++bulk->count;
	}

	return 0;

bulk_get_err:
	err = clk_release_all(bulk->clks, bulk->count);
	if (err)
		debug("%s: could not release all clocks for %p\n",
		      __func__, dev);

	return ret;
}

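/*
 * When CLK_CCF is enabled each clock is its own udevice, so look up the
 * registered clock matching clk->id and operate on that instead; without
 * CCF the clock is returned unchanged.
 */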
static struct clk *clk_set_default_get_by_id(struct clk *clk)
{
	struct clk *c = clk;

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		int ret = clk_get_by_id(clk->id, &c);

		if (ret) {
			debug("%s(): could not get parent clock pointer, id %lu\n",
			      __func__, clk->id);
			return ERR_PTR(ret);
		}
	}

	return c;
}

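/*
 * Apply the "assigned-clock-parents" property of @dev: reparent each entry of
 * "assigned-clocks" to the corresponding parent. @stage selects whether this
 * runs before (0) or after (>0) the clock provider itself is probed, so a
 * provider reparenting its own clocks is deferred until it has been probed.
 */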
static int clk_set_default_parents(struct udevice *dev, int stage)
{
	struct clk clk, parent_clk, *c, *p;
	int index;
	int num_parents;
	int ret;

	num_parents = dev_count_phandle_with_args(dev, "assigned-clock-parents",
						  "#clock-cells", 0);
	if (num_parents < 0) {
		debug("%s: could not read assigned-clock-parents for %p\n",
		      __func__, dev);
		return 0;
	}

	for (index = 0; index < num_parents; index++) {
		ret = clk_get_by_indexed_prop(dev, "assigned-clock-parents",
					      index, &parent_clk);
		/* If -ENOENT, this is a no-op entry */
		if (ret == -ENOENT)
			continue;

		if (ret) {
			debug("%s: could not get parent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		p = clk_set_default_get_by_id(&parent_clk);
		if (IS_ERR(p))
			return PTR_ERR(p);

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		/*
		 * This is a clk provider device trying to reparent itself.
		 * That cannot be done yet; it must wait until after the
		 * device has been probed.
		 */
		if (stage == 0 && clk.dev == dev)
			continue;

		if (stage > 0 && clk.dev != dev)
			/* do not set up the parent clocks twice */
			continue;

		c = clk_set_default_get_by_id(&clk);
		if (IS_ERR(c))
			return PTR_ERR(c);

		ret = clk_set_parent(c, p);
		/*
		 * Not all drivers may support clock-reparenting (as of now).
		 * Ignore errors due to this.
		 */
		if (ret == -ENOSYS)
			continue;

		if (ret < 0) {
			debug("%s: failed to reparent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}
	}

	return 0;
}

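/*
 * Apply the "assigned-clock-rates" property of @dev: set each listed rate on
 * the matching "assigned-clocks" entry, using the same two-stage scheme as
 * clk_set_default_parents() for clocks provided by @dev itself.
 */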
static int clk_set_default_rates(struct udevice *dev, int stage)
{
	struct clk clk, *c;
	int index;
	int num_rates;
	int size;
	int ret = 0;
	u32 *rates = NULL;

	size = dev_read_size(dev, "assigned-clock-rates");
	if (size < 0)
		return 0;

	num_rates = size / sizeof(u32);
	rates = calloc(num_rates, sizeof(u32));
	if (!rates)
		return -ENOMEM;

	ret = dev_read_u32_array(dev, "assigned-clock-rates", rates, num_rates);
	if (ret)
		goto fail;

	for (index = 0; index < num_rates; index++) {
		/* If 0 is passed, this is a no-op */
		if (!rates[index])
			continue;

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			continue;
		}

		/*
		 * This is a clk provider device trying to program itself.
		 * That cannot be done yet; it must wait until after the
		 * device has been probed.
		 */
		if (stage == 0 && clk.dev == dev)
			continue;

		if (stage > 0 && clk.dev != dev)
			/* do not set up the rates twice */
			continue;

		c = clk_set_default_get_by_id(&clk);
		if (IS_ERR(c))
			return PTR_ERR(c);

		ret = clk_set_rate(c, rates[index]);

		if (ret < 0) {
			debug("%s: failed to set rate on clock index %d (%ld) for %s\n",
			      __func__, index, clk.id, dev_read_name(dev));
			break;
		}
	}

fail:
	free(rates);
	return ret;
}

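/*
 * Process the assigned-clocks, assigned-clock-parents and
 * assigned-clock-rates defaults for @dev. @stage is 0 when called before the
 * device is probed and greater than 0 afterwards.
 */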
int clk_set_defaults(struct udevice *dev, int stage)
{
	int ret;

	if (!dev_has_ofnode(dev))
		return 0;

	/*
	 * If this is not an SPL build and we are still running before
	 * relocation, don't take any action.
	 */
	if (!(IS_ENABLED(CONFIG_SPL_BUILD) || (gd->flags & GD_FLG_RELOC)))
		return 0;

	debug("%s(%s)\n", __func__, dev_read_name(dev));

	ret = clk_set_default_parents(dev, stage);
	if (ret)
		return ret;

	ret = clk_set_default_rates(dev, stage);
	if (ret < 0)
		return ret;

	return 0;
}

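/* Resolve @name via "clock-names" and fetch the matching "clocks" entry */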
int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk)
{
	int index;

	debug("%s(dev=%p, name=%s, clk=%p)\n", __func__, dev, name, clk);
	clk->dev = NULL;

	index = dev_read_stringlist_search(dev, "clock-names", name);
	if (index < 0) {
		debug("dev_read_stringlist_search() failed: %d\n", index);
		return index;
	}

	return clk_get_by_index(dev, index, clk);
}
# endif /* OF_PLATDATA */

int clk_get_by_name_nodev(ofnode node, const char *name, struct clk *clk)
{
	int index;

	debug("%s(node=%s, name=%s, clk=%p)\n", __func__,
	      ofnode_get_name(node), name, clk);
	clk->dev = NULL;

	index = ofnode_stringlist_search(node, "clock-names", name);
	if (index < 0) {
		debug("ofnode_stringlist_search() failed: %d\n", index);
		return index;
	}

	return clk_get_by_index_nodev(node, index, clk);
}

int clk_get_optional_nodev(ofnode node, const char *name, struct clk *clk)
{
	int ret;

	ret = clk_get_by_name_nodev(node, name, clk);
	if (ret == -ENODATA)
		return 0;

	return ret;
}

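/* Disable and free each previously-requested clock in the @clk array */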
int clk_release_all(struct clk *clk, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		debug("%s(clk[%d]=%p)\n", __func__, i, &clk[i]);

		/* check if clock has been previously requested */
		if (!clk[i].dev)
			continue;

		ret = clk_disable(&clk[i]);
		if (ret && ret != -ENOSYS)
			return ret;

		ret = clk_free(&clk[i]);
		if (ret && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

#endif /* OF_CONTROL */

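/* Bind @clk to provider @dev and let the driver's request op validate it */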
int clk_request(struct udevice *dev, struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk);
	if (!clk)
		return 0;
	ops = clk_dev_ops(dev);

	clk->dev = dev;

	if (!ops->request)
		return 0;

	return ops->request(clk);
}

int clk_free(struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->rfree)
		return 0;

	return ops->rfree(clk);
}

ulong clk_get_rate(struct clk *clk)
{
	const struct clk_ops *ops;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->get_rate)
		return -ENOSYS;

	ret = ops->get_rate(clk);
	if (ret)
		return log_ret(ret);

	return 0;
}

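/*
 * In the driver model the parent clock is simply the clock device's DM
 * parent, so return that device's uclass-private struct clk.
 */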
struct clk *clk_get_parent(struct clk *clk)
{
	struct udevice *pdev;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return NULL;

	pdev = dev_get_parent(clk->dev);
	pclk = dev_get_clk_ptr(pdev);
	if (!pclk)
		return ERR_PTR(-ENODEV);

	return pclk;
}

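/*
 * Return the parent clock's rate, caching it in pclk->rate unless the parent
 * has CLK_GET_RATE_NOCACHE set.
 */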
long long clk_get_parent_rate(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;

	pclk = clk_get_parent(clk);
	if (IS_ERR(pclk))
		return -ENODEV;

	ops = clk_dev_ops(pclk->dev);
	if (!ops->get_rate)
		return -ENOSYS;

	/* Read the rate if it is not cached yet, or if caching is disabled */
	if (!pclk->rate || pclk->flags & CLK_GET_RATE_NOCACHE)
		pclk->rate = clk_get_rate(pclk);

	return pclk->rate;
}

ulong clk_round_rate(struct clk *clk, ulong rate)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
	if (!clk_valid(clk))
		return 0;

	ops = clk_dev_ops(clk->dev);
	if (!ops->round_rate)
		return -ENOSYS;

	return ops->round_rate(clk, rate);
}

ulong clk_set_rate(struct clk *clk, ulong rate)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->set_rate)
		return -ENOSYS;

	return ops->set_rate(clk, rate);
}

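/*
 * Ask the provider to reparent @clk; with CLK_CCF the driver-model hierarchy
 * is updated as well so the new parent is reflected there too.
 */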
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	const struct clk_ops *ops;
	int ret;

	debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->set_parent)
		return -ENOSYS;

	ret = ops->set_parent(clk, parent);
	if (ret)
		return ret;

	if (CONFIG_IS_ENABLED(CLK_CCF))
		ret = device_reparent(clk->dev, parent->dev);

	return ret;
}

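/*
 * With CLK_CCF, enable counts are tracked per clock and a clock's parent is
 * enabled before the clock itself; without CCF the call goes straight to the
 * provider's enable op.
 */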
int clk_enable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		/* Treat id 0 as an invalid clock, such as a dummy clock */
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			if (clkp->enable_count) {
				clkp->enable_count++;
				return 0;
			}
			if (clkp->dev->parent &&
			    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
				ret = clk_enable(dev_get_clk_ptr(clkp->dev->parent));
				if (ret) {
					printf("Enable %s failed\n",
					       clkp->dev->parent->name);
					return ret;
				}
			}
		}

		if (ops->enable) {
			ret = ops->enable(clk);
			if (ret) {
				printf("Enable %s failed\n", clk->dev->name);
				return ret;
			}
		}
		if (clkp)
			clkp->enable_count++;
	} else {
		if (!ops->enable)
			return -ENOSYS;
		return ops->enable(clk);
	}

	return 0;
}

int clk_enable_bulk(struct clk_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = clk_enable(&bulk->clks[i]);
		if (ret < 0 && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

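/*
 * Counterpart to clk_enable(): with CLK_CCF the enable count is decremented
 * and the clock (and then its parent) is only turned off once the count drops
 * to zero; clocks marked CLK_IS_CRITICAL are never disabled.
 */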
int clk_disable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			if (clkp->flags & CLK_IS_CRITICAL)
				return 0;

			if (clkp->enable_count == 0) {
				printf("clk %s already disabled\n",
				       clkp->dev->name);
				return 0;
			}

			if (--clkp->enable_count > 0)
				return 0;
		}

		if (ops->disable) {
			ret = ops->disable(clk);
			if (ret)
				return ret;
		}

		if (clkp && clkp->dev->parent &&
		    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
			ret = clk_disable(dev_get_clk_ptr(clkp->dev->parent));
			if (ret) {
				printf("Disable %s failed\n",
				       clkp->dev->parent->name);
				return ret;
			}
		}
	} else {
		if (!ops->disable)
			return -ENOSYS;

		return ops->disable(clk);
	}

	return 0;
}

int clk_disable_bulk(struct clk_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = clk_disable(&bulk->clks[i]);
		if (ret < 0 && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

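/* Scan all UCLASS_CLK devices for the one whose clock has the given ID */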
int clk_get_by_id(ulong id, struct clk **clkp)
{
	struct udevice *dev;
	struct uclass *uc;
	int ret;

	ret = uclass_get(UCLASS_CLK, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		struct clk *clk = dev_get_clk_ptr(dev);

		if (clk && clk->id == id) {
			*clkp = clk;
			return 0;
		}
	}

	return -ENOENT;
}

bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* trivial case #2: one of the clk pointers is NULL */
	if (!p || !q)
		return false;

	/* same device, id and data */
	if (p->dev == q->dev && p->id == q->id && p->data == q->data)
		return true;

	return false;
}

static void devm_clk_release(struct udevice *dev, void *res)
{
	clk_free(res);
}

static int devm_clk_match(struct udevice *dev, void *res, void *data)
{
	return res == data;
}

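/*
 * devres-managed variant of clk_get_by_name(): the clock is freed
 * automatically (via clk_free()) when @dev is removed.
 */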
struct clk *devm_clk_get(struct udevice *dev, const char *id)
{
	int rc;
	struct clk *clk;

	clk = devres_alloc(devm_clk_release, sizeof(struct clk), __GFP_ZERO);
	if (unlikely(!clk))
		return ERR_PTR(-ENOMEM);

	rc = clk_get_by_name(dev, id, clk);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, clk);
	return clk;
}

struct clk *devm_clk_get_optional(struct udevice *dev, const char *id)
{
	struct clk *clk = devm_clk_get(dev, id);

	if (PTR_ERR(clk) == -ENODATA)
		return NULL;

	return clk;
}

void devm_clk_put(struct udevice *dev, struct clk *clk)
{
	int rc;

	if (!clk)
		return;

	rc = devres_release(dev, devm_clk_release, devm_clk_match, clk);
	WARN_ON(rc);
}

int clk_uclass_post_probe(struct udevice *dev)
{
	/*
	 * Call clk_set_defaults() again after a clock provider is probed.
	 * This takes care of the case where the DT uses assigned-clocks to
	 * set up default parents and rates on the provider's own clocks.
	 */
	clk_set_defaults(dev, 1);

	return 0;
}

UCLASS_DRIVER(clk) = {
	.id		= UCLASS_CLK,
	.name		= "clk",
	.post_probe	= clk_uclass_post_probe,
};