1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
4  *
5  * Authors:
6  *   Serge Semin <Sergey.Semin@baikalelectronics.ru>
7  *   Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
8  *
9  * Baikal-T1 CCU PLL interface driver
10  */
11 
12 #define pr_fmt(fmt) "bt1-ccu-pll: " fmt
13 
14 #include <linux/kernel.h>
15 #include <linux/printk.h>
16 #include <linux/limits.h>
17 #include <linux/bits.h>
18 #include <linux/bitfield.h>
19 #include <linux/slab.h>
20 #include <linux/clk-provider.h>
21 #include <linux/of.h>
22 #include <linux/spinlock.h>
23 #include <linux/regmap.h>
24 #include <linux/iopoll.h>
25 #include <linux/time64.h>
26 #include <linux/rational.h>
27 #include <linux/debugfs.h>
28 
29 #include "ccu-pll.h"
30 
/* PLL control register and its fields */
#define CCU_PLL_CTL			0x000
#define CCU_PLL_CTL_EN			BIT(0)	/* PLL enable */
#define CCU_PLL_CTL_RST			BIT(1)	/* PLL reset request */
#define CCU_PLL_CTL_CLKR_FLD		2	/* Reference divider, stored as NR - 1 */
#define CCU_PLL_CTL_CLKR_MASK		GENMASK(7, CCU_PLL_CTL_CLKR_FLD)
#define CCU_PLL_CTL_CLKF_FLD		8	/* Feedback multiplier, stored as NF - 1 */
#define CCU_PLL_CTL_CLKF_MASK		GENMASK(20, CCU_PLL_CTL_CLKF_FLD)
#define CCU_PLL_CTL_CLKOD_FLD		21	/* Output divider, stored as OD - 1 */
#define CCU_PLL_CTL_CLKOD_MASK		GENMASK(24, CCU_PLL_CTL_CLKOD_FLD)
#define CCU_PLL_CTL_BYPASS		BIT(30)	/* PLL bypass mode */
#define CCU_PLL_CTL_LOCK		BIT(31)	/* PLL lock status flag */
/* PLL control1 register: loop bandwidth adjustment field */
#define CCU_PLL_CTL1			0x004
#define CCU_PLL_CTL1_BWADJ_FLD		3
#define CCU_PLL_CTL1_BWADJ_MASK		GENMASK(14, CCU_PLL_CTL1_BWADJ_FLD)

/* Number of lock-delay periods polled before a reset is declared failed */
#define CCU_PLL_LOCK_CHECK_RETRIES	50

/* Maximum factor values encodable in the corresponding CTL fields */
#define CCU_PLL_NR_MAX \
	((CCU_PLL_CTL_CLKR_MASK >> CCU_PLL_CTL_CLKR_FLD) + 1)
/*
 * NOTE(review): the "+ 1" inside the shift halves the NF ceiling relative
 * to the raw field width - presumably a deliberate hardware restriction;
 * confirm against the Baikal-T1 PLL documentation.
 */
#define CCU_PLL_NF_MAX \
	((CCU_PLL_CTL_CLKF_MASK >> (CCU_PLL_CTL_CLKF_FLD + 1)) + 1)
#define CCU_PLL_OD_MAX \
	((CCU_PLL_CTL_CLKOD_MASK >> CCU_PLL_CTL_CLKOD_FLD) + 1)
#define CCU_PLL_NB_MAX \
	((CCU_PLL_CTL1_BWADJ_MASK >> CCU_PLL_CTL1_BWADJ_FLD) + 1)
/* Valid frequency ranges, Hz: divided reference input, output and VCO */
#define CCU_PLL_FDIV_MIN		427000UL
#define CCU_PLL_FDIV_MAX		3500000000UL
#define CCU_PLL_FOUT_MIN		200000000UL
#define CCU_PLL_FOUT_MAX		2500000000UL
#define CCU_PLL_FVCO_MIN		700000000UL
#define CCU_PLL_FVCO_MAX		3500000000UL
/* Od is restricted to 1 or an even value (see ccu_pll_calc_factors()) */
#define CCU_PLL_CLKOD_FACTOR		2
63 
ccu_pll_lock_delay_us(unsigned long ref_clk,unsigned long nr)64 static inline unsigned long ccu_pll_lock_delay_us(unsigned long ref_clk,
65 						  unsigned long nr)
66 {
67 	u64 us = 500ULL * nr * USEC_PER_SEC;
68 
69 	do_div(us, ref_clk);
70 
71 	return us;
72 }
73 
ccu_pll_calc_freq(unsigned long ref_clk,unsigned long nr,unsigned long nf,unsigned long od)74 static inline unsigned long ccu_pll_calc_freq(unsigned long ref_clk,
75 					      unsigned long nr,
76 					      unsigned long nf,
77 					      unsigned long od)
78 {
79 	u64 tmp = ref_clk;
80 
81 	do_div(tmp, nr);
82 	tmp *= nf;
83 	do_div(tmp, od);
84 
85 	return tmp;
86 }
87 
ccu_pll_reset(struct ccu_pll * pll,unsigned long ref_clk,unsigned long nr)88 static int ccu_pll_reset(struct ccu_pll *pll, unsigned long ref_clk,
89 			 unsigned long nr)
90 {
91 	unsigned long ud, ut;
92 	u32 val;
93 
94 	ud = ccu_pll_lock_delay_us(ref_clk, nr);
95 	ut = ud * CCU_PLL_LOCK_CHECK_RETRIES;
96 
97 	regmap_update_bits(pll->sys_regs, pll->reg_ctl,
98 			   CCU_PLL_CTL_RST, CCU_PLL_CTL_RST);
99 
100 	return regmap_read_poll_timeout_atomic(pll->sys_regs, pll->reg_ctl, val,
101 					       val & CCU_PLL_CTL_LOCK, ud, ut);
102 }
103 
ccu_pll_enable(struct clk_hw * hw)104 static int ccu_pll_enable(struct clk_hw *hw)
105 {
106 	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
107 	struct ccu_pll *pll = to_ccu_pll(hw);
108 	unsigned long flags;
109 	u32 val = 0;
110 	int ret;
111 
112 	if (!parent_hw) {
113 		pr_err("Can't enable '%s' with no parent", clk_hw_get_name(hw));
114 		return -EINVAL;
115 	}
116 
117 	regmap_read(pll->sys_regs, pll->reg_ctl, &val);
118 	if (val & CCU_PLL_CTL_EN)
119 		return 0;
120 
121 	spin_lock_irqsave(&pll->lock, flags);
122 	regmap_write(pll->sys_regs, pll->reg_ctl, val | CCU_PLL_CTL_EN);
123 	ret = ccu_pll_reset(pll, clk_hw_get_rate(parent_hw),
124 			    FIELD_GET(CCU_PLL_CTL_CLKR_MASK, val) + 1);
125 	spin_unlock_irqrestore(&pll->lock, flags);
126 	if (ret)
127 		pr_err("PLL '%s' reset timed out\n", clk_hw_get_name(hw));
128 
129 	return ret;
130 }
131 
ccu_pll_disable(struct clk_hw * hw)132 static void ccu_pll_disable(struct clk_hw *hw)
133 {
134 	struct ccu_pll *pll = to_ccu_pll(hw);
135 	unsigned long flags;
136 
137 	spin_lock_irqsave(&pll->lock, flags);
138 	regmap_update_bits(pll->sys_regs, pll->reg_ctl, CCU_PLL_CTL_EN, 0);
139 	spin_unlock_irqrestore(&pll->lock, flags);
140 }
141 
ccu_pll_is_enabled(struct clk_hw * hw)142 static int ccu_pll_is_enabled(struct clk_hw *hw)
143 {
144 	struct ccu_pll *pll = to_ccu_pll(hw);
145 	u32 val = 0;
146 
147 	regmap_read(pll->sys_regs, pll->reg_ctl, &val);
148 
149 	return !!(val & CCU_PLL_CTL_EN);
150 }
151 
ccu_pll_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)152 static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
153 					 unsigned long parent_rate)
154 {
155 	struct ccu_pll *pll = to_ccu_pll(hw);
156 	unsigned long nr, nf, od;
157 	u32 val = 0;
158 
159 	regmap_read(pll->sys_regs, pll->reg_ctl, &val);
160 	nr = FIELD_GET(CCU_PLL_CTL_CLKR_MASK, val) + 1;
161 	nf = FIELD_GET(CCU_PLL_CTL_CLKF_MASK, val) + 1;
162 	od = FIELD_GET(CCU_PLL_CTL_CLKOD_MASK, val) + 1;
163 
164 	return ccu_pll_calc_freq(parent_rate, nr, nf, od);
165 }
166 
/*
 * Find the [NR;NF;OD] factors which best approximate the requested rate
 * for the given parent rate, honouring the hardware factor limits, the
 * valid divided-input (Fdiv) range and the VCO (Fvco) range.
 *
 * NOTE(review): if the loop below never executes (initial nri > nr_max)
 * the outputs are left untouched, so callers must pre-initialize
 * *nr/*nf/*od - ccu_pll_round_rate() does; verify the set_rate paths.
 */
static void ccu_pll_calc_factors(unsigned long rate, unsigned long parent_rate,
				 unsigned long *nr, unsigned long *nf,
				 unsigned long *od)
{
	unsigned long err, freq, min_err = ULONG_MAX;
	unsigned long num, denom, n1, d1, nri;
	unsigned long nr_max, nf_max, od_max;

	/*
	 * Make sure PLL is working with valid input signal (Fdiv). If
	 * you want to speed the function up just reduce CCU_PLL_NR_MAX.
	 * This will cause a worse approximation though.
	 */
	nri = (parent_rate / CCU_PLL_FDIV_MAX) + 1;
	nr_max = min(parent_rate / CCU_PLL_FDIV_MIN, CCU_PLL_NR_MAX);

	/*
	 * Find a closest [nr;nf;od] vector taking into account the
	 * limitations like: 1) 700MHz <= Fvco <= 3.5GHz, 2) PLL Od is
	 * either 1 or even number within the acceptable range (alas 1s
	 * is also excluded by the next loop).
	 */
	for (; nri <= nr_max; ++nri) {
		/* Use Od factor to fulfill the limitation 2). */
		num = CCU_PLL_CLKOD_FACTOR * rate;
		denom = parent_rate / nri;

		/*
		 * Make sure Fvco is within the acceptable range to fulfill
		 * the condition 1). Note due to the CCU_PLL_CLKOD_FACTOR value
		 * the actual upper limit is also divided by that factor.
		 * It's not big problem for us since practically there is no
		 * need in clocks with that high frequency.
		 */
		nf_max = min(CCU_PLL_FVCO_MAX / denom, CCU_PLL_NF_MAX);
		od_max = CCU_PLL_OD_MAX / CCU_PLL_CLKOD_FACTOR;

		/*
		 * Bypass the out-of-bound values, which can't be properly
		 * handled by the rational fraction approximation algorithm.
		 */
		if (num / denom >= nf_max) {
			n1 = nf_max;
			d1 = 1;
		} else if (denom / num >= od_max) {
			n1 = 1;
			d1 = od_max;
		} else {
			rational_best_approximation(num, denom, nf_max, od_max,
						    &n1, &d1);
		}

		/*
		 * Select the best approximation of the target rate. Note both
		 * freq and num are scaled by CCU_PLL_CLKOD_FACTOR here (d1 is
		 * the halved Od), so the error comparison stays consistent.
		 */
		freq = ccu_pll_calc_freq(parent_rate, nri, n1, d1);
		err = abs((int64_t)freq - num);
		if (err < min_err) {
			min_err = err;
			*nr = nri;
			*nf = n1;
			*od = CCU_PLL_CLKOD_FACTOR * d1;
		}
	}
}
230 
/* Report the closest rate the PLL can produce from the given parent rate. */
static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	unsigned long clkr = 1, clkf = 1, clkod = 1;

	ccu_pll_calc_factors(rate, *parent_rate, &clkr, &clkf, &clkod);

	return ccu_pll_calc_freq(*parent_rate, clkr, clkf, clkod);
}
240 
241 /*
242  * This method is used for PLLs, which support the on-the-fly dividers
243  * adjustment. So there is no need in gating such clocks.
244  */
ccu_pll_set_rate_reset(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)245 static int ccu_pll_set_rate_reset(struct clk_hw *hw, unsigned long rate,
246 				  unsigned long parent_rate)
247 {
248 	struct ccu_pll *pll = to_ccu_pll(hw);
249 	unsigned long nr, nf, od;
250 	unsigned long flags;
251 	u32 mask, val;
252 	int ret;
253 
254 	ccu_pll_calc_factors(rate, parent_rate, &nr, &nf, &od);
255 
256 	mask = CCU_PLL_CTL_CLKR_MASK | CCU_PLL_CTL_CLKF_MASK |
257 	       CCU_PLL_CTL_CLKOD_MASK;
258 	val = FIELD_PREP(CCU_PLL_CTL_CLKR_MASK, nr - 1) |
259 	      FIELD_PREP(CCU_PLL_CTL_CLKF_MASK, nf - 1) |
260 	      FIELD_PREP(CCU_PLL_CTL_CLKOD_MASK, od - 1);
261 
262 	spin_lock_irqsave(&pll->lock, flags);
263 	regmap_update_bits(pll->sys_regs, pll->reg_ctl, mask, val);
264 	ret = ccu_pll_reset(pll, parent_rate, nr);
265 	spin_unlock_irqrestore(&pll->lock, flags);
266 	if (ret)
267 		pr_err("PLL '%s' reset timed out\n", clk_hw_get_name(hw));
268 
269 	return ret;
270 }
271 
272 /*
273  * This method is used for PLLs, which don't support the on-the-fly dividers
274  * adjustment. So the corresponding clocks are supposed to be gated first.
275  */
ccu_pll_set_rate_norst(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)276 static int ccu_pll_set_rate_norst(struct clk_hw *hw, unsigned long rate,
277 				  unsigned long parent_rate)
278 {
279 	struct ccu_pll *pll = to_ccu_pll(hw);
280 	unsigned long nr, nf, od;
281 	unsigned long flags;
282 	u32 mask, val;
283 
284 	ccu_pll_calc_factors(rate, parent_rate, &nr, &nf, &od);
285 
286 	/*
287 	 * Disable PLL if it was enabled by default or left enabled by the
288 	 * system bootloader.
289 	 */
290 	mask = CCU_PLL_CTL_CLKR_MASK | CCU_PLL_CTL_CLKF_MASK |
291 	       CCU_PLL_CTL_CLKOD_MASK | CCU_PLL_CTL_EN;
292 	val = FIELD_PREP(CCU_PLL_CTL_CLKR_MASK, nr - 1) |
293 	      FIELD_PREP(CCU_PLL_CTL_CLKF_MASK, nf - 1) |
294 	      FIELD_PREP(CCU_PLL_CTL_CLKOD_MASK, od - 1);
295 
296 	spin_lock_irqsave(&pll->lock, flags);
297 	regmap_update_bits(pll->sys_regs, pll->reg_ctl, mask, val);
298 	spin_unlock_irqrestore(&pll->lock, flags);
299 
300 	return 0;
301 }
302 
303 #ifdef CONFIG_DEBUG_FS
304 
/* Debugfs descriptor of a single-bit CTL flag exposed as a file */
struct ccu_pll_dbgfs_bit {
	struct ccu_pll *pll;	/* owning PLL, bound at file creation */
	const char *name;	/* debugfs file name */
	unsigned int reg;	/* register offset added to pll->reg_ctl */
	u32 mask;		/* bit to read/write */
};

/* Debugfs descriptor of a multi-bit divider field exposed as a file */
struct ccu_pll_dbgfs_fld {
	struct ccu_pll *pll;	/* owning PLL, bound at file creation */
	const char *name;	/* debugfs file name */
	unsigned int reg;	/* register offset added to pll->reg_ctl */
	unsigned int lsb;	/* least significant bit of the field */
	u32 mask;		/* field mask */
	u32 min;		/* minimum accepted factor value */
	u32 max;		/* maximum accepted factor value */
};

/* Static initializer for a ccu_pll_dbgfs_bit template (pll bound later) */
#define CCU_PLL_DBGFS_BIT_ATTR(_name, _reg, _mask)	\
	{						\
		.name = _name,				\
		.reg = _reg,				\
		.mask = _mask				\
	}

/* Static initializer for a ccu_pll_dbgfs_fld template (pll bound later) */
#define CCU_PLL_DBGFS_FLD_ATTR(_name, _reg, _lsb, _mask, _min, _max)	\
	{								\
		.name = _name,						\
		.reg = _reg,						\
		.lsb = _lsb,						\
		.mask = _mask,						\
		.min = _min,						\
		.max = _max						\
	}
338 
/* Template descriptors of the CTL-register flags exposed via debugfs */
static const struct ccu_pll_dbgfs_bit ccu_pll_bits[] = {
	CCU_PLL_DBGFS_BIT_ATTR("pll_en", CCU_PLL_CTL, CCU_PLL_CTL_EN),
	CCU_PLL_DBGFS_BIT_ATTR("pll_rst", CCU_PLL_CTL, CCU_PLL_CTL_RST),
	CCU_PLL_DBGFS_BIT_ATTR("pll_bypass", CCU_PLL_CTL, CCU_PLL_CTL_BYPASS),
	CCU_PLL_DBGFS_BIT_ATTR("pll_lock", CCU_PLL_CTL, CCU_PLL_CTL_LOCK)
};

#define CCU_PLL_DBGFS_BIT_NUM	ARRAY_SIZE(ccu_pll_bits)
347 
/* Template descriptors of the divider fields exposed via debugfs */
static const struct ccu_pll_dbgfs_fld ccu_pll_flds[] = {
	CCU_PLL_DBGFS_FLD_ATTR("pll_nr", CCU_PLL_CTL, CCU_PLL_CTL_CLKR_FLD,
				CCU_PLL_CTL_CLKR_MASK, 1, CCU_PLL_NR_MAX),
	CCU_PLL_DBGFS_FLD_ATTR("pll_nf", CCU_PLL_CTL, CCU_PLL_CTL_CLKF_FLD,
				CCU_PLL_CTL_CLKF_MASK, 1, CCU_PLL_NF_MAX),
	CCU_PLL_DBGFS_FLD_ATTR("pll_od", CCU_PLL_CTL, CCU_PLL_CTL_CLKOD_FLD,
				CCU_PLL_CTL_CLKOD_MASK, 1, CCU_PLL_OD_MAX),
	CCU_PLL_DBGFS_FLD_ATTR("pll_nb", CCU_PLL_CTL1, CCU_PLL_CTL1_BWADJ_FLD,
				CCU_PLL_CTL1_BWADJ_MASK, 1, CCU_PLL_NB_MAX)
};

#define CCU_PLL_DBGFS_FLD_NUM	ARRAY_SIZE(ccu_pll_flds)
360 
/*
 * Changing the PLL settings behind the clock framework's back can be
 * dangerous, therefore we don't provide any Kconfig-based compile-time
 * option to enable this feature.
 */
366 #undef CCU_PLL_ALLOW_WRITE_DEBUGFS
367 #ifdef CCU_PLL_ALLOW_WRITE_DEBUGFS
368 
/* Debugfs write handler: set or clear a single CTL flag. */
static int ccu_pll_dbgfs_bit_set(void *priv, u64 val)
{
	const struct ccu_pll_dbgfs_bit *bit = priv;
	struct ccu_pll *pll = bit->pll;
	u32 data = val ? bit->mask : 0;
	unsigned long irqflags;

	spin_lock_irqsave(&pll->lock, irqflags);
	regmap_update_bits(pll->sys_regs, pll->reg_ctl + bit->reg,
			   bit->mask, data);
	spin_unlock_irqrestore(&pll->lock, irqflags);

	return 0;
}
382 
/* Debugfs write handler: update a divider field with a clamped factor. */
static int ccu_pll_dbgfs_fld_set(void *priv, u64 val)
{
	struct ccu_pll_dbgfs_fld *fld = priv;
	struct ccu_pll *pll = fld->pll;
	unsigned long irqflags;
	u32 raw;

	/* Factors are stored biased by one and bounded by the field limits. */
	val = clamp_t(u64, val, fld->min, fld->max);
	raw = ((val - 1) << fld->lsb) & fld->mask;

	spin_lock_irqsave(&pll->lock, irqflags);
	regmap_update_bits(pll->sys_regs, pll->reg_ctl + fld->reg,
			   fld->mask, raw);
	spin_unlock_irqrestore(&pll->lock, irqflags);

	return 0;
}
400 
401 #define ccu_pll_dbgfs_mode	0644
402 
403 #else /* !CCU_PLL_ALLOW_WRITE_DEBUGFS */
404 
405 #define ccu_pll_dbgfs_bit_set	NULL
406 #define ccu_pll_dbgfs_fld_set	NULL
407 #define ccu_pll_dbgfs_mode	0444
408 
409 #endif /* !CCU_PLL_ALLOW_WRITE_DEBUGFS */
410 
ccu_pll_dbgfs_bit_get(void * priv,u64 * val)411 static int ccu_pll_dbgfs_bit_get(void *priv, u64 *val)
412 {
413 	struct ccu_pll_dbgfs_bit *bit = priv;
414 	struct ccu_pll *pll = bit->pll;
415 	u32 data = 0;
416 
417 	regmap_read(pll->sys_regs, pll->reg_ctl + bit->reg, &data);
418 	*val = !!(data & bit->mask);
419 
420 	return 0;
421 }
422 DEFINE_DEBUGFS_ATTRIBUTE(ccu_pll_dbgfs_bit_fops,
423 	ccu_pll_dbgfs_bit_get, ccu_pll_dbgfs_bit_set, "%llu\n");
424 
ccu_pll_dbgfs_fld_get(void * priv,u64 * val)425 static int ccu_pll_dbgfs_fld_get(void *priv, u64 *val)
426 {
427 	struct ccu_pll_dbgfs_fld *fld = priv;
428 	struct ccu_pll *pll = fld->pll;
429 	u32 data = 0;
430 
431 	regmap_read(pll->sys_regs, pll->reg_ctl + fld->reg, &data);
432 	*val = ((data & fld->mask) >> fld->lsb) + 1;
433 
434 	return 0;
435 }
436 DEFINE_DEBUGFS_ATTRIBUTE(ccu_pll_dbgfs_fld_fops,
437 	ccu_pll_dbgfs_fld_get, ccu_pll_dbgfs_fld_set, "%llu\n");
438 
/*
 * Populate the per-clock debugfs directory with files exposing the PLL
 * control bits and divider fields.
 *
 * The template arrays are duplicated per PLL instance so each file's
 * private data can carry the owning PLL pointer. The copies are
 * intentionally never freed: the debugfs files keep referencing them for
 * the clock's whole lifetime.
 */
static void ccu_pll_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_pll *pll = to_ccu_pll(hw);
	struct ccu_pll_dbgfs_bit *bits;
	struct ccu_pll_dbgfs_fld *flds;
	int idx;

	bits = kcalloc(CCU_PLL_DBGFS_BIT_NUM, sizeof(*bits), GFP_KERNEL);
	if (!bits)
		return;

	for (idx = 0; idx < CCU_PLL_DBGFS_BIT_NUM; ++idx) {
		/* Copy the template and bind it to this PLL instance. */
		bits[idx] = ccu_pll_bits[idx];
		bits[idx].pll = pll;

		debugfs_create_file_unsafe(bits[idx].name, ccu_pll_dbgfs_mode,
					   dentry, &bits[idx],
					   &ccu_pll_dbgfs_bit_fops);
	}

	flds = kcalloc(CCU_PLL_DBGFS_FLD_NUM, sizeof(*flds), GFP_KERNEL);
	if (!flds)
		return;	/* bit files stay registered; field files are skipped */

	for (idx = 0; idx < CCU_PLL_DBGFS_FLD_NUM; ++idx) {
		/* Copy the template and bind it to this PLL instance. */
		flds[idx] = ccu_pll_flds[idx];
		flds[idx].pll = pll;

		debugfs_create_file_unsafe(flds[idx].name, ccu_pll_dbgfs_mode,
					   dentry, &flds[idx],
					   &ccu_pll_dbgfs_fld_fops);
	}
}
472 
473 #else /* !CONFIG_DEBUG_FS */
474 
475 #define ccu_pll_debug_init NULL
476 
477 #endif /* !CONFIG_DEBUG_FS */
478 
/*
 * Ops for PLLs which must be gated around a rate change
 * (CLK_SET_RATE_GATE): dividers are written without an in-place reset.
 */
static const struct clk_ops ccu_pll_gate_to_set_ops = {
	.enable = ccu_pll_enable,
	.disable = ccu_pll_disable,
	.is_enabled = ccu_pll_is_enabled,
	.recalc_rate = ccu_pll_recalc_rate,
	.round_rate = ccu_pll_round_rate,
	.set_rate = ccu_pll_set_rate_norst,
	.debug_init = ccu_pll_debug_init
};

/*
 * Ops for PLLs supporting on-the-fly rate changes: dividers are written
 * and the PLL is reset so it re-locks while running.
 */
static const struct clk_ops ccu_pll_straight_set_ops = {
	.enable = ccu_pll_enable,
	.disable = ccu_pll_disable,
	.is_enabled = ccu_pll_is_enabled,
	.recalc_rate = ccu_pll_recalc_rate,
	.round_rate = ccu_pll_round_rate,
	.set_rate = ccu_pll_set_rate_reset,
	.debug_init = ccu_pll_debug_init
};
498 
ccu_pll_hw_register(const struct ccu_pll_init_data * pll_init)499 struct ccu_pll *ccu_pll_hw_register(const struct ccu_pll_init_data *pll_init)
500 {
501 	struct clk_parent_data parent_data = { };
502 	struct clk_init_data hw_init = { };
503 	struct ccu_pll *pll;
504 	int ret;
505 
506 	if (!pll_init)
507 		return ERR_PTR(-EINVAL);
508 
509 	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
510 	if (!pll)
511 		return ERR_PTR(-ENOMEM);
512 
513 	/*
514 	 * Note since Baikal-T1 System Controller registers are MMIO-backed
515 	 * we won't check the regmap IO operations return status, because it
516 	 * must be zero anyway.
517 	 */
518 	pll->hw.init = &hw_init;
519 	pll->reg_ctl = pll_init->base + CCU_PLL_CTL;
520 	pll->reg_ctl1 = pll_init->base + CCU_PLL_CTL1;
521 	pll->sys_regs = pll_init->sys_regs;
522 	pll->id = pll_init->id;
523 	spin_lock_init(&pll->lock);
524 
525 	hw_init.name = pll_init->name;
526 	hw_init.flags = pll_init->flags;
527 
528 	if (hw_init.flags & CLK_SET_RATE_GATE)
529 		hw_init.ops = &ccu_pll_gate_to_set_ops;
530 	else
531 		hw_init.ops = &ccu_pll_straight_set_ops;
532 
533 	if (!pll_init->parent_name) {
534 		ret = -EINVAL;
535 		goto err_free_pll;
536 	}
537 	parent_data.fw_name = pll_init->parent_name;
538 	hw_init.parent_data = &parent_data;
539 	hw_init.num_parents = 1;
540 
541 	ret = of_clk_hw_register(pll_init->np, &pll->hw);
542 	if (ret)
543 		goto err_free_pll;
544 
545 	return pll;
546 
547 err_free_pll:
548 	kfree(pll);
549 
550 	return ERR_PTR(ret);
551 }
552 
/*
 * Unregister the PLL clock from the common clock framework and release
 * the descriptor allocated by ccu_pll_hw_register().
 */
void ccu_pll_hw_unregister(struct ccu_pll *pll)
{
	clk_hw_unregister(&pll->hw);

	kfree(pll);
}
559