/*
 * Synopsys HSDK SDP Generic PLL clock driver
 *
 * Copyright (C) 2017 Synopsys
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define CGU_PLL_CTRL	0x000 /* ARC PLL control register */
#define CGU_PLL_STATUS	0x004 /* ARC PLL status register */
#define CGU_PLL_FMEAS	0x008 /* ARC PLL frequency measurement register */
#define CGU_PLL_MON	0x00C /* ARC PLL monitor register */

#define CGU_PLL_CTRL_ODIV_SHIFT		2
#define CGU_PLL_CTRL_IDIV_SHIFT		4
#define CGU_PLL_CTRL_FBDIV_SHIFT	9
#define CGU_PLL_CTRL_BAND_SHIFT		20

#define CGU_PLL_CTRL_ODIV_MASK		GENMASK(3, CGU_PLL_CTRL_ODIV_SHIFT)
#define CGU_PLL_CTRL_IDIV_MASK		GENMASK(8, CGU_PLL_CTRL_IDIV_SHIFT)
#define CGU_PLL_CTRL_FBDIV_MASK		GENMASK(15, CGU_PLL_CTRL_FBDIV_SHIFT)

#define CGU_PLL_CTRL_PD			BIT(0)
#define CGU_PLL_CTRL_BYPASS		BIT(1)

#define CGU_PLL_STATUS_LOCK		BIT(0)
#define CGU_PLL_STATUS_ERR		BIT(1)

#define HSDK_PLL_MAX_LOCK_TIME		100 /* 100 us */

#define CGU_PLL_SOURCE_MAX		1

#define CORE_IF_CLK_THRESHOLD_HZ	500000000
#define CREG_CORE_IF_CLK_DIV_1		0x0
#define CREG_CORE_IF_CLK_DIV_2		0x1

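/*
 * struct hsdk_pll_cfg - one supported PLL output configuration
 * @rate:	target output rate in Hz
 * @idiv:	input divider field value (divider is idiv + 1)
 * @fbdiv:	feedback divider field value (divider is 2 * (fbdiv + 1))
 * @odiv:	output divider field value (divider is 2 ^ odiv)
 * @band:	PLL band selection field value
 * @bypass:	when set, route the parent clock around the PLL
 */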
struct hsdk_pll_cfg {
	u32 rate;
	u32 idiv;
	u32 fbdiv;
	u32 odiv;
	u32 band;
	u32 bypass;
};

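/* Rate table shared by the core and generic PLLs, terminated by a zero entry */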
static const struct hsdk_pll_cfg asdt_pll_cfg[] = {
	{ 100000000,  0, 11, 3, 0, 0 },
	{ 133000000,  0, 15, 3, 0, 0 },
	{ 200000000,  1, 47, 3, 0, 0 },
	{ 233000000,  1, 27, 2, 0, 0 },
	{ 300000000,  1, 35, 2, 0, 0 },
	{ 333000000,  1, 39, 2, 0, 0 },
	{ 400000000,  1, 47, 2, 0, 0 },
	{ 500000000,  0, 14, 1, 0, 0 },
	{ 600000000,  0, 17, 1, 0, 0 },
	{ 700000000,  0, 20, 1, 0, 0 },
	{ 800000000,  0, 23, 1, 0, 0 },
	{ 900000000,  1, 26, 0, 0, 0 },
	{ 1000000000, 1, 29, 0, 0, 0 },
	{ 1100000000, 1, 32, 0, 0, 0 },
	{ 1200000000, 1, 35, 0, 0, 0 },
	{ 1300000000, 1, 38, 0, 0, 0 },
	{ 1400000000, 1, 41, 0, 0, 0 },
	{ 1500000000, 1, 44, 0, 0, 0 },
	{ 1600000000, 1, 47, 0, 0, 0 },
	{}
};

static const struct hsdk_pll_cfg hdmi_pll_cfg[] = {
	{ 27000000,   0, 0,  0, 0, 1 },
	{ 148500000,  0, 21, 3, 0, 0 },
	{ 297000000,  0, 21, 2, 0, 0 },
	{ 540000000,  0, 19, 1, 0, 0 },
	{ 594000000,  0, 21, 1, 0, 0 },
	{}
};

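/*
 * struct hsdk_pll_clk - per-PLL driver instance
 * @hw:		clk_hw handle registered with the common clock framework
 * @regs:	base of the PLL control/status registers
 * @spec_regs:	CREG core interface clock divider register, only mapped and
 *		used for the core PLL
 * @pll_devdata: rate table and update_rate() callback for this PLL type
 * @dev:	backing device, NULL when registered via CLK_OF_DECLARE
 */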
struct hsdk_pll_clk {
	struct clk_hw hw;
	void __iomem *regs;
	void __iomem *spec_regs;
	const struct hsdk_pll_devdata *pll_devdata;
	struct device *dev;
};

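/*
 * struct hsdk_pll_devdata - per-PLL-type constants: the supported rate table
 * and the callback used to program a new rate (the core PLL needs extra
 * handling, see hsdk_pll_core_update_rate()).
 */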
struct hsdk_pll_devdata {
	const struct hsdk_pll_cfg *pll_cfg;
	int (*update_rate)(struct hsdk_pll_clk *clk, unsigned long rate,
			   const struct hsdk_pll_cfg *cfg);
};

static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *, unsigned long,
				     const struct hsdk_pll_cfg *);
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *, unsigned long,
				     const struct hsdk_pll_cfg *);

static const struct hsdk_pll_devdata core_pll_devdata = {
	.pll_cfg = asdt_pll_cfg,
	.update_rate = hsdk_pll_core_update_rate,
};

static const struct hsdk_pll_devdata sdt_pll_devdata = {
	.pll_cfg = asdt_pll_cfg,
	.update_rate = hsdk_pll_comm_update_rate,
};

static const struct hsdk_pll_devdata hdmi_pll_devdata = {
	.pll_cfg = hdmi_pll_cfg,
	.update_rate = hsdk_pll_comm_update_rate,
};

static inline void hsdk_pll_write(struct hsdk_pll_clk *clk, u32 reg, u32 val)
{
	iowrite32(val, clk->regs + reg);
}

static inline u32 hsdk_pll_read(struct hsdk_pll_clk *clk, u32 reg)
{
	return ioread32(clk->regs + reg);
}

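/*
 * Program the PLL control register. In bypass mode only the BYPASS bit is
 * set on top of the current register value; otherwise the divider and band
 * fields are written with the PD and BYPASS bits cleared.
 */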
static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk,
				    const struct hsdk_pll_cfg *cfg)
{
	u32 val = 0;

	if (cfg->bypass) {
		val = hsdk_pll_read(clk, CGU_PLL_CTRL);
		val |= CGU_PLL_CTRL_BYPASS;
	} else {
		/* Powerdown and Bypass bits should be cleared */
		val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT;
		val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT;
		val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
		val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;
	}

	dev_dbg(clk->dev, "write configuration: %#x\n", val);

	hsdk_pll_write(clk, CGU_PLL_CTRL, val);
}

static inline bool hsdk_pll_is_locked(struct hsdk_pll_clk *clk)
{
	return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK);
}

static inline bool hsdk_pll_is_err(struct hsdk_pll_clk *clk)
{
	return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR);
}

static inline struct hsdk_pll_clk *to_hsdk_pll_clk(struct clk_hw *hw)
{
	return container_of(hw, struct hsdk_pll_clk, hw);
}

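/*
 * Compute the current output rate from the control register:
 *
 *	rate = parent_rate * fbdiv / (idiv * odiv)
 *
 * with idiv = reg.idiv + 1, fbdiv = 2 * (reg.fbdiv + 1), odiv = 2 ^ reg.odiv.
 * A bypassed PLL runs at the parent rate, a powered-down PLL reports 0.
 */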
static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	u32 val;
	u64 rate;
	u32 idiv, fbdiv, odiv;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);

	val = hsdk_pll_read(clk, CGU_PLL_CTRL);

	dev_dbg(clk->dev, "current configuration: %#x\n", val);

	/* Check if PLL is bypassed */
	if (val & CGU_PLL_CTRL_BYPASS)
		return parent_rate;

	/* Check if PLL is disabled */
	if (val & CGU_PLL_CTRL_PD)
		return 0;

	/* input divider = reg.idiv + 1 */
	idiv = 1 + ((val & CGU_PLL_CTRL_IDIV_MASK) >> CGU_PLL_CTRL_IDIV_SHIFT);
	/* fb divider = 2*(reg.fbdiv + 1) */
	fbdiv = 2 * (1 + ((val & CGU_PLL_CTRL_FBDIV_MASK) >> CGU_PLL_CTRL_FBDIV_SHIFT));
	/* output divider = 2^(reg.odiv) */
	odiv = 1 << ((val & CGU_PLL_CTRL_ODIV_MASK) >> CGU_PLL_CTRL_ODIV_SHIFT);

	rate = (u64)parent_rate * fbdiv;
	do_div(rate, idiv * odiv);

	return rate;
}

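/*
 * Round the requested rate to the closest entry of the per-PLL rate table;
 * arbitrary rates are not supported.
 */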
static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *prate)
{
	int i;
	unsigned long best_rate;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
	const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;

	if (pll_cfg[0].rate == 0)
		return -EINVAL;

	best_rate = pll_cfg[0].rate;

	for (i = 1; pll_cfg[i].rate != 0; i++) {
		if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
			best_rate = pll_cfg[i].rate;
	}

	dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate);

	return best_rate;
}

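/*
 * Generic rate update: program the new configuration, busy-wait for the
 * maximum lock time, then check the lock and error status bits.
 */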
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk,
				     unsigned long rate,
				     const struct hsdk_pll_cfg *cfg)
{
	hsdk_pll_set_cfg(clk, cfg);

	/*
	 * Wait until the CGU relocks, then check the error status.
	 * If the PLL is still unlocked after the timeout, return an error.
	 */
	udelay(HSDK_PLL_MAX_LOCK_TIME);
	if (!hsdk_pll_is_locked(clk))
		return -ETIMEDOUT;

	if (hsdk_pll_is_err(clk))
		return -EINVAL;

	return 0;
}

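/*
 * Core PLL rate update: same as the generic path, except that the core
 * interface clock divider (the CREG register mapped at spec_regs) must be
 * switched to div-by-2 before raising the core clock above 500 MHz, and
 * back to div-by-1 after successfully dropping below that threshold.
 */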
static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *clk,
				     unsigned long rate,
				     const struct hsdk_pll_cfg *cfg)
{
	/*
	 * When the core clock exceeds 500 MHz, the divider for the interface
	 * clock must be programmed to div-by-2.
	 */
	if (rate > CORE_IF_CLK_THRESHOLD_HZ)
		iowrite32(CREG_CORE_IF_CLK_DIV_2, clk->spec_regs);

	hsdk_pll_set_cfg(clk, cfg);

	/*
	 * Wait until the CGU relocks, then check the error status.
	 * If the PLL is still unlocked after the timeout, return an error.
	 */
	udelay(HSDK_PLL_MAX_LOCK_TIME);
	if (!hsdk_pll_is_locked(clk))
		return -ETIMEDOUT;

	if (hsdk_pll_is_err(clk))
		return -EINVAL;

	/*
	 * Program the divider back to div-by-1 if we successfully set the
	 * core clock below the 500 MHz threshold.
	 */
	if (rate <= CORE_IF_CLK_THRESHOLD_HZ)
		iowrite32(CREG_CORE_IF_CLK_DIV_1, clk->spec_regs);

	return 0;
}

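/*
 * Set the PLL to one of the rates from the per-PLL table; any other rate
 * is rejected with -EINVAL.
 */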
static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	int i;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
	const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;

	for (i = 0; pll_cfg[i].rate != 0; i++) {
		if (pll_cfg[i].rate == rate) {
			return clk->pll_devdata->update_rate(clk, rate,
							     &pll_cfg[i]);
		}
	}

	dev_err(clk->dev, "invalid rate=%ld, parent_rate=%ld\n", rate,
			parent_rate);

	return -EINVAL;
}

static const struct clk_ops hsdk_pll_ops = {
	.recalc_rate = hsdk_pll_recalc_rate,
	.round_rate = hsdk_pll_round_rate,
	.set_rate = hsdk_pll_set_rate,
};

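/*
 * Platform driver probe path, used for the generic and HDMI PLLs. The core
 * PLL is registered earlier via CLK_OF_DECLARE, see of_hsdk_pll_clk_setup().
 */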
static int hsdk_pll_clk_probe(struct platform_device *pdev)
{
	int ret;
	struct resource *mem;
	const char *parent_name;
	unsigned int num_parents;
	struct hsdk_pll_clk *pll_clk;
	struct clk_init_data init = { };
	struct device *dev = &pdev->dev;

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return -ENOMEM;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pll_clk->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(pll_clk->regs))
		return PTR_ERR(pll_clk->regs);

	init.name = dev->of_node->name;
	init.ops = &hsdk_pll_ops;
	parent_name = of_clk_get_parent_name(dev->of_node, 0);
	init.parent_names = &parent_name;
	num_parents = of_clk_get_parent_count(dev->of_node);
	if (num_parents == 0 || num_parents > CGU_PLL_SOURCE_MAX) {
		dev_err(dev, "wrong number of clock parents: %u\n", num_parents);
		return -EINVAL;
	}
	init.num_parents = num_parents;

	pll_clk->hw.init = &init;
	pll_clk->dev = dev;
	pll_clk->pll_devdata = of_device_get_match_data(dev);

	if (!pll_clk->pll_devdata) {
		dev_err(dev, "No OF match data provided\n");
		return -EINVAL;
	}

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret) {
		dev_err(dev, "failed to register %s clock\n", init.name);
		return ret;
	}

	return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get,
			&pll_clk->hw);
}

static int hsdk_pll_clk_remove(struct platform_device *pdev)
{
	of_clk_del_provider(pdev->dev.of_node);
	return 0;
}

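/*
 * Early registration path for the core PLL: runs from CLK_OF_DECLARE during
 * of_clk_init(), before platform devices are probed, because the ARC CPU
 * timers already need this clock. Resources are mapped straight from the
 * device tree node.
 */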
static void __init of_hsdk_pll_clk_setup(struct device_node *node)
{
	int ret;
	const char *parent_name;
	unsigned int num_parents;
	struct hsdk_pll_clk *pll_clk;
	struct clk_init_data init = { };

	pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return;

	pll_clk->regs = of_iomap(node, 0);
	if (!pll_clk->regs) {
		pr_err("failed to map pll registers\n");
		goto err_free_pll_clk;
	}

	pll_clk->spec_regs = of_iomap(node, 1);
	if (!pll_clk->spec_regs) {
		pr_err("failed to map pll special registers\n");
		goto err_unmap_comm_regs;
	}

	init.name = node->name;
	init.ops = &hsdk_pll_ops;
	parent_name = of_clk_get_parent_name(node, 0);
	init.parent_names = &parent_name;
	num_parents = of_clk_get_parent_count(node);
	if (num_parents > CGU_PLL_SOURCE_MAX) {
		pr_err("too many clock parents: %u\n", num_parents);
		goto err_unmap_spec_regs;
	}
	init.num_parents = num_parents;

	pll_clk->hw.init = &init;
	pll_clk->pll_devdata = &core_pll_devdata;

	ret = clk_hw_register(NULL, &pll_clk->hw);
	if (ret) {
		pr_err("failed to register %pOFn clock\n", node);
		goto err_unmap_spec_regs;
	}

	ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
	if (ret) {
		pr_err("failed to add hw provider for %pOFn clock\n", node);
		goto err_unmap_spec_regs;
	}

	return;

err_unmap_spec_regs:
	iounmap(pll_clk->spec_regs);
err_unmap_comm_regs:
	iounmap(pll_clk->regs);
err_free_pll_clk:
	kfree(pll_clk);
}

/* Core PLL needed early for the ARC CPU timers */
CLK_OF_DECLARE(hsdk_pll_clock, "snps,hsdk-core-pll-clock",
	       of_hsdk_pll_clk_setup);
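
/*
 * Illustrative device tree usage, a sketch only (the node name, label and
 * register offsets/sizes below are examples, not taken from a particular
 * board file):
 *
 *	core_clk: core-clk@0 {
 *		compatible = "snps,hsdk-core-pll-clock";
 *		reg = <0x00 0x10>, <0x14b8 0x4>;
 *		#clock-cells = <0>;
 *		clocks = <&input_clk>;
 *	};
 *
 * The second "reg" region is the CREG core interface clock divider that ends
 * up in spec_regs; the generic and HDMI PLLs handled by the platform driver
 * below only need the first region.
 */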

static const struct of_device_id hsdk_pll_clk_id[] = {
	{ .compatible = "snps,hsdk-gp-pll-clock", .data = &sdt_pll_devdata},
	{ .compatible = "snps,hsdk-hdmi-pll-clock", .data = &hdmi_pll_devdata},
	{ }
};

static struct platform_driver hsdk_pll_clk_driver = {
	.driver = {
		.name = "hsdk-gp-pll-clock",
		.of_match_table = hsdk_pll_clk_id,
	},
	.probe = hsdk_pll_clk_probe,
	.remove = hsdk_pll_clk_remove,
};
builtin_platform_driver(hsdk_pll_clk_driver);