// SPDX-License-Identifier: GPL-2.0
/*
 * R-Car Gen3 Clock Pulse Generator Library
 *
 * Copyright (C) 2015-2018 Glider bvba
 * Copyright (C) 2019 Renesas Electronics Corp.
 *
 * Based on clk-rcar-gen3.c
 *
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

#include "rcar-cpg-lib.h"

spinlock_t cpg_lock;

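/* Locked read-modify-write of a CPG register, serialized by cpg_lock. */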
void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&cpg_lock, flags);
	val = readl(reg);
	val &= ~clear;
	val |= set;
	writel(val, reg);
	spin_unlock_irqrestore(&cpg_lock, flags);
}

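/*
 * Simple suspend/resume notifier: the watched CPG register is saved at
 * PM_EVENT_SUSPEND and written back at PM_EVENT_RESUME.
 */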
static int cpg_simple_notifier_call(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct cpg_simple_notifier *csn =
		container_of(nb, struct cpg_simple_notifier, nb);

	switch (action) {
	case PM_EVENT_SUSPEND:
		csn->saved = readl(csn->reg);
		return NOTIFY_OK;

	case PM_EVENT_RESUME:
		writel(csn->saved, csn->reg);
		return NOTIFY_OK;
	}
	return NOTIFY_DONE;
}

void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
				  struct cpg_simple_notifier *csn)
{
	csn->nb.notifier_call = cpg_simple_notifier_call;
	raw_notifier_chain_register(notifiers, &csn->nb);
}

/*
 * SDn Clock
 */
#define CPG_SD_STP_HCK		BIT(9)
#define CPG_SD_STP_CK		BIT(8)

#define CPG_SD_STP_MASK		(CPG_SD_STP_HCK | CPG_SD_STP_CK)
#define CPG_SD_FC_MASK		(0x7 << 2 | 0x3 << 0)

#define CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) \
{ \
	.val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \
	       ((sd_srcfc) << 2) | \
	       ((sd_fc) << 0), \
	.div = (sd_div), \
}

struct sd_div_table {
	u32 val;
	unsigned int div;
};

struct sd_clock {
	struct clk_hw hw;
	const struct sd_div_table *div_table;
	struct cpg_simple_notifier csn;
	unsigned int div_num;
	unsigned int cur_div_idx;
};

/* SDn divider
 *           sd_srcfc   sd_fc   div
 * stp_hck   (div)      (div)     = sd_srcfc x sd_fc
 *---------------------------------------------------------
 *  0         0 (1)      1 (4)      4 : SDR104 / HS200 / HS400 (8 TAP)
 *  0         1 (2)      1 (4)      8 : SDR50
 *  1         2 (4)      1 (4)     16 : HS / SDR25
 *  1         3 (8)      1 (4)     32 : NS / SDR12
 *  1         4 (16)     1 (4)     64
 *  0         0 (1)      0 (2)      2
 *  0         1 (2)      0 (2)      4 : SDR104 / HS200 / HS400 (4 TAP)
 *  1         2 (4)      0 (2)      8
 *  1         3 (8)      0 (2)     16
 *  1         4 (16)     0 (2)     32
 *
 *  NOTE: There is a quirk option to ignore the first row of the dividers
 *  table when searching for suitable settings. This is because HS400 on
 *  early ES versions of H3 and M3-W requires a specific setting to work.
 */
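/*
 * Example (illustrative figures): assuming an 800 MHz SDn source clock, the
 * SDR50 row above (stp_hck = 0, sd_srcfc = 1 (/2), sd_fc = 1 (/4)) gives a
 * total divider of 2 x 4 = 8, i.e. an SDn clock of 800 MHz / 8 = 100 MHz.
 */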
static const struct sd_div_table cpg_sd_div_table[] = {
/*	CPG_SD_DIV_TABLE_DATA(stp_hck,  sd_srcfc,   sd_fc,  sd_div) */
	CPG_SD_DIV_TABLE_DATA(0,        0,          1,        4),
	CPG_SD_DIV_TABLE_DATA(0,        1,          1,        8),
	CPG_SD_DIV_TABLE_DATA(1,        2,          1,       16),
	CPG_SD_DIV_TABLE_DATA(1,        3,          1,       32),
	CPG_SD_DIV_TABLE_DATA(1,        4,          1,       64),
	CPG_SD_DIV_TABLE_DATA(0,        0,          0,        2),
	CPG_SD_DIV_TABLE_DATA(0,        1,          0,        4),
	CPG_SD_DIV_TABLE_DATA(1,        2,          0,        8),
	CPG_SD_DIV_TABLE_DATA(1,        3,          0,       16),
	CPG_SD_DIV_TABLE_DATA(1,        4,          0,       32),
};

#define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw)

static int cpg_sd_clock_enable(struct clk_hw *hw)
{
	struct sd_clock *clock = to_sd_clock(hw);

	cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK,
		       clock->div_table[clock->cur_div_idx].val &
		       CPG_SD_STP_MASK);

	return 0;
}

static void cpg_sd_clock_disable(struct clk_hw *hw)
{
	struct sd_clock *clock = to_sd_clock(hw);

	cpg_reg_modify(clock->csn.reg, 0, CPG_SD_STP_MASK);
}

static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
{
	struct sd_clock *clock = to_sd_clock(hw);

	return !(readl(clock->csn.reg) & CPG_SD_STP_MASK);
}

static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
					      unsigned long parent_rate)
{
	struct sd_clock *clock = to_sd_clock(hw);

	return DIV_ROUND_CLOSEST(parent_rate,
				 clock->div_table[clock->cur_div_idx].div);
}

static int cpg_sd_clock_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	unsigned long best_rate = ULONG_MAX, diff_min = ULONG_MAX;
	struct sd_clock *clock = to_sd_clock(hw);
	unsigned long calc_rate, diff;
	unsigned int i;

	for (i = 0; i < clock->div_num; i++) {
		calc_rate = DIV_ROUND_CLOSEST(req->best_parent_rate,
					      clock->div_table[i].div);
		if (calc_rate < req->min_rate || calc_rate > req->max_rate)
			continue;

		diff = calc_rate > req->rate ? calc_rate - req->rate
					     : req->rate - calc_rate;
		if (diff < diff_min) {
			best_rate = calc_rate;
			diff_min = diff;
		}
	}

	if (best_rate == ULONG_MAX)
		return -EINVAL;

	req->rate = best_rate;
	return 0;
}

static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct sd_clock *clock = to_sd_clock(hw);
	unsigned int i;

	for (i = 0; i < clock->div_num; i++)
		if (rate == DIV_ROUND_CLOSEST(parent_rate,
					      clock->div_table[i].div))
			break;

	if (i >= clock->div_num)
		return -EINVAL;

	clock->cur_div_idx = i;

	cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK | CPG_SD_FC_MASK,
		       clock->div_table[i].val &
		       (CPG_SD_STP_MASK | CPG_SD_FC_MASK));

	return 0;
}

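/*
 * clk_set_rate() on an SDn clock first hits .determine_rate, which picks the
 * achievable rate closest to the request from the divider table, and then
 * .set_rate, which programs the STP/FC bits of the chosen entry and caches
 * its index in cur_div_idx.  .enable re-applies only the STP bits of the
 * cached entry (clearing the others), while .disable stops the clock by
 * setting both STP bits.
 */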
static const struct clk_ops cpg_sd_clock_ops = {
	.enable = cpg_sd_clock_enable,
	.disable = cpg_sd_clock_disable,
	.is_enabled = cpg_sd_clock_is_enabled,
	.recalc_rate = cpg_sd_clock_recalc_rate,
	.determine_rate = cpg_sd_clock_determine_rate,
	.set_rate = cpg_sd_clock_set_rate,
};

struct clk * __init cpg_sd_clk_register(const char *name,
	void __iomem *base, unsigned int offset, const char *parent_name,
	struct raw_notifier_head *notifiers, bool skip_first)
{
	struct clk_init_data init = {};
	struct sd_clock *clock;
	struct clk *clk;
	u32 val;

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &cpg_sd_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->csn.reg = base + offset;
	clock->hw.init = &init;
	clock->div_table = cpg_sd_div_table;
	clock->div_num = ARRAY_SIZE(cpg_sd_div_table);

	if (skip_first) {
		clock->div_table++;
		clock->div_num--;
	}

	val = readl(clock->csn.reg) & ~CPG_SD_FC_MASK;
	val |= CPG_SD_STP_MASK | (clock->div_table[0].val & CPG_SD_FC_MASK);
	writel(val, clock->csn.reg);

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto free_clock;

	cpg_simple_notifier_register(notifiers, &clock->csn);
	return clk;

free_clock:
	kfree(clock);
	return clk;
}
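/*
 * Illustrative caller sketch (clock names and register offset are made up):
 * a SoC-specific CPG driver would register an SDn clock roughly as
 *
 *	clk = cpg_sd_clk_register("sd0", base, SD0CKCR_OFFSET, "sdsrc",
 *				  notifiers, false);
 *
 * passing skip_first = true only where the quirk described above
 * cpg_sd_div_table applies.
 */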

struct rpc_clock {
	struct clk_divider div;
	struct clk_gate gate;
	/*
	 * One notifier covers both RPC and RPCD2 clocks as they are both
	 * controlled by the same RPCCKCR register...
	 */
	struct cpg_simple_notifier csn;
};

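/*
 * { register value, divider } pairs accepted by the 3-bit RPC divider field;
 * only the even dividers 2, 4, 6 and 8 are supported, and { 0, 0 } terminates
 * the table.
 */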
static const struct clk_div_table cpg_rpc_div_table[] = {
	{ 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 }, { 0, 0 },
};

struct clk * __init cpg_rpc_clk_register(const char *name,
	void __iomem *rpcckcr, const char *parent_name,
	struct raw_notifier_head *notifiers)
{
	struct rpc_clock *rpc;
	struct clk *clk;

	rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
	if (!rpc)
		return ERR_PTR(-ENOMEM);

	rpc->div.reg = rpcckcr;
	rpc->div.width = 3;
	rpc->div.table = cpg_rpc_div_table;
	rpc->div.lock = &cpg_lock;

	rpc->gate.reg = rpcckcr;
	rpc->gate.bit_idx = 8;
	rpc->gate.flags = CLK_GATE_SET_TO_DISABLE;
	rpc->gate.lock = &cpg_lock;

	rpc->csn.reg = rpcckcr;

	clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
				     &rpc->div.hw,  &clk_divider_ops,
				     &rpc->gate.hw, &clk_gate_ops,
				     CLK_SET_RATE_PARENT);
	if (IS_ERR(clk)) {
		kfree(rpc);
		return clk;
	}

	cpg_simple_notifier_register(notifiers, &rpc->csn);
	return clk;
}

struct rpcd2_clock {
	struct clk_fixed_factor fixed;
	struct clk_gate gate;
};

struct clk * __init cpg_rpcd2_clk_register(const char *name,
					   void __iomem *rpcckcr,
					   const char *parent_name)
{
	struct rpcd2_clock *rpcd2;
	struct clk *clk;

	rpcd2 = kzalloc(sizeof(*rpcd2), GFP_KERNEL);
	if (!rpcd2)
		return ERR_PTR(-ENOMEM);

	rpcd2->fixed.mult = 1;
	rpcd2->fixed.div = 2;

	rpcd2->gate.reg = rpcckcr;
	rpcd2->gate.bit_idx = 9;
	rpcd2->gate.flags = CLK_GATE_SET_TO_DISABLE;
	rpcd2->gate.lock = &cpg_lock;

	clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
				     &rpcd2->fixed.hw, &clk_fixed_factor_ops,
				     &rpcd2->gate.hw, &clk_gate_ops,
				     CLK_SET_RATE_PARENT);
	if (IS_ERR(clk))
		kfree(rpcd2);

	return clk;
}