drivers/clk/pxa/clk-pxa.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell PXA family clocks
 *
 * Copyright (C) 2014 Robert Jarzmik
 *
 * Common clock code for PXA clocks ("CKEN" type clocks + DT)
 */
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/soc/pxa/smemc.h>

#include <dt-bindings/clock/pxa-clock.h>
#include "clk-pxa.h"

#define KHz 1000
#define MHz (1000 * 1000)

#define MDREFR_K0DB4	(1 << 29)	/* SDCLK0 Divide by 4 Control/Status */
#define MDREFR_K2FREE	(1 << 25)	/* SDRAM Free-Running Control */
#define MDREFR_K1FREE	(1 << 24)	/* SDRAM Free-Running Control */
#define MDREFR_K0FREE	(1 << 23)	/* SDRAM Free-Running Control */
#define MDREFR_SLFRSH	(1 << 22)	/* SDRAM Self-Refresh Control/Status */
#define MDREFR_APD	(1 << 20)	/* SDRAM/SSRAM Auto-Power-Down Enable */
#define MDREFR_K2DB2	(1 << 19)	/* SDCLK2 Divide by 2 Control/Status */
#define MDREFR_K2RUN	(1 << 18)	/* SDCLK2 Run Control/Status */
#define MDREFR_K1DB2	(1 << 17)	/* SDCLK1 Divide by 2 Control/Status */
#define MDREFR_K1RUN	(1 << 16)	/* SDCLK1 Run Control/Status */
#define MDREFR_E1PIN	(1 << 15)	/* SDCKE1 Level Control/Status */
#define MDREFR_K0DB2	(1 << 14)	/* SDCLK0 Divide by 2 Control/Status */
#define MDREFR_K0RUN	(1 << 13)	/* SDCLK0 Run Control/Status */
#define MDREFR_E0PIN	(1 << 12)	/* SDCKE0 Level Control/Status */
#define MDREFR_DB2_MASK	(MDREFR_K2DB2 | MDREFR_K1DB2)
#define MDREFR_DRI_MASK	0xFFF

static DEFINE_SPINLOCK(pxa_clk_lock);

static struct clk *pxa_clocks[CLK_MAX];
static struct clk_onecell_data onecell_data = {
	.clks = pxa_clocks,
	.clk_num = CLK_MAX,
};

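/*
 * A CKEN clock is modelled as a composite clock: a 2-input mux choosing
 * between a low-power and a high-power fixed-factor divider of the
 * parent, plus a gate driven by the CKEN register bit.
 */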
struct pxa_clk {
	struct clk_hw hw;
	struct clk_fixed_factor lp;
	struct clk_fixed_factor hp;
	struct clk_gate gate;
	bool (*is_in_low_power)(void);
};

#define to_pxa_clk(_hw) container_of(_hw, struct pxa_clk, hw)

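/*
 * Recompute the rate of a CKEN clock: pick the low-power or high-power
 * fixed factor depending on is_in_low_power(), then delegate to the
 * generic fixed-factor recalc_rate().
 */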
static unsigned long cken_recalc_rate(struct clk_hw *hw,
				      unsigned long parent_rate)
{
	struct pxa_clk *pclk = to_pxa_clk(hw);
	struct clk_fixed_factor *fix;

	if (!pclk->is_in_low_power || pclk->is_in_low_power())
		fix = &pclk->lp;
	else
		fix = &pclk->hp;
	__clk_hw_set_clk(&fix->hw, hw);
	return clk_fixed_factor_ops.recalc_rate(&fix->hw, parent_rate);
}

static const struct clk_ops cken_rate_ops = {
	.recalc_rate = cken_recalc_rate,
};

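/*
 * Report the active mux input: parent 0 (low-power) when the SoC is in
 * low-power mode or no predicate was provided, parent 1 (high-power)
 * otherwise.
 */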
static u8 cken_get_parent(struct clk_hw *hw)
{
	struct pxa_clk *pclk = to_pxa_clk(hw);

	if (!pclk->is_in_low_power)
		return 0;
	return pclk->is_in_low_power() ? 0 : 1;
}

static const struct clk_ops cken_mux_ops = {
	.get_parent = cken_get_parent,
	.set_parent = dummy_clk_set_parent,
};

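/*
 * Record a clock both in the onecell table used by the DT provider
 * (when ckid is a valid index) and in the clkdev lookup table used by
 * legacy, non-DT clk_get() consumers.
 */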
void __init clkdev_pxa_register(int ckid, const char *con_id,
				const char *dev_id, struct clk *clk)
{
	if (!IS_ERR(clk) && (ckid != CLK_NONE))
		pxa_clocks[ckid] = clk;
	if (!IS_ERR(clk))
		clk_register_clkdev(clk, con_id, dev_id);
}

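/*
 * Register an array of CKEN clock descriptors. Each entry becomes a
 * composite clock (mux + rate + gate) sharing pxa_clk_lock, and is then
 * exposed through clkdev_pxa_register().
 */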
int __init clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks)
{
	int i;
	struct pxa_clk *pxa_clk;
	struct clk *clk;

	for (i = 0; i < nb_clks; i++) {
		pxa_clk = kzalloc(sizeof(*pxa_clk), GFP_KERNEL);
		pxa_clk->is_in_low_power = clks[i].is_in_low_power;
		pxa_clk->lp = clks[i].lp;
		pxa_clk->hp = clks[i].hp;
		pxa_clk->gate = clks[i].gate;
		pxa_clk->gate.lock = &pxa_clk_lock;
		clk = clk_register_composite(NULL, clks[i].name,
					     clks[i].parent_names, 2,
					     &pxa_clk->hw, &cken_mux_ops,
					     &pxa_clk->hw, &cken_rate_ops,
					     &pxa_clk->gate.hw, &clk_gate_ops,
					     clks[i].flags);
		clkdev_pxa_register(clks[i].ckid, clks[i].con_id,
				    clks[i].dev_id, clk);
	}
	return 0;
}

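/* Expose the pxa_clocks[] table as a onecell clock provider for DT users. */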
void __init clk_pxa_dt_common_init(struct device_node *np)
{
	of_clk_add_provider(np, of_clk_src_onecell_get, &onecell_data);
}

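/*
 * Enter or leave turbo mode by rewriting the CLKCFG coprocessor
 * register (cp14, c6) with the FCS bit set. The mcr is reached through
 * a branch trampoline into a 32-byte-aligned block, keeping the
 * sequence within a single cache line across the frequency change, and
 * runs with interrupts disabled.
 */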
void pxa2xx_core_turbo_switch(bool on)
{
	unsigned long flags;
	unsigned int unused, clkcfg;

	local_irq_save(flags);

	asm("mrc p14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
	clkcfg &= ~CLKCFG_TURBO & ~CLKCFG_HALFTURBO;
	if (on)
		clkcfg |= CLKCFG_TURBO;
	clkcfg |= CLKCFG_FCS;

	asm volatile(
	"	b	2f\n"
	"	.align	5\n"
	"1:	mcr	p14, 0, %1, c6, c0, 0\n"
	"	b	3f\n"
	"2:	b	1b\n"
	"3:	nop\n"
	: "=&r" (unused) : "r" (clkcfg));

	local_irq_restore(flags);
}

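/*
 * Change the core PLL frequency. The new CCCR value is written first;
 * then, within one aligned code block and with interrupts disabled,
 * MDREFR is preset, CLKCFG[FCS] triggers the frequency change sequence,
 * and MDREFR is postset, so the SDRAM refresh interval and clock
 * dividers remain valid on both sides of the change.
 */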
void pxa2xx_cpll_change(struct pxa2xx_freq *freq,
			u32 (*mdrefr_dri)(unsigned int),
			void __iomem *cccr)
{
	unsigned int clkcfg = freq->clkcfg;
	unsigned int unused, preset_mdrefr, postset_mdrefr;
	unsigned long flags;
	void __iomem *mdrefr = pxa_smemc_get_mdrefr();

	local_irq_save(flags);

	/* Calculate the next MDREFR. If we're slowing down the SDRAM clock
	 * we need to preset the smaller DRI before the change. If we're
	 * speeding up we need to set the larger DRI value after the change.
	 */
	preset_mdrefr = postset_mdrefr = readl(mdrefr);
	if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(freq->membus_khz)) {
		preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK);
		preset_mdrefr |= mdrefr_dri(freq->membus_khz);
	}
	postset_mdrefr =
		(postset_mdrefr & ~MDREFR_DRI_MASK) |
		mdrefr_dri(freq->membus_khz);

	/* If we're dividing the memory clock by two for the SDRAM clock, this
	 * must be set prior to the change. Clearing the divide must be done
	 * after the change.
	 */
	if (freq->div2) {
		preset_mdrefr  |= MDREFR_DB2_MASK;
		postset_mdrefr |= MDREFR_DB2_MASK;
	} else {
		postset_mdrefr &= ~MDREFR_DB2_MASK;
	}

	/* Set the new CCCR and prepare CLKCFG */
	writel(freq->cccr, cccr);

	asm volatile(
	"	ldr	r4, [%1]\n"
	"	b	2f\n"
	"	.align	5\n"
	"1:	str	%3, [%1]		/* preset the MDREFR */\n"
	"	mcr	p14, 0, %2, c6, c0, 0	/* set CLKCFG[FCS] */\n"
	"	str	%4, [%1]		/* postset the MDREFR */\n"
	"	b	3f\n"
	"2:	b	1b\n"
	"3:	nop\n"
	: "=&r" (unused)
	: "r" (mdrefr), "r" (clkcfg), "r" (preset_mdrefr),
	  "r" (postset_mdrefr)
	: "r4", "r5");

	local_irq_restore(flags);
}

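/*
 * determine_rate helper for the core clock: pick an exact match from
 * the frequency table when possible, otherwise fall back to a table
 * entry at or below the requested rate, then to one at or above it,
 * constrained to [min_rate, max_rate].
 */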
int pxa2xx_determine_rate(struct clk_rate_request *req,
			  struct pxa2xx_freq *freqs, int nb_freqs)
{
	int i, closest_below = -1, closest_above = -1;
	unsigned long rate;

	for (i = 0; i < nb_freqs; i++) {
		rate = freqs[i].cpll;
		if (rate == req->rate)
			break;
		if (rate < req->min_rate)
			continue;
		if (rate > req->max_rate)
			continue;
		if (rate <= req->rate)
			closest_below = i;
		if ((rate >= req->rate) && (closest_above == -1))
			closest_above = i;
	}

	req->best_parent_hw = NULL;

	if (i < nb_freqs) {
		rate = req->rate;
	} else if (closest_below >= 0) {
		rate = freqs[closest_below].cpll;
	} else if (closest_above >= 0) {
		rate = freqs[closest_above].cpll;
	} else {
		pr_debug("%s(rate=%lu) no match\n", __func__, req->rate);
		return -EINVAL;
	}

	pr_debug("%s(rate=%lu) rate=%lu\n", __func__, req->rate, rate);
	req->rate = rate;

	return 0;
}