/*
 * Intel Atom platform clocks driver for BayTrail and CherryTrail SoCs
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Irina Tirdea <irina.tirdea@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/platform_data/x86/clk-pmc-atom.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define PLT_CLK_NAME_BASE "pmc_plt_clk"

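/*
 * Each of the six platform clocks has a 32-bit control register at
 * PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE: bits [1:0] select the gating
 * mode (gated in D3, force on, force off) and bit 2 selects the parent
 * (XTAL or PLL).
 */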
#define PMC_CLK_CTL_OFFSET          0x60
#define PMC_CLK_CTL_SIZE            4
#define PMC_CLK_NUM                 6
#define PMC_CLK_CTL_GATED_ON_D3     0x0
#define PMC_CLK_CTL_FORCE_ON        0x1
#define PMC_CLK_CTL_FORCE_OFF       0x2
#define PMC_CLK_CTL_RESERVED        0x3
#define PMC_MASK_CLK_CTL            GENMASK(1, 0)
#define PMC_MASK_CLK_FREQ           BIT(2)
#define PMC_CLK_FREQ_XTAL           (0 << 2)    /* 25 MHz */
#define PMC_CLK_FREQ_PLL            (1 << 2)    /* 19.2 MHz */

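/* A fixed-rate parent clock (XTAL or PLL) and its clkdev lookup entry */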
struct clk_plt_fixed {
        struct clk_hw *clk;
        struct clk_lookup *lookup;
};

struct clk_plt {
        struct clk_hw hw;
        void __iomem *reg;
        struct clk_lookup *lookup;
        /* protect access to PMC registers */
        spinlock_t lock;
};

#define to_clk_plt(_hw) container_of(_hw, struct clk_plt, hw)

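/*
 * Per-device driver data: the fixed-rate parents, the PMC_CLK_NUM platform
 * clocks derived from them and the extra "mclk" clkdev alias.
 */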
struct clk_plt_data {
        struct clk_plt_fixed **parents;
        u8 nparents;
        struct clk_plt *clks[PMC_CLK_NUM];
        struct clk_lookup *mclk_lookup;
};

/* Return an index in parent table */
static inline int plt_reg_to_parent(int reg)
{
        switch (reg & PMC_MASK_CLK_FREQ) {
        default:
        case PMC_CLK_FREQ_XTAL:
                return 0;
        case PMC_CLK_FREQ_PLL:
                return 1;
        }
}

/* Return the frequency-select register value for a given parent index */
static inline int plt_parent_to_reg(int index)
{
        switch (index) {
        default:
        case 0:
                return PMC_CLK_FREQ_XTAL;
        case 1:
                return PMC_CLK_FREQ_PLL;
        }
}

/* Abstract status in simpler enabled/disabled value */
static inline int plt_reg_to_enabled(int reg)
{
        switch (reg & PMC_MASK_CLK_CTL) {
        case PMC_CLK_CTL_GATED_ON_D3:
        case PMC_CLK_CTL_FORCE_ON:
                return 1;       /* enabled */
        case PMC_CLK_CTL_FORCE_OFF:
        case PMC_CLK_CTL_RESERVED:
        default:
                return 0;       /* disabled */
        }
}

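/*
 * Read-modify-write of a clock control register; the per-clock spinlock
 * (irqsave) serializes concurrent updates from any context.
 */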
static void plt_clk_reg_update(struct clk_plt *clk, u32 mask, u32 val)
{
        u32 tmp;
        unsigned long flags;

        spin_lock_irqsave(&clk->lock, flags);

        tmp = readl(clk->reg);
        tmp = (tmp & ~mask) | (val & mask);
        writel(tmp, clk->reg);

        spin_unlock_irqrestore(&clk->lock, flags);
}

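/*
 * Parent (mux) callbacks: the frequency-select bit of the control register
 * chooses between the two fixed-rate parents taken from the platform data.
 */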
static int plt_clk_set_parent(struct clk_hw *hw, u8 index)
{
        struct clk_plt *clk = to_clk_plt(hw);

        plt_clk_reg_update(clk, PMC_MASK_CLK_FREQ, plt_parent_to_reg(index));

        return 0;
}

static u8 plt_clk_get_parent(struct clk_hw *hw)
{
        struct clk_plt *clk = to_clk_plt(hw);
        u32 value;

        value = readl(clk->reg);

        return plt_reg_to_parent(value);
}

static int plt_clk_enable(struct clk_hw *hw)
{
        struct clk_plt *clk = to_clk_plt(hw);

        plt_clk_reg_update(clk, PMC_MASK_CLK_CTL, PMC_CLK_CTL_FORCE_ON);

        return 0;
}

static void plt_clk_disable(struct clk_hw *hw)
{
        struct clk_plt *clk = to_clk_plt(hw);

        plt_clk_reg_update(clk, PMC_MASK_CLK_CTL, PMC_CLK_CTL_FORCE_OFF);
}

static int plt_clk_is_enabled(struct clk_hw *hw)
{
        struct clk_plt *clk = to_clk_plt(hw);
        u32 value;

        value = readl(clk->reg);

        return plt_reg_to_enabled(value);
}

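/*
 * Each platform clock acts as both a gate and a two-parent mux; rate requests
 * are resolved by picking a suitable parent via __clk_mux_determine_rate().
 */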
static const struct clk_ops plt_clk_ops = {
        .enable = plt_clk_enable,
        .disable = plt_clk_disable,
        .is_enabled = plt_clk_is_enabled,
        .get_parent = plt_clk_get_parent,
        .set_parent = plt_clk_set_parent,
        .determine_rate = __clk_mux_determine_rate,
};

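/*
 * Register platform clock "pmc_plt_clk_<id>" and its clkdev entry.  Both the
 * clk core and clkdev copy the name they are given, so the kasprintf()'d
 * buffer is freed before returning on both the success and the error path.
 */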
static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
                                        void __iomem *base,
                                        const char **parent_names,
                                        int num_parents)
{
        struct clk_plt *pclk;
        struct clk_init_data init;
        int ret;

        pclk = devm_kzalloc(&pdev->dev, sizeof(*pclk), GFP_KERNEL);
        if (!pclk)
                return ERR_PTR(-ENOMEM);

        init.name = kasprintf(GFP_KERNEL, "%s_%d", PLT_CLK_NAME_BASE, id);
        if (!init.name)
                return ERR_PTR(-ENOMEM);

        init.ops = &plt_clk_ops;
        init.flags = 0;
        init.parent_names = parent_names;
        init.num_parents = num_parents;

        pclk->hw.init = &init;
        pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
        spin_lock_init(&pclk->lock);

        /*
         * If the clock was already enabled by the firmware mark it as critical
         * to avoid it being gated by the clock framework if no driver owns it.
         */
        if (plt_clk_is_enabled(&pclk->hw))
                init.flags |= CLK_IS_CRITICAL;

        ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
        if (ret) {
                pclk = ERR_PTR(ret);
                goto err_free_init;
        }

        pclk->lookup = clkdev_hw_create(&pclk->hw, init.name, NULL);
        if (!pclk->lookup) {
                pclk = ERR_PTR(-ENOMEM);
                goto err_free_init;
        }

err_free_init:
        kfree(init.name);
        return pclk;
}

static void plt_clk_unregister(struct clk_plt *pclk)
{
        clkdev_drop(pclk->lookup);
}

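/*
 * Register one fixed-rate parent clock described by the PMC platform data and
 * create a matching clkdev entry for it.
 */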
static struct clk_plt_fixed *plt_clk_register_fixed_rate(struct platform_device *pdev,
                                                          const char *name,
                                                          const char *parent_name,
                                                          unsigned long fixed_rate)
{
        struct clk_plt_fixed *pclk;

        pclk = devm_kzalloc(&pdev->dev, sizeof(*pclk), GFP_KERNEL);
        if (!pclk)
                return ERR_PTR(-ENOMEM);

        pclk->clk = clk_hw_register_fixed_rate(&pdev->dev, name, parent_name,
                                               0, fixed_rate);
        if (IS_ERR(pclk->clk))
                return ERR_CAST(pclk->clk);

        pclk->lookup = clkdev_hw_create(pclk->clk, name, NULL);
        if (!pclk->lookup) {
                clk_hw_unregister_fixed_rate(pclk->clk);
                return ERR_PTR(-ENOMEM);
        }

        return pclk;
}

static void plt_clk_unregister_fixed_rate(struct clk_plt_fixed *pclk)
{
        clkdev_drop(pclk->lookup);
        clk_hw_unregister_fixed_rate(pclk->clk);
}

static void plt_clk_unregister_fixed_rate_loop(struct clk_plt_data *data,
                                               unsigned int i)
{
        while (i--)
                plt_clk_unregister_fixed_rate(data->parents[i]);
}

static void plt_clk_free_parent_names_loop(const char **parent_names,
                                           unsigned int i)
{
        while (i--)
                kfree_const(parent_names[i]);
        kfree(parent_names);
}

static void plt_clk_unregister_loop(struct clk_plt_data *data,
                                    unsigned int i)
{
        while (i--)
                plt_clk_unregister(data->clks[i]);
}

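/*
 * Register a fixed-rate clock for every entry of the name-terminated pmc_clk
 * table and return the array of parent names to use when registering the
 * platform clocks; the caller frees the names once it is done with them.
 */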
static const char **plt_clk_register_parents(struct platform_device *pdev,
                                             struct clk_plt_data *data,
                                             const struct pmc_clk *clks)
{
        const char **parent_names;
        unsigned int i;
        int err;
        int nparents = 0;

        data->nparents = 0;
        while (clks[nparents].name)
                nparents++;

        data->parents = devm_kcalloc(&pdev->dev, nparents,
                                     sizeof(*data->parents), GFP_KERNEL);
        if (!data->parents)
                return ERR_PTR(-ENOMEM);

        parent_names = kcalloc(nparents, sizeof(*parent_names),
                               GFP_KERNEL);
        if (!parent_names)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < nparents; i++) {
                data->parents[i] =
                        plt_clk_register_fixed_rate(pdev, clks[i].name,
                                                    clks[i].parent_name,
                                                    clks[i].freq);
                if (IS_ERR(data->parents[i])) {
                        err = PTR_ERR(data->parents[i]);
                        goto err_unreg;
                }
                parent_names[i] = kstrdup_const(clks[i].name, GFP_KERNEL);
        }

        data->nparents = nparents;
        return parent_names;

err_unreg:
        plt_clk_unregister_fixed_rate_loop(data, i);
        plt_clk_free_parent_names_loop(parent_names, i);
        return ERR_PTR(err);
}

static void plt_clk_unregister_parents(struct clk_plt_data *data)
{
        plt_clk_unregister_fixed_rate_loop(data, data->nparents);
}

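/*
 * Probe: register the fixed-rate parents, then the PMC_CLK_NUM gated/muxed
 * platform clocks, and finally add an "mclk" clkdev alias for pmc_plt_clk_3
 * so consumers can request that clock by name.
 */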
static int plt_clk_probe(struct platform_device *pdev)
{
        const struct pmc_clk_data *pmc_data;
        const char **parent_names;
        struct clk_plt_data *data;
        unsigned int i;
        int err;

        pmc_data = dev_get_platdata(&pdev->dev);
        if (!pmc_data || !pmc_data->clks)
                return -EINVAL;

        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        parent_names = plt_clk_register_parents(pdev, data, pmc_data->clks);
        if (IS_ERR(parent_names))
                return PTR_ERR(parent_names);

        for (i = 0; i < PMC_CLK_NUM; i++) {
                data->clks[i] = plt_clk_register(pdev, i, pmc_data->base,
                                                 parent_names, data->nparents);
                if (IS_ERR(data->clks[i])) {
                        err = PTR_ERR(data->clks[i]);
                        goto err_unreg_clk_plt;
                }
        }
        data->mclk_lookup = clkdev_hw_create(&data->clks[3]->hw, "mclk", NULL);
        if (!data->mclk_lookup) {
                err = -ENOMEM;
                goto err_unreg_clk_plt;
        }

        plt_clk_free_parent_names_loop(parent_names, data->nparents);

        platform_set_drvdata(pdev, data);
        return 0;

err_unreg_clk_plt:
        plt_clk_unregister_loop(data, i);
        plt_clk_unregister_parents(data);
        plt_clk_free_parent_names_loop(parent_names, data->nparents);
        return err;
}

static int plt_clk_remove(struct platform_device *pdev)
{
        struct clk_plt_data *data;

        data = platform_get_drvdata(pdev);

        clkdev_drop(data->mclk_lookup);
        plt_clk_unregister_loop(data, PMC_CLK_NUM);
        plt_clk_unregister_parents(data);
        return 0;
}

static struct platform_driver plt_clk_driver = {
        .driver = {
                .name = "clk-pmc-atom",
        },
        .probe = plt_clk_probe,
        .remove = plt_clk_remove,
};
builtin_platform_driver(plt_clk_driver);