// SPDX-License-Identifier: GPL-2.0+
/*
 *
 * Clock initialization for OMAP4
 *
 * (C) Copyright 2010
 * Texas Instruments, <www.ti.com>
 *
 * Aneesh V <aneesh@ti.com>
 *
 * Based on previous work by:
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *	Rajendra Nayak <rnayak@ti.com>
 */
#include <common.h>
#include <hang.h>
#include <i2c.h>
#include <init.h>
#include <log.h>
#include <asm/omap_common.h>
#include <asm/gpio.h>
#include <asm/arch/clock.h>
#include <asm/arch/sys_proto.h>
#include <asm/utils.h>
#include <asm/omap_gpio.h>
#include <asm/emif.h>

#ifndef CONFIG_SPL_BUILD
/*
 * printing to console doesn't work unless
 * this code is executed from SPL
 */
#define printf(fmt, args...)
#define puts(s)
#endif

const u32 sys_clk_array[8] = {
	12000000,	/* 12 MHz */
	20000000,	/* 20 MHz */
	16800000,	/* 16.8 MHz */
	19200000,	/* 19.2 MHz */
	26000000,	/* 26 MHz */
	27000000,	/* 27 MHz */
	38400000,	/* 38.4 MHz */
};

static inline u32 __get_sys_clk_index(void)
{
	s8 ind;
	/*
	 * For ES1 the ROM code calibration of sys clock is not reliable
	 * due to a hw issue, so use a hard-coded value. If this value is
	 * not correct for any board, over-ride this function in the board
	 * file. From ES2.0 onwards you will get this information from
	 * CM_SYS_CLKSEL
	 */
	if (omap_revision() == OMAP4430_ES1_0)
		ind = OMAP_SYS_CLK_IND_38_4_MHZ;
	else {
		/* SYS_CLKSEL - 1 to match the dpll param array indices */
		ind = (readl((*prcm)->cm_sys_clksel) &
			CM_SYS_CLKSEL_SYS_CLKSEL_MASK) - 1;
	}
	return ind;
}

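/*
 * Weak alias: boards whose sys clock cannot be read reliably (see the ES1
 * note above) may override get_sys_clk_index() in their board file.
 */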
u32 get_sys_clk_index(void)
	__attribute__ ((weak, alias("__get_sys_clk_index")));

u32 get_sys_clk_freq(void)
{
	u8 index = get_sys_clk_index();
	return sys_clk_array[index];
}

void setup_post_dividers(u32 const base, const struct dpll_params *params)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	/* Setup post-dividers */
	if (params->m2 >= 0)
		writel(params->m2, &dpll_regs->cm_div_m2_dpll);
	if (params->m3 >= 0)
		writel(params->m3, &dpll_regs->cm_div_m3_dpll);
	if (params->m4_h11 >= 0)
		writel(params->m4_h11, &dpll_regs->cm_div_m4_h11_dpll);
	if (params->m5_h12 >= 0)
		writel(params->m5_h12, &dpll_regs->cm_div_m5_h12_dpll);
	if (params->m6_h13 >= 0)
		writel(params->m6_h13, &dpll_regs->cm_div_m6_h13_dpll);
	if (params->m7_h14 >= 0)
		writel(params->m7_h14, &dpll_regs->cm_div_m7_h14_dpll);
	if (params->h21 >= 0)
		writel(params->h21, &dpll_regs->cm_div_h21_dpll);
	if (params->h22 >= 0)
		writel(params->h22, &dpll_regs->cm_div_h22_dpll);
	if (params->h23 >= 0)
		writel(params->h23, &dpll_regs->cm_div_h23_dpll);
	if (params->h24 >= 0)
		writel(params->h24, &dpll_regs->cm_div_h24_dpll);
}

static inline void do_bypass_dpll(u32 const base)
{
	struct dpll_regs *dpll_regs = (struct dpll_regs *)base;

	clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
			CM_CLKMODE_DPLL_DPLL_EN_MASK,
			DPLL_EN_FAST_RELOCK_BYPASS <<
			CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_bypass(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!wait_on_value(ST_DPLL_CLK_MASK, 0, &dpll_regs->cm_idlest_dpll,
			   LDELAY)) {
		printf("Bypassing DPLL failed %x\n", base);
	}
}

static inline void do_lock_dpll(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
			CM_CLKMODE_DPLL_DPLL_EN_MASK,
			DPLL_EN_LOCK << CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_lock(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!wait_on_value(ST_DPLL_CLK_MASK, ST_DPLL_CLK_MASK,
			   &dpll_regs->cm_idlest_dpll, LDELAY)) {
		printf("DPLL locking failed for %x\n", base);
		hang();
	}
}

inline u32 check_for_lock(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;
	u32 lock = readl(&dpll_regs->cm_idlest_dpll) & ST_DPLL_CLK_MASK;

	return lock;
}

const struct dpll_params *get_mpu_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->mpu[sysclk_ind];
}

const struct dpll_params *get_core_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->core[sysclk_ind];
}

const struct dpll_params *get_per_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->per[sysclk_ind];
}

const struct dpll_params *get_iva_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->iva[sysclk_ind];
}

const struct dpll_params *get_usb_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->usb[sysclk_ind];
}

const struct dpll_params *get_abe_dpll_params(struct dplls const *dpll_data)
{
#ifdef CONFIG_SYS_OMAP_ABE_SYSCK
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->abe[sysclk_ind];
#else
	return dpll_data->abe;
#endif
}

static const struct dpll_params *get_ddr_dpll_params
			(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();

	if (!dpll_data->ddr)
		return NULL;
	return &dpll_data->ddr[sysclk_ind];
}

#ifdef CONFIG_DRIVER_TI_CPSW
static const struct dpll_params *get_gmac_dpll_params
			(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();

	if (!dpll_data->gmac)
		return NULL;
	return &dpll_data->gmac[sysclk_ind];
}
#endif

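/*
 * Program one DPLL. The DPLL is always put in bypass first; M and N are
 * only rewritten when they differ from the requested values, then the
 * post-dividers are set up and the DPLL is relocked if 'lock' is requested.
 */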
static void do_setup_dpll(u32 const base, const struct dpll_params *params,
			  u8 lock, char *dpll)
{
	u32 temp, M, N;
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!params)
		return;

	temp = readl(&dpll_regs->cm_clksel_dpll);

	if (check_for_lock(base)) {
		/*
		 * The Dpll has already been locked by rom code using CH.
		 * Check if M,N match the ideal nominal opp values.
		 * If they match, skip the rest; otherwise relock.
		 */
		M = (temp & CM_CLKSEL_DPLL_M_MASK) >> CM_CLKSEL_DPLL_M_SHIFT;
		N = (temp & CM_CLKSEL_DPLL_N_MASK) >> CM_CLKSEL_DPLL_N_SHIFT;
		if ((M != (params->m)) || (N != (params->n))) {
			debug("\n %s Dpll locked, but not for ideal M = %d, "
			      "N = %d values, current values are M = %d, "
			      "N = %d", dpll, params->m, params->n,
			      M, N);
		} else {
			/* Dpll locked with ideal values for nominal opps. */
			debug("\n %s Dpll already locked with ideal "
			      "nominal opp values", dpll);

			bypass_dpll(base);
			goto setup_post_dividers;
		}
	}

	bypass_dpll(base);

	/* Set M & N */
	temp &= ~CM_CLKSEL_DPLL_M_MASK;
	temp |= (params->m << CM_CLKSEL_DPLL_M_SHIFT) & CM_CLKSEL_DPLL_M_MASK;

	temp &= ~CM_CLKSEL_DPLL_N_MASK;
	temp |= (params->n << CM_CLKSEL_DPLL_N_SHIFT) & CM_CLKSEL_DPLL_N_MASK;

	writel(temp, &dpll_regs->cm_clksel_dpll);

setup_post_dividers:
	setup_post_dividers(base, params);

	/* Lock */
	if (lock)
		do_lock_dpll(base);

	/* Wait till the DPLL locks */
	if (lock)
		wait_for_lock(base);
}

u32 omap_ddr_clk(void)
{
	u32 ddr_clk, sys_clk_khz, omap_rev, divider;
	const struct dpll_params *core_dpll_params;

	omap_rev = omap_revision();
	sys_clk_khz = get_sys_clk_freq() / 1000;

	core_dpll_params = get_core_dpll_params(*dplls_data);

	debug("sys_clk %d\n ", sys_clk_khz * 1000);

	/* Find Core DPLL locked frequency first */
	ddr_clk = sys_clk_khz * 2 * core_dpll_params->m /
		  (core_dpll_params->n + 1);

	if (omap_rev < OMAP5430_ES1_0) {
		/*
		 * DDR frequency is PHY_ROOT_CLK/2
		 * PHY_ROOT_CLK = Fdpll/2/M2
		 */
		divider = 4;
	} else {
		/*
		 * DDR frequency is PHY_ROOT_CLK
		 * PHY_ROOT_CLK = Fdpll/2/M2
		 */
		divider = 2;
	}

	ddr_clk = ddr_clk / divider / core_dpll_params->m2;
	ddr_clk *= 1000;	/* convert to Hz */
	debug("ddr_clk %d\n ", ddr_clk);

	return ddr_clk;
}

/*
 * Lock MPU dpll
 *
 * Resulting MPU frequencies:
 * 4430 ES1.0	: 600 MHz
 * 4430 ES2.x	: 792 MHz (OPP Turbo)
 * 4460		: 920 MHz (OPP Turbo) - DCC disabled
 */
void configure_mpu_dpll(void)
{
	const struct dpll_params *params;
	struct dpll_regs *mpu_dpll_regs;
	u32 omap_rev;
	omap_rev = omap_revision();

	/*
	 * DCC and clock divider settings for 4460.
	 * DCC is required only above a certain frequency:
	 * above 1 GHz on the 4460, above 1.4 GHz on the 5430.
	 */
	if ((omap_rev >= OMAP4460_ES1_0) && (omap_rev < OMAP5430_ES1_0)) {
		mpu_dpll_regs =
			(struct dpll_regs *)((*prcm)->cm_clkmode_dpll_mpu);
		bypass_dpll((*prcm)->cm_clkmode_dpll_mpu);
		clrbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
			     MPU_CLKCTRL_CLKSEL_EMIF_DIV_MODE_MASK);
		setbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
			     MPU_CLKCTRL_CLKSEL_ABE_DIV_MODE_MASK);
		clrbits_le32(&mpu_dpll_regs->cm_clksel_dpll,
			     CM_CLKSEL_DCC_EN_MASK);
	}

	params = get_mpu_dpll_params(*dplls_data);

	do_setup_dpll((*prcm)->cm_clkmode_dpll_mpu, params, DPLL_LOCK, "mpu");
	debug("MPU DPLL locked\n");
}

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP) || \
	defined(CONFIG_USB_MUSB_OMAP2PLUS)
static void setup_usb_dpll(void)
{
	const struct dpll_params *params;
	u32 sys_clk_khz, sd_div, num, den;

	sys_clk_khz = get_sys_clk_freq() / 1000;
	/*
	 * USB:
	 * USB dpll is J-type. Need to set DPLL_SD_DIV for jitter correction
	 * DPLL_SD_DIV = CEILING ([DPLL_MULT/(DPLL_DIV+1)] * CLKINP / 250)
	 *	- where CLKINP is sys_clk in MHz
	 * Use CLKINP in KHz and adjust the denominator accordingly so
	 * that we have enough accuracy and at the same time no overflow
	 */
	params = get_usb_dpll_params(*dplls_data);
	num = params->m * sys_clk_khz;
	den = (params->n + 1) * 250 * 1000;
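	/* Round up the division (ceiling), as required by the formula above */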
	num += den - 1;
	sd_div = num / den;
	clrsetbits_le32((*prcm)->cm_clksel_dpll_usb,
			CM_CLKSEL_DPLL_DPLL_SD_DIV_MASK,
			sd_div << CM_CLKSEL_DPLL_DPLL_SD_DIV_SHIFT);

	/* Now setup the dpll with the regular function */
	do_setup_dpll((*prcm)->cm_clkmode_dpll_usb, params, DPLL_LOCK, "usb");
}
#endif

static void setup_dplls(void)
{
	u32 temp;
	const struct dpll_params *params;
	struct emif_reg_struct *emif = (struct emif_reg_struct *)EMIF1_BASE;

	debug("setup_dplls\n");

	/* CORE dpll */
	params = get_core_dpll_params(*dplls_data);	/* default - safest */
	/*
	 * Do not lock the core DPLL now. Just set it up.
	 * The core DPLL will be locked after setting up EMIF,
	 * using the FREQ_UPDATE method (freq_update_core()).
	 */
	if (emif_sdram_type(readl(&emif->emif_sdram_config)) ==
	    EMIF_SDRAM_TYPE_LPDDR2)
		do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
			      DPLL_NO_LOCK, "core");
	else
		do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
			      DPLL_LOCK, "core");
	/* Set the ratios for CORE_CLK, L3_CLK, L4_CLK */
	temp = (CLKSEL_CORE_X2_DIV_1 << CLKSEL_CORE_SHIFT) |
	       (CLKSEL_L3_CORE_DIV_2 << CLKSEL_L3_SHIFT) |
	       (CLKSEL_L4_L3_DIV_2 << CLKSEL_L4_SHIFT);
	writel(temp, (*prcm)->cm_clksel_core);
	debug("Core DPLL configured\n");

	/* lock PER dpll */
	params = get_per_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_per,
		      params, DPLL_LOCK, "per");
	debug("PER DPLL locked\n");

	/* MPU dpll */
	configure_mpu_dpll();

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP) || \
	defined(CONFIG_USB_MUSB_OMAP2PLUS)
	setup_usb_dpll();
#endif
	params = get_ddr_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_ddrphy,
		      params, DPLL_LOCK, "ddr");

#ifdef CONFIG_DRIVER_TI_CPSW
	params = get_gmac_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_gmac, params,
		      DPLL_LOCK, "gmac");
#endif
}

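/*
 * Convert a target voltage (in uV) into the PMIC offset code: subtract the
 * PMIC's base offset and round up to the next programmable step.
 */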
u32 get_offset_code(u32 volt_offset, struct pmic_data *pmic)
{
	u32 offset_code;

	volt_offset -= pmic->base_offset;

	offset_code = (volt_offset + pmic->step - 1) / pmic->step;

	/*
	 * Offset codes 1-6 all give the base voltage in Palmas
	 * Offset code 0 switches OFF the SMPS
	 */
	return offset_code + pmic->start_code;
}

void do_scale_vcore(u32 vcore_reg, u32 volt_mv, struct pmic_data *pmic)
{
	u32 offset_code;
	u32 offset = volt_mv;
	int ret = 0;

	if (!volt_mv)
		return;

	pmic->pmic_bus_init();
	/* See if we can first get the GPIO if needed */
	if (pmic->gpio_en)
		ret = gpio_request(pmic->gpio, "PMIC_GPIO");

	if (ret < 0) {
		printf("%s: gpio %d request failed %d\n", __func__,
		       pmic->gpio, ret);
		return;
	}

	/* Pull the GPIO low to select SET0 register, while we program SET1 */
	if (pmic->gpio_en)
		gpio_direction_output(pmic->gpio, 0);

	/* convert to uV for better accuracy in the calculations */
	offset *= 1000;

	offset_code = get_offset_code(offset, pmic);

	debug("do_scale_vcore: volt - %d offset_code - 0x%x\n", volt_mv,
	      offset_code);

	if (pmic->pmic_write(pmic->i2c_slave_addr, vcore_reg, offset_code))
		printf("Scaling voltage failed for 0x%x\n", vcore_reg);
	if (pmic->gpio_en)
		gpio_direction_output(pmic->gpio, 1);
}

int __weak get_voltrail_opp(int rail_offset)
{
	/*
	 * By default return OPP_NOM for all voltage rails.
	 */
	return OPP_NOM;
}

static u32 optimize_vcore_voltage(struct volts const *v, int opp)
{
	u32 val;

	if (!v->value[opp])
		return 0;
	if (!v->efuse.reg[opp])
		return v->value[opp];

	switch (v->efuse.reg_bits) {
	case 16:
		val = readw(v->efuse.reg[opp]);
		break;
	case 32:
		val = readl(v->efuse.reg[opp]);
		break;
	default:
		printf("Error: efuse 0x%08x bits=%d unknown\n",
		       v->efuse.reg[opp], v->efuse.reg_bits);
		return v->value[opp];
	}

	if (!val) {
		printf("Error: efuse 0x%08x bits=%d val=0, using %d\n",
		       v->efuse.reg[opp], v->efuse.reg_bits, v->value[opp]);
		return v->value[opp];
	}

	debug("%s:efuse 0x%08x bits=%d Vnom=%d, using efuse value %d\n",
	      __func__, v->efuse.reg[opp], v->efuse.reg_bits, v->value[opp],
	      val);
	return val;
}

#ifdef CONFIG_IODELAY_RECALIBRATION
void __weak recalibrate_iodelay(void)
{
}
#endif

/*
 * Setup the voltages for the main SoC core power domains.
 * We start with the maximum voltages allowed here, as set in the corresponding
 * vcores_data struct, and then scale (usually down) to the fused values that
 * are retrieved from the SoC. The scaling happens only if the efuse.reg fields
 * are initialised.
 * Rail grouping is supported for the DRA7xx SoCs only, therefore the code is
 * compiled conditionally. Note that the new code writes the scaled (or zeroed)
 * values back to the vcores_data struct for eventual reuse. Zero values mean
 * that the corresponding rails are not controlled separately, and are not sent
 * to the PMIC.
 */
void scale_vcores(struct vcores_data const *vcores)
{
	int i, opp, j, ol;
	struct volts *pv = (struct volts *)vcores;
	struct volts *px;

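	/*
	 * struct vcores_data is laid out as a sequence of struct volts
	 * entries, so walk it as a flat array of rails.
	 */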
	for (i = 0; i < (sizeof(struct vcores_data)/sizeof(struct volts)); i++) {
		opp = get_voltrail_opp(i);
		debug("%d -> ", pv->value[opp]);

		if (pv->value[opp]) {
			/* Handle non-empty members only */
			pv->value[opp] = optimize_vcore_voltage(pv, opp);
			px = (struct volts *)vcores;
			j = 0;
			while (px < pv) {
				/*
				 * Scan already handled non-empty members to see
				 * if we have a group and find the max voltage,
				 * which is set to the first occurrence of the
				 * particular SMPS; the other group voltages are
				 * zeroed.
				 */
				ol = get_voltrail_opp(j);
				if (px->value[ol] &&
				    (pv->pmic->i2c_slave_addr ==
				     px->pmic->i2c_slave_addr) &&
				    (pv->addr == px->addr)) {
					/* Same PMIC, same SMPS */
					if (pv->value[opp] > px->value[ol])
						px->value[ol] = pv->value[opp];

					pv->value[opp] = 0;
				}
				px++;
				j++;
			}
		}
		debug("%d\n", pv->value[opp]);
		pv++;
	}

	opp = get_voltrail_opp(VOLT_CORE);
	debug("cor: %d\n", vcores->core.value[opp]);
	do_scale_vcore(vcores->core.addr, vcores->core.value[opp],
		       vcores->core.pmic);
	/*
	 * IO delay recalibration should be done immediately after
	 * adjusting AVS voltages for VDD_CORE_L.
	 * Respective boards should call __recalibrate_iodelay()
	 * with proper mux, virtual and manual mode configurations.
	 */
#ifdef CONFIG_IODELAY_RECALIBRATION
	recalibrate_iodelay();
#endif

	opp = get_voltrail_opp(VOLT_MPU);
	debug("mpu: %d\n", vcores->mpu.value[opp]);
	do_scale_vcore(vcores->mpu.addr, vcores->mpu.value[opp],
		       vcores->mpu.pmic);
	/* Configure MPU ABB LDO after scale */
	abb_setup(vcores->mpu.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_mpu_setup,
		  (*prcm)->prm_abbldo_mpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu_2,
		  vcores->mpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_MM);
	debug("mm: %d\n", vcores->mm.value[opp]);
	do_scale_vcore(vcores->mm.addr, vcores->mm.value[opp],
		       vcores->mm.pmic);
	/* Configure MM ABB LDO after scale */
	abb_setup(vcores->mm.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_mm_voltage_ctrl,
		  (*prcm)->prm_abbldo_mm_setup,
		  (*prcm)->prm_abbldo_mm_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->mm.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_GPU);
	debug("gpu: %d\n", vcores->gpu.value[opp]);
	do_scale_vcore(vcores->gpu.addr, vcores->gpu.value[opp],
		       vcores->gpu.pmic);
	/* Configure GPU ABB LDO after scale */
	abb_setup(vcores->gpu.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_gpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_gpu_setup,
		  (*prcm)->prm_abbldo_gpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->gpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_EVE);
	debug("eve: %d\n", vcores->eve.value[opp]);
	do_scale_vcore(vcores->eve.addr, vcores->eve.value[opp],
		       vcores->eve.pmic);
	/* Configure EVE ABB LDO after scale */
	abb_setup(vcores->eve.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_eve_voltage_ctrl,
		  (*prcm)->prm_abbldo_eve_setup,
		  (*prcm)->prm_abbldo_eve_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->eve.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_IVA);
	debug("iva: %d\n", vcores->iva.value[opp]);
	do_scale_vcore(vcores->iva.addr, vcores->iva.value[opp],
		       vcores->iva.pmic);
	/* Configure IVA ABB LDO after scale */
	abb_setup(vcores->iva.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_iva_voltage_ctrl,
		  (*prcm)->prm_abbldo_iva_setup,
		  (*prcm)->prm_abbldo_iva_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->iva.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);
}

static inline void enable_clock_domain(u32 const clkctrl_reg, u32 enable_mode)
{
	clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
			enable_mode << CD_CLKCTRL_CLKTRCTRL_SHIFT);
	debug("Enable clock domain - %x\n", clkctrl_reg);
}

static inline void disable_clock_domain(u32 const clkctrl_reg)
{
	clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_SW_SLEEP <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);
	debug("Disable clock domain - %x\n", clkctrl_reg);
}

static inline void wait_for_clk_enable(u32 clkctrl_addr)
{
	u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_DISABLED;
	u32 bound = LDELAY;

	while ((idlest == MODULE_CLKCTRL_IDLEST_DISABLED) ||
	       (idlest == MODULE_CLKCTRL_IDLEST_TRANSITIONING)) {
		clkctrl = readl(clkctrl_addr);
		idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
			 MODULE_CLKCTRL_IDLEST_SHIFT;
		if (--bound == 0) {
			printf("Clock enable failed for 0x%x idlest 0x%x\n",
			       clkctrl_addr, clkctrl);
			return;
		}
	}
}

static inline void enable_clock_module(u32 const clkctrl_addr, u32 enable_mode,
					u32 wait_for_enable)
{
	clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
			enable_mode << MODULE_CLKCTRL_MODULEMODE_SHIFT);
	debug("Enable clock module - %x\n", clkctrl_addr);
	if (wait_for_enable)
		wait_for_clk_enable(clkctrl_addr);
}

static inline void wait_for_clk_disable(u32 clkctrl_addr)
{
	u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_FULLY_FUNCTIONAL;
	u32 bound = LDELAY;

	while (idlest != MODULE_CLKCTRL_IDLEST_DISABLED) {
		clkctrl = readl(clkctrl_addr);
		idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
			 MODULE_CLKCTRL_IDLEST_SHIFT;
		if (--bound == 0) {
			printf("Clock disable failed for 0x%x idlest 0x%x\n",
			       clkctrl_addr, clkctrl);
			return;
		}
	}
}

static inline void disable_clock_module(u32 const clkctrl_addr,
					u32 wait_for_disable)
{
	clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_DISABLE <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);
	debug("Disable clock module - %x\n", clkctrl_addr);
	if (wait_for_disable)
		wait_for_clk_disable(clkctrl_addr);
}

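/*
 * Trigger the CORE DPLL shadow-register frequency update sequence: this
 * locks the CORE DPLL that setup_dplls() may have left unlocked (LPDDR2
 * case) and requests a DLL reset as part of the update.
 */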
void freq_update_core(void)
{
	u32 freq_config1 = 0;
	const struct dpll_params *core_dpll_params;
	u32 omap_rev = omap_revision();

	core_dpll_params = get_core_dpll_params(*dplls_data);
	/* Put EMIF clock domain in sw wakeup mode */
	enable_clock_domain((*prcm)->cm_memif_clkstctrl,
			    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
	wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
	wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);

	freq_config1 = SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK |
		       SHADOW_FREQ_CONFIG1_DLL_RESET_MASK;

	freq_config1 |= (DPLL_EN_LOCK << SHADOW_FREQ_CONFIG1_DPLL_EN_SHIFT) &
			SHADOW_FREQ_CONFIG1_DPLL_EN_MASK;

	freq_config1 |= (core_dpll_params->m2 <<
			SHADOW_FREQ_CONFIG1_M2_DIV_SHIFT) &
			SHADOW_FREQ_CONFIG1_M2_DIV_MASK;

	writel(freq_config1, (*prcm)->cm_shadow_freq_config1);
	if (!wait_on_value(SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK, 0,
			   (u32 *) (*prcm)->cm_shadow_freq_config1, LDELAY)) {
		puts("FREQ UPDATE procedure failed!!");
		hang();
	}

	/*
	 * Putting EMIF in HW_AUTO is seen to be causing issues with
	 * EMIF clocks and the master DLL. Keep EMIF in SW_WKUP
	 * in OMAP5430 ES1.0 silicon
	 */
	if (omap_rev != OMAP5430_ES1_0) {
		/* Put EMIF clock domain back in hw auto mode */
		enable_clock_domain((*prcm)->cm_memif_clkstctrl,
				    CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
		wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
		wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);
	}
}

void bypass_dpll(u32 const base)
{
	do_bypass_dpll(base);
	wait_for_bypass(base);
}

void lock_dpll(u32 const base)
{
	do_lock_dpll(base);
	wait_for_lock(base);
}

static void setup_clocks_for_console(void)
{
	/* Do not add any spl_debug prints in this function */
	clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_SW_WKUP <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);

	/* Enable all UARTs - console will be on one of them */
	clrsetbits_le32((*prcm)->cm_l4per_uart1_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart2_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart3_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart4_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_HW_AUTO <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);
}

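/*
 * The clk_domains / clk_modules_* arguments below are zero-terminated lists
 * of CM register addresses; iteration is capped at 'max' entries as a
 * safety bound.
 */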
void do_enable_clocks(u32 const *clk_domains,
		      u32 const *clk_modules_hw_auto,
		      u32 const *clk_modules_explicit_en,
		      u8 wait_for_enable)
{
	u32 i, max = 100;

	/* Put the clock domains in SW_WKUP mode */
	for (i = 0; (i < max) && clk_domains && clk_domains[i]; i++) {
		enable_clock_domain(clk_domains[i],
				    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
	}

	/* Clock modules that need to be put in HW_AUTO */
	for (i = 0; (i < max) && clk_modules_hw_auto &&
	     clk_modules_hw_auto[i]; i++) {
		enable_clock_module(clk_modules_hw_auto[i],
				    MODULE_CLKCTRL_MODULEMODE_HW_AUTO,
				    wait_for_enable);
	};

	/* Clock modules that need to be put in SW_EXPLICIT_EN mode */
	for (i = 0; (i < max) && clk_modules_explicit_en &&
	     clk_modules_explicit_en[i]; i++) {
		enable_clock_module(clk_modules_explicit_en[i],
				    MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN,
				    wait_for_enable);
	};

	/* Put the clock domains in HW_AUTO mode now */
	for (i = 0; (i < max) && clk_domains && clk_domains[i]; i++) {
		enable_clock_domain(clk_domains[i],
				    CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
	}
}

void do_enable_ipu_clocks(u32 const *clk_domains,
			  u32 const *clk_modules_hw_auto,
			  u32 const *clk_modules_explicit_en,
			  u8 wait_for_enable)
{
	u32 i, max = 10;

	if (!IS_ENABLED(CONFIG_REMOTEPROC_TI_IPU))
		return;

	/* Put the clock domains in SW_WKUP mode */
	for (i = 0; (i < max) && clk_domains && clk_domains[i]; i++) {
		enable_clock_domain(clk_domains[i],
				    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
	}

	/* Clock modules that need to be put in HW_AUTO */
	for (i = 0; (i < max) && clk_modules_hw_auto &&
	     clk_modules_hw_auto[i]; i++) {
		enable_clock_module(clk_modules_hw_auto[i],
				    MODULE_CLKCTRL_MODULEMODE_HW_AUTO,
				    wait_for_enable);
	};

	/* Clock modules that need to be put in SW_EXPLICIT_EN mode */
	for (i = 0; (i < max) && clk_modules_explicit_en &&
	     clk_modules_explicit_en[i]; i++) {
		enable_clock_module(clk_modules_explicit_en[i],
				    MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN,
				    wait_for_enable);
	};
}

void do_disable_clocks(u32 const *clk_domains,
		       u32 const *clk_modules_disable,
		       u8 wait_for_disable)
{
	u32 i, max = 100;

	/* Clock modules that need to be put in SW_DISABLE */
	for (i = 0; (i < max) && clk_modules_disable[i]; i++)
		disable_clock_module(clk_modules_disable[i],
				     wait_for_disable);

	/* Put the clock domains in SW_SLEEP mode */
	for (i = 0; (i < max) && clk_domains[i]; i++)
		disable_clock_domain(clk_domains[i]);
}

/**
 * setup_early_clocks() - Setup early clocks needed for SoC
 *
 * Setup clocks for console, SPL basic initialization clocks and initialize
 * the timer. This is invoked prior to prcm_init.
 */
void setup_early_clocks(void)
{
	switch (omap_hw_init_context()) {
	case OMAP_INIT_CONTEXT_SPL:
	case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
	case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
		setup_clocks_for_console();
		enable_basic_clocks();
		timer_init();
		/* Fall through */
	}
}

void prcm_init(void)
{
	switch (omap_hw_init_context()) {
	case OMAP_INIT_CONTEXT_SPL:
	case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
	case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
		scale_vcores(*omap_vcores);
		setup_dplls();
		setup_warmreset_time();
		break;
	default:
		break;
	}

	if (OMAP_INIT_CONTEXT_SPL != omap_hw_init_context())
		enable_basic_uboot_clocks();
}

#if !CONFIG_IS_ENABLED(DM_I2C)
void gpi2c_init(void)
{
	static int gpi2c = 1;

	if (gpi2c) {
		i2c_init(CONFIG_SYS_I2C_SPEED,
			 CONFIG_SYS_I2C_SLAVE);
		gpi2c = 0;
	}
}
#endif