// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-pxa/pxa3xx.c
 *
 * code specific to pxa3xx aka Monahans
 *
 * Copyright (C) 2006 Marvell International Ltd.
 *
 * 2007-09-02: eric miao <eric.miao@marvell.com>
 *             initial version
 */
#include <linux/dmaengine.h>
#include <linux/dma/pxa-dma.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio-pxa.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/syscore_ops.h>
#include <linux/platform_data/i2c-pxa.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/soc/pxa/cpu.h>
#include <linux/clk/pxa.h>

#include <asm/mach/map.h>
#include <asm/suspend.h>
#include <mach/pxa3xx-regs.h>
#include <mach/reset.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include "pm.h"
#include "addr-map.h"
#include <mach/smemc.h>
#include <mach/irqs.h>

#include "generic.h"
#include "devices.h"

#define PECR_IE(n)      ((1 << ((n) * 2)) << 28)
#define PECR_IS(n)      ((1 << ((n) * 2)) << 29)
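/*
 * PECR carries the interrupt-enable (bits 28/30) and interrupt-status
 * (bits 29/31) fields for the two external wakeup pins, as encoded by
 * the macros above.
 */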

extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));

/*
 * NAND NFC: DFI bus arbitration subset
 */
#define NDCR                    (*(volatile u32 __iomem*)(NAND_VIRT + 0))
#define NDCR_ND_ARB_EN          (1 << 12)
#define NDCR_ND_ARB_CNTL        (1 << 19)
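/*
 * NDCR is reached through the static NAND_VIRT mapping created in
 * pxa3xx_io_desc[] below; only the DFI arbitration bits are touched here.
 */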

#define CKEN_BOOT       11      /* < Boot rom clock enable */
#define CKEN_TPM        19      /* < TPM clock enable */
#define CKEN_HSIO2      41      /* < HSIO2 clock enable */

#ifdef CONFIG_PM

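/* on-chip SRAM; the standby entry/resume stub is copied here and run from it */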
#define ISRAM_START     0x5c000000
#define ISRAM_SIZE      SZ_256K

static void __iomem *sram;
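/* accumulated ADxER wakeup mask, programmed into AD2D0ER/AD3ER on suspend entry */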
static unsigned long wakeup_src;

/*
 * Enter a standby mode (S0D1C2 or S0D2C2). Upon wakeup, the dynamic
 * memory controller has to be reinitialised, so we place some code
 * in the SRAM to perform this function.
 *
 * We disable FIQs across the standby - otherwise, we might receive a
 * FIQ while the SDRAM is unavailable.
 */
static void pxa3xx_cpu_standby(unsigned int pwrmode)
{
        void (*fn)(unsigned int) = (void __force *)(sram + 0x8000);

        memcpy_toio(sram + 0x8000, pm_enter_standby_start,
                    pm_enter_standby_end - pm_enter_standby_start);

        AD2D0SR = ~0;
        AD2D1SR = ~0;
        AD2D0ER = wakeup_src;
        AD2D1ER = 0;
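        /*
         * Self-writes clear the write-1-to-clear status bits in ASCR/ARSR
         * (see the DxS note in pxa3xx_init() below).
         */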
        ASCR = ASCR;
        ARSR = ARSR;

        local_fiq_disable();
        fn(pwrmode);
        local_fiq_enable();

        AD2D0ER = 0;
        AD2D1ER = 0;
}

/*
 * NOTE: currently, the OBM (OEM Boot Module) binary that comes with the
 * PXA3xx development kits assumes that the resume process continues from
 * the address stored in the first 4 bytes of SDRAM. The PSPR register is
 * used privately by the BootROM and the OBM, and _must_ be set to
 * 0x5c014000 for the moment.
 */
static void pxa3xx_cpu_pm_suspend(void)
{
        volatile unsigned long *p = (volatile void *)0xc0000000;
        unsigned long saved_data = *p;
#ifndef CONFIG_IWMMXT
        u64 acc0;

        asm volatile(".arch_extension xscale\n\t"
                     "mra %Q0, %R0, acc0" : "=r" (acc0));
#endif

        /* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */
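        /* clock IDs 0..31 live in CKENA, 32..63 in CKENB (hence the & 0x1f) */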
        CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM);
        CKENB |= 1 << (CKEN_HSIO2 & 0x1f);

        /* clear and setup wakeup source */
        AD3SR = ~0;
        AD3ER = wakeup_src;
        ASCR = ASCR;
        ARSR = ARSR;

        PCFR |= (1u << 13);                     /* L1_DIS */
        PCFR &= ~((1u << 12) | (1u << 1));      /* L0_EN | SL_ROD */

        PSPR = 0x5c014000;

        /* overwrite with the resume address */
        *p = __pa_symbol(cpu_resume);

        cpu_suspend(0, pxa3xx_finish_suspend);

        *p = saved_data;

        AD3ER = 0;

#ifndef CONFIG_IWMMXT
        asm volatile(".arch_extension xscale\n\t"
                     "mar acc0, %Q0, %R0" : "=r" (acc0));
#endif
}

static void pxa3xx_cpu_pm_enter(suspend_state_t state)
{
        /*
         * Don't sleep if no wakeup sources are defined
         */
        if (wakeup_src == 0) {
                printk(KERN_ERR "Not suspending: no wakeup sources\n");
                return;
        }

        switch (state) {
        case PM_SUSPEND_STANDBY:
                pxa3xx_cpu_standby(PXA3xx_PM_S0D2C2);
                break;

        case PM_SUSPEND_MEM:
                pxa3xx_cpu_pm_suspend();
                break;
        }
}

static int pxa3xx_cpu_pm_valid(suspend_state_t state)
{
        return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY;
}

static struct pxa_cpu_pm_fns pxa3xx_cpu_pm_fns = {
        .valid          = pxa3xx_cpu_pm_valid,
        .enter          = pxa3xx_cpu_pm_enter,
};

static void __init pxa3xx_init_pm(void)
{
        sram = ioremap(ISRAM_START, ISRAM_SIZE);
        if (!sram) {
                printk(KERN_ERR "Unable to map ISRAM: disabling standby/suspend\n");
                return;
        }

        /*
         * Since we copy wakeup code into the SRAM, we need to ensure
         * that it is preserved over the low power modes. Note: bit 8
         * is undocumented in the developer manual, but must be set.
         */
        AD1R |= ADXR_L2 | ADXR_R0;
        AD2R |= ADXR_L2 | ADXR_R0;
        AD3R |= ADXR_L2 | ADXR_R0;

        /*
         * Clear the resume enable registers.
         */
        AD1D0ER = 0;
        AD2D0ER = 0;
        AD2D1ER = 0;
        AD3ER = 0;

        pxa_cpu_pm_fns = &pxa3xx_cpu_pm_fns;
}

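/*
 * Translate a peripheral IRQ into its ADxER wakeup-enable bit and record it
 * in wakeup_src; the accumulated mask is applied on the next standby/sleep
 * entry.
 */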
static int pxa3xx_set_wake(struct irq_data *d, unsigned int on)
{
        unsigned long flags, mask = 0;

        switch (d->irq) {
        case IRQ_SSP3:
                mask = ADXER_MFP_WSSP3;
                break;
        case IRQ_MSL:
                mask = ADXER_WMSL0;
                break;
        case IRQ_USBH2:
        case IRQ_USBH1:
                mask = ADXER_WUSBH;
                break;
        case IRQ_KEYPAD:
                mask = ADXER_WKP;
                break;
        case IRQ_AC97:
                mask = ADXER_MFP_WAC97;
                break;
        case IRQ_USIM:
                mask = ADXER_WUSIM0;
                break;
        case IRQ_SSP2:
                mask = ADXER_MFP_WSSP2;
                break;
        case IRQ_I2C:
                mask = ADXER_MFP_WI2C;
                break;
        case IRQ_STUART:
                mask = ADXER_MFP_WUART3;
                break;
        case IRQ_BTUART:
                mask = ADXER_MFP_WUART2;
                break;
        case IRQ_FFUART:
                mask = ADXER_MFP_WUART1;
                break;
        case IRQ_MMC:
                mask = ADXER_MFP_WMMC1;
                break;
        case IRQ_SSP:
                mask = ADXER_MFP_WSSP1;
                break;
        case IRQ_RTCAlrm:
                mask = ADXER_WRTC;
                break;
        case IRQ_SSP4:
                mask = ADXER_MFP_WSSP4;
                break;
        case IRQ_TSI:
                mask = ADXER_WTSI;
                break;
        case IRQ_USIM2:
                mask = ADXER_WUSIM1;
                break;
        case IRQ_MMC2:
                mask = ADXER_MFP_WMMC2;
                break;
        case IRQ_NAND:
                mask = ADXER_MFP_WFLASH;
                break;
        case IRQ_USB2:
                mask = ADXER_WUSB2;
                break;
        case IRQ_WAKEUP0:
                mask = ADXER_WEXTWAKE0;
                break;
        case IRQ_WAKEUP1:
                mask = ADXER_WEXTWAKE1;
                break;
        case IRQ_MMC3:
                mask = ADXER_MFP_GEN12;
                break;
        default:
                return -EINVAL;
        }

        local_irq_save(flags);
        if (on)
                wakeup_src |= mask;
        else
                wakeup_src &= ~mask;
        local_irq_restore(flags);

        return 0;
}
#else
static inline void pxa3xx_init_pm(void) {}
#define pxa3xx_set_wake NULL
#endif

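/*
 * The two external wakeup pins are exposed as IRQ_WAKEUP0/1: their
 * enable/status bits live in PECR (PECR_IE/PECR_IS above) and their
 * wake edge sensitivity is selected through PWER.
 */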
static void pxa_ack_ext_wakeup(struct irq_data *d)
{
        PECR |= PECR_IS(d->irq - IRQ_WAKEUP0);
}

static void pxa_mask_ext_wakeup(struct irq_data *d)
{
        pxa_mask_irq(d);
        PECR &= ~PECR_IE(d->irq - IRQ_WAKEUP0);
}

static void pxa_unmask_ext_wakeup(struct irq_data *d)
{
        pxa_unmask_irq(d);
        PECR |= PECR_IE(d->irq - IRQ_WAKEUP0);
}

static int pxa_set_ext_wakeup_type(struct irq_data *d, unsigned int flow_type)
{
        if (flow_type & IRQ_TYPE_EDGE_RISING)
                PWER |= 1 << (d->irq - IRQ_WAKEUP0);

        if (flow_type & IRQ_TYPE_EDGE_FALLING)
                PWER |= 1 << (d->irq - IRQ_WAKEUP0 + 2);

        return 0;
}

static struct irq_chip pxa_ext_wakeup_chip = {
        .name           = "WAKEUP",
        .irq_ack        = pxa_ack_ext_wakeup,
        .irq_mask       = pxa_mask_ext_wakeup,
        .irq_unmask     = pxa_unmask_ext_wakeup,
        .irq_set_type   = pxa_set_ext_wakeup_type,
};

static void __init pxa_init_ext_wakeup_irq(int (*fn)(struct irq_data *,
                                           unsigned int))
{
        int irq;

        for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) {
                irq_set_chip_and_handler(irq, &pxa_ext_wakeup_chip,
                                         handle_edge_irq);
                irq_clear_status_flags(irq, IRQ_NOREQUEST);
        }

        pxa_ext_wakeup_chip.irq_set_wake = fn;
}

static void __init __pxa3xx_init_irq(void)
{
        /* enable CP6 access */
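        /* CP6 gives coprocessor access to the interrupt controller registers (e.g. ICHP) */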
        u32 value;
        __asm__ __volatile__("mrc p15, 0, %0, c15, c1, 0\n": "=r"(value));
        value |= (1 << 6);
        __asm__ __volatile__("mcr p15, 0, %0, c15, c1, 0\n": :"r"(value));

        pxa_init_ext_wakeup_irq(pxa3xx_set_wake);
}

void __init pxa3xx_init_irq(void)
{
        __pxa3xx_init_irq();
        pxa_init_irq(56, pxa3xx_set_wake);
}

#ifdef CONFIG_OF
static int __init
pxa3xx_dt_init_irq(struct device_node *node, struct device_node *parent)
{
        __pxa3xx_init_irq();
        pxa_dt_irq_init(pxa3xx_set_wake);
        set_handle_irq(ichp_handle_irq);

        return 0;
}
IRQCHIP_DECLARE(pxa3xx_intc, "marvell,pxa-intc", pxa3xx_dt_init_irq);
#endif  /* CONFIG_OF */

static struct map_desc pxa3xx_io_desc[] __initdata = {
        {       /* Mem Ctl */
                .virtual = (unsigned long)SMEMC_VIRT,
                .pfn = __phys_to_pfn(PXA3XX_SMEMC_BASE),
                .length = SMEMC_SIZE,
                .type = MT_DEVICE
        }, {
                .virtual = (unsigned long)NAND_VIRT,
                .pfn = __phys_to_pfn(NAND_PHYS),
                .length = NAND_SIZE,
                .type = MT_DEVICE
        },
};

void __init pxa3xx_map_io(void)
{
        pxa_map_io();
        iotable_init(ARRAY_AND_SIZE(pxa3xx_io_desc));
        pxa3xx_get_clk_frequency_khz(1);
}

/*
 * device registration specific to PXA3xx.
 */

void __init pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info)
{
        pxa_register_device(&pxa3xx_device_i2c_power, info);
}

static struct pxa_gpio_platform_data pxa3xx_gpio_pdata = {
        .irq_base       = PXA_GPIO_TO_IRQ(0),
};

static struct platform_device *devices[] __initdata = {
        &pxa27x_device_udc,
        &pxa_device_pmu,
        &pxa_device_i2s,
        &pxa_device_asoc_ssp1,
        &pxa_device_asoc_ssp2,
        &pxa_device_asoc_ssp3,
        &pxa_device_asoc_ssp4,
        &pxa_device_asoc_platform,
        &pxa_device_rtc,
        &pxa3xx_device_ssp1,
        &pxa3xx_device_ssp2,
        &pxa3xx_device_ssp3,
        &pxa3xx_device_ssp4,
        &pxa27x_device_pwm0,
        &pxa27x_device_pwm1,
};

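/* PDMA_FILTER_PARAM(prio, requestor): channel priority + DRCMR request line */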
static const struct dma_slave_map pxa3xx_slave_map[] = {
        /* PXA25x, PXA27x and PXA3xx common entries */
        { "pxa2xx-ac97", "pcm_pcm_mic_mono", PDMA_FILTER_PARAM(LOWEST, 8) },
        { "pxa2xx-ac97", "pcm_pcm_aux_mono_in", PDMA_FILTER_PARAM(LOWEST, 9) },
        { "pxa2xx-ac97", "pcm_pcm_aux_mono_out",
          PDMA_FILTER_PARAM(LOWEST, 10) },
        { "pxa2xx-ac97", "pcm_pcm_stereo_in", PDMA_FILTER_PARAM(LOWEST, 11) },
        { "pxa2xx-ac97", "pcm_pcm_stereo_out", PDMA_FILTER_PARAM(LOWEST, 12) },
        { "pxa-ssp-dai.0", "rx", PDMA_FILTER_PARAM(LOWEST, 13) },
        { "pxa-ssp-dai.0", "tx", PDMA_FILTER_PARAM(LOWEST, 14) },
        { "pxa-ssp-dai.1", "rx", PDMA_FILTER_PARAM(LOWEST, 15) },
        { "pxa-ssp-dai.1", "tx", PDMA_FILTER_PARAM(LOWEST, 16) },
        { "pxa2xx-ir", "rx", PDMA_FILTER_PARAM(LOWEST, 17) },
        { "pxa2xx-ir", "tx", PDMA_FILTER_PARAM(LOWEST, 18) },
        { "pxa2xx-mci.0", "rx", PDMA_FILTER_PARAM(LOWEST, 21) },
        { "pxa2xx-mci.0", "tx", PDMA_FILTER_PARAM(LOWEST, 22) },
        { "pxa-ssp-dai.2", "rx", PDMA_FILTER_PARAM(LOWEST, 66) },
        { "pxa-ssp-dai.2", "tx", PDMA_FILTER_PARAM(LOWEST, 67) },

        /* PXA3xx specific map */
        { "pxa-ssp-dai.3", "rx", PDMA_FILTER_PARAM(LOWEST, 2) },
        { "pxa-ssp-dai.3", "tx", PDMA_FILTER_PARAM(LOWEST, 3) },
        { "pxa2xx-mci.1", "rx", PDMA_FILTER_PARAM(LOWEST, 93) },
        { "pxa2xx-mci.1", "tx", PDMA_FILTER_PARAM(LOWEST, 94) },
        { "pxa3xx-nand", "data", PDMA_FILTER_PARAM(LOWEST, 97) },
        { "pxa2xx-mci.2", "rx", PDMA_FILTER_PARAM(LOWEST, 100) },
        { "pxa2xx-mci.2", "tx", PDMA_FILTER_PARAM(LOWEST, 101) },
};

static struct mmp_dma_platdata pxa3xx_dma_pdata = {
        .dma_channels   = 32,
        .nb_requestors  = 100,
        .slave_map      = pxa3xx_slave_map,
        .slave_map_cnt  = ARRAY_SIZE(pxa3xx_slave_map),
};

static int __init pxa3xx_init(void)
{
        int ret = 0;

        if (cpu_is_pxa3xx()) {

                pxa_register_wdt(ARSR);

                /*
                 * Clear the RDH bit every time after reset.
                 *
                 * Note: the last three DxS bits are write-1-to-clear, so take
                 * care to preserve them here in case they are referenced later.
                 */
                ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);

                /*
                 * Disable DFI bus arbitration to prevent a system bus lockup
                 * if the (otherwise unused) NAND clock is disabled while this
                 * bit remains set.
                 */
                NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL;

                pxa3xx_init_pm();

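                /* mark the external wakeup pin(s) as wake sources; only PXA320 has a second one */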
                enable_irq_wake(IRQ_WAKEUP0);
                if (cpu_is_pxa320())
                        enable_irq_wake(IRQ_WAKEUP1);

                register_syscore_ops(&pxa_irq_syscore_ops);
                register_syscore_ops(&pxa3xx_mfp_syscore_ops);

                if (of_have_populated_dt())
                        return 0;

                pxa2xx_set_dmac_info(&pxa3xx_dma_pdata);
                ret = platform_add_devices(devices, ARRAY_SIZE(devices));
                if (ret)
                        return ret;
                if (cpu_is_pxa300() || cpu_is_pxa310() || cpu_is_pxa320()) {
                        platform_device_add_data(&pxa3xx_device_gpio,
                                                 &pxa3xx_gpio_pdata,
                                                 sizeof(pxa3xx_gpio_pdata));
                        ret = platform_device_register(&pxa3xx_device_gpio);
                }
        }

        return ret;
}

postcore_initcall(pxa3xx_init);