arch/blackfin/cpu/interrupt.S
/*
 * interrupt.S - trampoline default exceptions/interrupts to C handlers
 *
 * Copyright (c) 2005-2009 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */

#include <config.h>
#include <asm/blackfin.h>
#include <asm/entry.h>
#include <asm/ptrace.h>
#include <asm/deferred.h>
#include <asm/mach-common/bits/core.h>

.text

/* default entry point for exceptions */
ENTRY(_trap)
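	/* Stash the incoming SP in the scratch register and switch to a
	 * private stack at the top of the L1 scratch SRAM so we can safely
	 * build the pt_regs frame.
	 */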
	CONFIG_BFIN_SCRATCH_REG = sp;
	sp.l = LO(L1_SRAM_SCRATCH_END - 20);
	sp.h = HI(L1_SRAM_SCRATCH_END - 20);
	SAVE_ALL_SYS

	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
	r1 = 3;		/* EVT3 space */
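	/* reserve the 12 bytes of outgoing-argument space the Blackfin C ABI
	 * expects the caller to provide around the call
	 */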
	sp += -12;
	call _trap_c;
	sp += 12;

#ifdef CONFIG_EXCEPTION_DEFER
	CC = R0 == 0;
	IF CC JUMP .Lexit_trap;

	/* To avoid double faults, lower our priority to IRQ5 */
	p4.l = lo(COREMMR_BASE);
	p4.h = hi(COREMMR_BASE);

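	/* install the deferred-exception handler as the IVG5 vector */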
	r7.h = _exception_to_level5;
	r7.l = _exception_to_level5;
	[p4 + (EVT5 - COREMMR_BASE)] = r7;

	/*
	 * Save these registers, as they are only valid in exception context
	 * (where we are now - as soon as we defer to IRQ5, they can change)
	 */
	p5.l = _deferred_regs;
	p5.h = _deferred_regs;
	r6 = [p4 + (DCPLB_FAULT_ADDR - COREMMR_BASE)];
	[p5 + (deferred_regs_DCPLB_FAULT_ADDR * 4)] = r6;

	r6 = [p4 + (ICPLB_FAULT_ADDR - COREMMR_BASE)];
	[p5 + (deferred_regs_ICPLB_FAULT_ADDR * 4)] = r6;

	/* Save the state of single stepping */
	r6 = SYSCFG;
	[p5 + (deferred_regs_SYSCFG * 4)] = r6;
	/* Clear it while we handle the exception in IRQ5 mode.
	 * RESTORE_ALL_SYS will load it, so all we need to do is store it
	 * in the right place.
	 */
	BITCLR(r6, SYSCFG_SSSTEP_P);
	[SP + PT_SYSCFG] = r6;

	/* Since we are going to clobber RETX, we need to save it */
	r6 = retx;
	[p5 + (deferred_regs_retx * 4)] = r6;

	/* Save the current IMASK, since we change it in order to jump to level 5 */
	cli r6;
	[p5 + (deferred_regs_IMASK * 4)] = r6;

	/* Disable all interrupts, but make sure level 5 is enabled so
	 * we can switch to that level.
	 */
	r6 = 0x3f;
	sti r6;

	/* Clobber RETX so we don't end up back at a faulting instruction */
	[sp + PT_RETX] = r7;

	/* In case interrupts are disabled via IPEND[4] (the global interrupt
	 * disable bit), clear it (re-enabling interrupts again) with the
	 * special sequence of pushing RETI onto the stack and discarding it.
	 * This way we can lower ourselves to IVG5 even if the exception was
	 * taken after an interrupt handler was entered but before it got a
	 * chance to re-enable global interrupts itself.
	 */
	[--sp] = reti;
	sp += 4;

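	/* pend an IVG5 interrupt; it is serviced once we return from this
	 * exception below
	 */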
	RAISE 5;
.Lexit_trap:
#endif

#if ANOMALY_05000257
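	/* anomaly 05000257 workaround: rewrite the hardware loop counters
	 * after the exception (see the Blackfin anomaly list for details)
	 */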
	R7 = LC0;
	LC0 = R7;
	R7 = LC1;
	LC1 = R7;
#endif

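	/* restore the saved context, switch back to the original stack
	 * pointer, and return from the exception
	 */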
	RESTORE_ALL_SYS
	sp = CONFIG_BFIN_SCRATCH_REG;
	rtx;
ENDPROC(_trap)

#ifdef CONFIG_EXCEPTION_DEFER
/* Deferred (IRQ5) exceptions */
ENTRY(_exception_to_level5)
	SAVE_ALL_SYS

	/* Now we have to fix things up: restore the default IVG5 vector and
	 * recover the state stashed in deferred_regs at exception time
	 */
	p4.l = lo(EVT5);
	p4.h = hi(EVT5);
	r0.l = _evt_default;
	r0.h = _evt_default;
	[p4] = r0;
	csync;

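	/* report the saved RETX (the faulting address) as the PC in this
	 * frame and restore the original single-step state for trap_c
	 */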
	p4.l = _deferred_regs;
	p4.h = _deferred_regs;
	r0 = [p4 + (deferred_regs_retx * 4)];
	[sp + PT_PC] = r0;

	r0 = [p4 + (deferred_regs_SYSCFG * 4)];
	[sp + PT_SYSCFG] = r0;

	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
	r1 = 5;		/* EVT5 space */
	sp += -12;
	call _trap_c;
	sp += 12;

	/* Restore IMASK */
	r0 = [p4 + (deferred_regs_IMASK * 4)];
	sti r0;

	RESTORE_ALL_SYS

	rti;
ENDPROC(_exception_to_level5)
#endif

/* default entry point for interrupts */
ENTRY(_evt_default)
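	/* no real handler is installed: save the machine state and hand the
	 * pt_regs frame to the C panic routine
	 */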
	SAVE_ALL_SYS
	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
	sp += -12;
	call _bfin_panic;
	sp += 12;
	RESTORE_ALL_SYS
	rti;
ENDPROC(_evt_default)

/* NMI handler */
ENTRY(_evt_nmi)
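	/* NMIs are not handled here; simply return */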
	rtn;
ENDPROC(_evt_nmi)