/* arch/csky/kernel/entry.S — C-SKY low-level kernel entry code:
 * TLB refill handlers, system-call entry, traps, IRQs and context switch. */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3
4 #include <linux/linkage.h>
5 #include <abi/entry.h>
6 #include <abi/pgtable-bits.h>
7 #include <asm/errno.h>
8 #include <asm/setup.h>
9 #include <asm/unistd.h>
10 #include <asm/asm-offsets.h>
11 #include <linux/threads.h>
12 #include <asm/setup.h>
13 #include <asm/page.h>
14 #include <asm/thread_info.h>
15
16 #define PTE_INDX_MSK 0xffc
17 #define PTE_INDX_SHIFT 10
18 #define _PGDIR_SHIFT 22
19
/*
 * zero_fp: clear r8 (used as the frame pointer when stack tracing is
 * enabled) on kernel entry so a stack walk terminates cleanly instead of
 * chasing a stale user-mode value. Compiles to nothing without
 * CONFIG_STACKTRACE.
 */
20 .macro zero_fp
21 #ifdef CONFIG_STACKTRACE
22 movi r8, 0
23 #endif
24 .endm
25
/*
 * tlbop_begin: common fast path for the three TLB exception entries
 * (csky_tlbinvalidl / csky_tlbinvalids / csky_tlbmodified).
 *
 *   \name  - label jumped to when the fast path cannot handle the fault
 *            (falls through to SAVE_ALL and then the tlbop_end slow path)
 *   \val0  - permission bit that must be set together with _PAGE_PRESENT
 *            for the fast path to apply (_PAGE_READ or _PAGE_WRITE)
 *   \val1, \val2 - extra PTE flag bits to set on success (valid/accessed
 *            or dirty/modified)
 *
 * The handler walks the two-level page table by hand, updates the PTE
 * flags in place, and returns with rte without building a pt_regs frame.
 * Only a3/r6/a2 are used; they are parked in the ss2-ss4 scratch control
 * registers so the fast path costs no stack traffic.
 */
26 .macro tlbop_begin name, val0, val1, val2
27 ENTRY(csky_\name)
/* Preserve the three working registers in supervisor scratch regs. */
28 mtcr a3, ss2
29 mtcr r6, ss3
30 mtcr a2, ss4
31
/* r6 = current PGD base, a3 = faulting virtual address (MEH). */
32 RD_PGDR r6
33 RD_MEH a3
34 #ifdef CONFIG_CPU_HAS_TLBI
/* Drop any stale entry for this VA, then pick the kernel PGD for
 * kernel-half addresses (bit 31 set). */
35 tlbi.vaas a3
36 sync.is
37
38 btsti a3, 31
39 bf 1f
40 RD_PGDR_K r6
41 1:
42 #else
/* No TLBI instruction: invalidate via the MCIR control register. */
43 bgeni a2, 31
44 WR_MCIR a2
45 bgeni a2, 25
46 WR_MCIR a2
47 #endif
/* Convert the PGD pointer from physical to the kernel virtual mapping
 * (subtract va_pa_offset, set bit 31). */
48 bclri r6, 0
49 lrw a2, va_pa_offset
50 ld.w a2, (a2, 0)
51 subu r6, a2
52 bseti r6, 31
53
/* r6 = pgd[va >> _PGDIR_SHIFT] (4-byte entries). */
54 mov a2, a3
55 lsri a2, _PGDIR_SHIFT
56 lsli a2, 2
57 addu r6, a2
58 ldw r6, (r6)
59
/* Same phys->virt conversion for the PTE page just loaded. */
60 lrw a2, va_pa_offset
61 ld.w a2, (a2, 0)
62 subu r6, a2
63 bseti r6, 31
64
/* r6 = &pte; a3 = *pte. */
65 lsri a3, PTE_INDX_SHIFT
66 lrw a2, PTE_INDX_MSK
67 and a3, a2
68 addu r6, a3
69 ldw a3, (r6)
70
/* Fast path only if the PTE is present with the required permission;
 * otherwise take the C page-fault slow path via \name. */
71 movi a2, (_PAGE_PRESENT | \val0)
72 and a3, a2
73 cmpne a3, a2
74 bt \name
75
76 /* First read/write the page, just update the flags */
77 ldw a3, (r6)
78 bgeni a2, PAGE_VALID_BIT
79 bseti a2, PAGE_ACCESSED_BIT
80 bseti a2, \val1
81 bseti a2, \val2
82 or a3, a2
83 stw a3, (r6)
84
85 /* Some cpu tlb-hardrefill bypass the cache */
86 #ifdef CONFIG_CPU_NEED_TLBSYNC
/* Push the updated PTE line out so the hardware refill sees it
 * (cr22/cr17 cache-operation registers; values per C-SKY cache spec). */
87 movi a2, 0x22
88 bseti a2, 6
89 mtcr r6, cr22
90 mtcr a2, cr17
91 sync
92 #endif
93
/* Fast-path exit: restore scratch regs and return from exception. */
94 mfcr a3, ss2
95 mfcr r6, ss3
96 mfcr a2, ss4
97 rte
/* Slow path: restore regs, build a full pt_regs frame, then fall
 * through into the code emitted by the matching tlbop_end. */
98 \name:
99 mfcr a3, ss2
100 mfcr r6, ss3
101 mfcr a2, ss4
102 SAVE_ALL 0
103 .endm
/*
 * tlbop_end: slow-path tail shared by the TLB handlers. Re-enables
 * exceptions/interrupts and calls the C fault handler:
 *   do_page_fault(pt_regs *regs /* a0 = sp */, \is_write /* a1 */)
 * then exits through the common ret_from_exception path.
 * Must directly follow a tlbop_begin so the \name label falls into it.
 */
104 .macro tlbop_end is_write
105 zero_fp
106 RD_MEH a2
107 psrset ee, ie
108 mov a0, sp
109 movi a1, \is_write
110 jbsr do_page_fault
111 jmpi ret_from_exception
112 .endm
113
114 .text
115
/* TLB read-miss: needs _PAGE_READ; marks the page valid + accessed. */
116 tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
117 tlbop_end 0
118
/* TLB write-miss: needs _PAGE_WRITE; marks the page dirty + modified. */
119 tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
120 tlbop_end 1
121
/* Write to a clean page: same flags as a write-miss. Without ll/sc
 * (LDSTEX) the trap-based cmpxchg sequence may fault here and needs
 * its PC fixed up before the generic fault handling runs. */
122 tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
123 #ifndef CONFIG_CPU_HAS_LDSTEX
124 jbsr csky_cmpxchg_fixup
125 #endif
126 tlbop_end 1
127
/*
 * csky_systemcall: trap 0 entry point.
 * Validates syscallid against __NR_syscalls, dispatches through
 * sys_call_table, stores the return value into the saved a0 slot of
 * pt_regs, and leaves via ret_from_exception. If any of the syscall
 * tracing TIF flags are set, detours through csky_syscall_trace below.
 * On ABIv2 the 5th/6th arguments (r4/r5) are passed on the stack.
 */
128 ENTRY(csky_systemcall)
129 SAVE_ALL TRAP0_SIZE
130 zero_fp
/* NOTE(review): mainline kernels gate rseq debugging on
 * CONFIG_DEBUG_RSEQ; confirm CONFIG_RSEQ_DEBUG is a real symbol here,
 * otherwise this rseq_syscall() check is never compiled in. */
131 #ifdef CONFIG_RSEQ_DEBUG
132 mov a0, sp
133 jbsr rseq_syscall
134 #endif
135 psrset ee, ie
136
/* Reject out-of-range syscall numbers (unsigned compare). */
137 lrw r11, __NR_syscalls
138 cmphs syscallid, r11 /* Check nr of syscall */
139 bt ret_from_exception
140
/* r11 = sys_call_table[syscallid]; bail out on a NULL entry. */
141 lrw r13, sys_call_table
142 ixw r13, syscallid
143 ldw r11, (r13)
144 cmpnei r11, 0
145 bf ret_from_exception
146
/* thread_info sits at the base of the kernel stack: mask sp down. */
147 mov r9, sp
148 bmaski r10, THREAD_SHIFT
149 andn r9, r10
150 ldw r12, (r9, TINFO_FLAGS)
151 ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
152 cmpnei r12, 0
153 bt csky_syscall_trace
154 #if defined(__CSKYABIV2__)
/* ABIv2: args 5/6 (r4, r5) go on the stack for the C handler. */
155 subi sp, 8
156 stw r5, (sp, 0x4)
157 stw r4, (sp, 0x0)
158 jsr r11 /* Do system call */
159 addi sp, 8
160 #else
161 jsr r11
162 #endif
163 stw a0, (sp, LSAVE_A0) /* Save return value */
164 jmpi ret_from_exception
165
/*
 * csky_syscall_trace: traced-syscall path. Calls syscall_trace_enter,
 * reloads the (possibly modified) arguments from pt_regs, performs the
 * call, stores the result, then reports via syscall_trace_exit.
 */
166 csky_syscall_trace:
167 mov a0, sp /* sp = pt_regs pointer */
168 jbsr syscall_trace_enter
169 /* Prepare args before do system call */
170 ldw a0, (sp, LSAVE_A0)
171 ldw a1, (sp, LSAVE_A1)
172 ldw a2, (sp, LSAVE_A2)
173 ldw a3, (sp, LSAVE_A3)
174 #if defined(__CSKYABIV2__)
175 subi sp, 8
176 stw r5, (sp, 0x4)
177 stw r4, (sp, 0x0)
178 #else
/* ABIv1: args 5/6 are passed in r6/r7. */
179 ldw r6, (sp, LSAVE_A4)
180 ldw r7, (sp, LSAVE_A5)
181 #endif
182 jsr r11 /* Do system call */
183 #if defined(__CSKYABIV2__)
184 addi sp, 8
185 #endif
186 stw a0, (sp, LSAVE_A0) /* Save return value */
187
188 mov a0, sp /* right now, sp --> pt_regs */
189 jbsr syscall_trace_exit
190 br ret_from_exception
191
/*
 * ret_from_kernel_thread: first code run by a new kernel thread.
 * Finishes the context switch (schedule_tail), then calls the thread
 * function r9 with its argument r10 (placed there by copy_thread).
 */
192 ENTRY(ret_from_kernel_thread)
193 jbsr schedule_tail
194 mov a0, r10
195 jsr r9
196 jbsr ret_from_exception
197
/*
 * ret_from_fork: first user-bound code run by a newly forked task.
 * After schedule_tail, report syscall exit to the tracer if any of the
 * syscall tracing TIF flags are set, then fall through into
 * ret_from_exception to return to user space.
 */
198 ENTRY(ret_from_fork)
199 jbsr schedule_tail
/* thread_info = sp masked down to the stack base. */
200 mov r9, sp
201 bmaski r10, THREAD_SHIFT
202 andn r9, r10
203 ldw r12, (r9, TINFO_FLAGS)
204 ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
205 cmpnei r12, 0
206 bf ret_from_exception
207 mov a0, sp /* sp = pt_regs pointer */
208 jbsr syscall_trace_exit
209
/*
 * ret_from_exception: common exit path for syscalls, traps and IRQs.
 * If returning to user mode (saved PSR bit 31 clear), check the work
 * flags (signals, notify-resume, reschedule, uprobes) and divert to
 * exit_work; otherwise restore registers and rte straight back.
 */
210 ret_from_exception:
/* Saved PSR bit 31 set => we came from kernel mode: skip work check. */
211 ld syscallid, (sp, LSAVE_PSR)
212 btsti syscallid, 31
213 bt 1f
214
215 /*
216 * Load address of current->thread_info, Then get address of task_struct
217 * Get need_resched in task_struct
218 */
219 mov r9, sp
220 bmaski r10, THREAD_SHIFT
221 andn r9, r10
222
223 ldw r12, (r9, TINFO_FLAGS)
224 andi r12, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | _TIF_UPROBE)
225 cmpnei r12, 0
226 bt exit_work
227 1:
228 #ifdef CONFIG_TRACE_IRQFLAGS
/* Saved PSR bit 6 is the interrupt-enable bit: only trace if the
 * context we return to had interrupts on. */
229 ld r10, (sp, LSAVE_PSR)
230 btsti r10, 6
231 bf 2f
232 jbsr trace_hardirqs_on
233 2:
234 #endif
235 RESTORE_ALL
236
/*
 * exit_work: handle pending work before returning to user space.
 * lr is pointed back at ret_from_exception so that schedule() /
 * do_notify_resume() return there and the flags get re-checked.
 * r12 still holds the masked TIF work bits from ret_from_exception.
 */
237 exit_work:
238 lrw syscallid, ret_from_exception
239 mov lr, syscallid
240
241 btsti r12, TIF_NEED_RESCHED
242 bt work_resched
243
/* do_notify_resume(regs /* a0 */, thread_info_flags /* a1 */) */
244 mov a0, sp
245 mov a1, r12
246 jmpi do_notify_resume
247
248 work_resched:
249 jmpi schedule
250
/*
 * csky_trap: generic exception entry. Saves full state, re-enables
 * exceptions (but not interrupts), and hands pt_regs to the C handler
 * trap_c(), exiting through the common return path.
 */
251 ENTRY(csky_trap)
252 SAVE_ALL 0
253 zero_fp
254 psrset ee
255 mov a0, sp /* Push Stack pointer arg */
256 jbsr trap_c /* Call C-level trap handler */
257 jmpi ret_from_exception
258
259 /*
260  * Prototype from libc for abiv1:
261  * register unsigned int __result asm("a0");
262  * asm( "trap 3" :"=r"(__result)::);
263  *
264  * Returns the current task's TLS pointer in a0. Minimal trap handler:
265  * no pt_regs frame is built; only epc is bumped past the trap
266  * instruction and the value is read from thread_info.
267  */
264 ENTRY(csky_get_tls)
265 USPTOKSP
266
267 /* increase epc for continue */
268 mfcr a0, epc
269 addi a0, TRAP0_SIZE
270 mtcr a0, epc
271
272 /* get current task thread_info with kernel 8K stack */
/* subi/addi keep the mask correct when sp is exactly on the stack
 * top boundary. */
273 bmaski a0, THREAD_SHIFT
274 not a0
275 subi sp, 1
276 and a0, sp
277 addi sp, 1
278
279 /* get tls */
280 ldw a0, (a0, TINFO_TP_VALUE)
281
282 KSPTOUSP
283 rte
284
/*
 * csky_irq: hardware interrupt entry. Saves state and calls the C
 * dispatcher csky_do_IRQ(pt_regs *). With CONFIG_PREEMPTION the
 * preempt count is bumped by hand around the handler, and
 * preempt_schedule_irq() is invoked when the count drops to zero with
 * TIF_NEED_RESCHED set.
 */
285 ENTRY(csky_irq)
286 SAVE_ALL 0
287 zero_fp
288 psrset ee
289
290 #ifdef CONFIG_TRACE_IRQFLAGS
291 jbsr trace_hardirqs_off
292 #endif
293
294 #ifdef CONFIG_PREEMPTION
295 mov r9, sp /* Get current stack pointer */
296 bmaski r10, THREAD_SHIFT
297 andn r9, r10 /* Get thread_info */
298
299 /*
300 * Get task_struct->stack.preempt_count for current,
301 * and increase 1.
302 */
303 ldw r12, (r9, TINFO_PREEMPT)
304 addi r12, 1
305 stw r12, (r9, TINFO_PREEMPT)
306 #endif
307
308 mov a0, sp
309 jbsr csky_do_IRQ
310
311 #ifdef CONFIG_PREEMPTION
/* Drop the preempt count; preempt only when it reaches zero AND a
 * reschedule was requested. r9/r12 survive the call only because the
 * kernel build keeps them callee-saved here — NOTE(review): r12 is
 * reloaded-from implicitly; confirm against the csky ABI. */
312 subi r12, 1
313 stw r12, (r9, TINFO_PREEMPT)
314 cmpnei r12, 0
315 bt 2f
316 ldw r12, (r9, TINFO_FLAGS)
317 btsti r12, TIF_NEED_RESCHED
318 bf 2f
319 jbsr preempt_schedule_irq /* irq en/disable is done inside */
320 #endif
321 2:
322 jmpi ret_from_exception
323
324 /*
325 * a0 = prev task_struct *
326 * a1 = next task_struct *
327 * a0 = return next
328 *
329 * Context switch: saves prev's PSR, callee-saved registers
330 * (SAVE_SWITCH_STACK) and kernel sp into prev->thread, then restores
331 * the same set from next->thread. Interrupts are disabled (PSR bit 6
332 * cleared) for the duration of the stack switch. On ABIv2 the tls
333 * register is reloaded from next's thread_info.
334 */
329 ENTRY(__switch_to)
330 lrw a3, TASK_THREAD
331 addu a3, a0
332
333 mfcr a2, psr /* Save PSR value */
334 stw a2, (a3, THREAD_SR) /* Save PSR in task struct */
335 bclri a2, 6 /* Disable interrupts */
336 mtcr a2, psr
337
338 SAVE_SWITCH_STACK
339
340 stw sp, (a3, THREAD_KSP)
341
342 /* Set up next process to run */
343 lrw a3, TASK_THREAD
344 addu a3, a1
345
346 ldw sp, (a3, THREAD_KSP) /* Set next kernel sp */
347
/* Restoring next's saved PSR also restores its interrupt state. */
348 ldw a2, (a3, THREAD_SR) /* Set next PSR */
349 mtcr a2, psr
350
351 #if defined(__CSKYABIV2__)
352 addi r7, a1, TASK_THREAD_INFO
353 ldw tls, (r7, TINFO_TP_VALUE)
354 #endif
355
356 RESTORE_SWITCH_STACK
357
358 rts
359 ENDPROC(__switch_to)
359 ENDPROC(__switch_to)