Commit | Line | Data |
---|---|---|
c92e08e3 AV |
1 | /* CMSE wrapper function used to save, clear and restore callee saved registers |
2 | for cmse_nonsecure_call's. | |
3 | ||
83ffe9cd | 4 | Copyright (C) 2016-2023 Free Software Foundation, Inc. |
c92e08e3 AV |
5 | Contributed by ARM Ltd. |
6 | ||
7 | This file is free software; you can redistribute it and/or modify it | |
8 | under the terms of the GNU General Public License as published by the | |
9 | Free Software Foundation; either version 3, or (at your option) any | |
10 | later version. | |
11 | ||
12 | This file is distributed in the hope that it will be useful, but | |
13 | WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
15 | General Public License for more details. | |
16 | ||
17 | Under Section 7 of GPL version 3, you are granted additional | |
18 | permissions described in the GCC Runtime Library Exception, version | |
19 | 3.1, as published by the Free Software Foundation. | |
20 | ||
21 | You should have received a copy of the GNU General Public License and | |
22 | a copy of the GCC Runtime Library Exception along with this program; | |
23 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
24 | <http://www.gnu.org/licenses/>. */ | |
25 | ||
26 | .syntax unified | |
f0cd49c5 | 27 | #ifdef __ARM_PCS_VFP |
c5ed0148 | 28 | # if (__ARM_FP & 0x8) || (__ARM_FEATURE_MVE & 1) |
f0cd49c5 RE |
29 | .fpu fpv5-d16 |
30 | # else | |
31 | .fpu fpv4-sp-d16 | |
32 | # endif | |
33 | #endif | |
34 | ||
c92e08e3 AV |
35 | .thumb
36 | .global __gnu_cmse_nonsecure_call | 
/* __gnu_cmse_nonsecure_call
   Entry: r4 = address of the non-secure function to invoke via BLXNS.
   Saves the callee-saved core registers (and, under the hard-float ABI,
   d8-d15 plus a scrubbed FPSCR), then overwrites every register that does
   not carry call arguments with the value already in r4 so that no
   secure-state data leaks across the security transition.  r0-r3 are left
   untouched throughout: they hold the call's arguments and, on return,
   its result.  Everything saved is restored after the callee returns.  */
37 | __gnu_cmse_nonsecure_call: | 
38 | #if defined(__ARM_ARCH_8M_MAIN__) | 
39 | push {r5-r11,lr} | 
40 | mov r7, r4 | 
41 | mov r8, r4 | 
42 | mov r9, r4 | 
43 | mov r10, r4 | 
44 | mov r11, r4 | 
45 | mov ip, r4 | 
46 | ||
47 | /* Save and clear callee-saved registers only if we are dealing with hard float | 
48 | ABI. The unused caller-saved registers have already been cleared by GCC | 
49 | generated code. */ | 
50 | #ifdef __ARM_PCS_VFP | 
51 | vpush.f64 {d8-d15} | 
52 | mov r5, #0 | 
53 | vmov d8, r5, r5 | 
/* Single-precision-only FPU (__ARM_FP bit 2, no double support):
   clear s18-s31 pairwise; d8 (= s16/s17) was already zeroed above.  */
54 | #if __ARM_FP & 0x04 | 
55 | vmov s18, s19, r5, r5 | 
56 | vmov s20, s21, r5, r5 | 
57 | vmov s22, s23, r5, r5 | 
58 | vmov s24, s25, r5, r5 | 
59 | vmov s26, s27, r5, r5 | 
60 | vmov s28, s29, r5, r5 | 
61 | vmov s30, s31, r5, r5 | 
c5ed0148 | 62 | #elif (__ARM_FP & 0x8) || (__ARM_FEATURE_MVE & 1)
c92e08e3 AV |
63 | vmov.f64 d9, d8
64 | vmov.f64 d10, d8 | 
65 | vmov.f64 d11, d8 | 
66 | vmov.f64 d12, d8 | 
67 | vmov.f64 d13, d8 | 
68 | vmov.f64 d14, d8 | 
69 | vmov.f64 d15, d8 | 
70 | #else | 
71 | #error "Half precision implementation not supported." | 
72 | #endif | 
73 | /* Clear the cumulative exception-status bits (0-4,7) and the | 
74 | condition code bits (28-31) of the FPSCR. */ | 
75 | vmrs r5, fpscr | 
/* r6 = 0x0FFFFF60 (movw #65376 = 0xFF60, movt #4095 = 0x0FFF): every
   bit set except 0-4, 7 and 28-31, so the AND below clears exactly the
   FPSCR bits named in the comment above.  */
76 | movw r6, #65376 | 
77 | movt r6, #4095 | 
78 | ands r5, r6 | 
79 | vmsr fpscr, r5 | 
80 | ||
81 | /* We are not dealing with hard float ABI, so we can safely use the vlstm and | 
82 | vlldm instructions without needing to preserve the registers used for | 
83 | argument passing. */ | 
84 | #else | 
85 | sub sp, sp, #0x88 /* Reserve stack space to save all floating point | 
86 | registers, including FPSCR. */ | 
87 | vlstm sp /* Lazy store and clearance of d0-d16 and FPSCR. */ | 
88 | #endif /* __ARM_PCS_VFP */ | 
89 | ||
90 | /* Make sure to clear the 'GE' bits of the APSR register if 32-bit SIMD | 
91 | instructions are available. */ | 
92 | #if defined(__ARM_FEATURE_SIMD32) | 
93 | msr APSR_nzcvqg, r4 | 
94 | #else | 
95 | msr APSR_nzcvq, r4 | 
96 | #endif | 
97 | ||
/* r5/r6 served as scratch above (FPSCR masking); overwrite them with
   r4's value too so no secure data remains before the transition.  */
98 | mov r5, r4 | 
99 | mov r6, r4 | 
100 | blxns r4 | 
101 | ||
102 | #ifdef __ARM_PCS_VFP | 
103 | vpop.f64 {d8-d15} | 
104 | #else | 
574e7950 RE |
105 | /* VLLDM erratum mitigation sequence. */
106 | mrs r5, control | 
107 | tst r5, #8 /* CONTROL_S.SFPA */ | 
108 | it ne | 
/* Encoded directly because some assemblers reject a conditional
   vmov of this form; it forces FP-state materialisation before the
   lazy restore below.  */
109 | .inst.w 0xeeb00a40 /* vmovne s0, s0 */ | 
c92e08e3 AV |
110 | vlldm sp /* Lazy restore of d0-d16 and FPSCR. */
111 | add sp, sp, #0x88 /* Free space used to save floating point registers. */ | 
112 | #endif /* __ARM_PCS_VFP */ | 
113 | ||
114 | pop {r5-r11, pc} | 
115 | ||
/* Armv8-M Baseline: push/pop cannot encode the high registers, so
   r8-r11 are staged through r5-r7 (and r5 alone for r11) on the way
   in, and unstaged in the mirror-image order on the way out.  */
116 | #elif defined (__ARM_ARCH_8M_BASE__) | 
117 | push {r5-r7, lr} | 
118 | mov r5, r8 | 
119 | mov r6, r9 | 
120 | mov r7, r10 | 
121 | push {r5-r7} | 
122 | mov r5, r11 | 
123 | push {r5} | 
124 | mov r5, r4 | 
125 | mov r6, r4 | 
126 | mov r7, r4 | 
127 | mov r8, r4 | 
128 | mov r9, r4 | 
129 | mov r10, r4 | 
130 | mov r11, r4 | 
131 | mov ip, r4 | 
132 | msr APSR_nzcvq, r4 | 
133 | blxns r4 | 
134 | pop {r5} | 
135 | mov r11, r5 | 
136 | pop {r5-r7} | 
137 | mov r10, r7 | 
138 | mov r9, r6 | 
139 | mov r8, r5 | 
140 | pop {r5-r7, pc} | 
141 | ||
142 | #else | 
143 | #error "This should only be used for armv8-m base- and mainline." | 
144 | #endif