#define R_LARCH_ADD_ULEB128 107
#define R_LARCH_SUB_ULEB128 108
#define R_LARCH_64_PCREL 109
+#define R_LARCH_CALL36 110
+#define R_LARCH_TLS_DESC_PC_HI20 111
+#define R_LARCH_TLS_DESC_PC_LO12 112
+#define R_LARCH_TLS_DESC64_PC_LO20 113
+#define R_LARCH_TLS_DESC64_PC_HI12 114
+#define R_LARCH_TLS_DESC_HI20 115
+#define R_LARCH_TLS_DESC_LO12 116
+#define R_LARCH_TLS_DESC64_LO20 117
+#define R_LARCH_TLS_DESC64_HI12 118
+#define R_LARCH_TLS_DESC_LD 119
+#define R_LARCH_TLS_DESC_CALL 120
+#define R_LARCH_TLS_LE_HI20_R 121
+#define R_LARCH_TLS_LE_ADD_R 122
+#define R_LARCH_TLS_LE_LO12_R 123
+#define R_LARCH_TLS_LD_PCREL20_S2 124
+#define R_LARCH_TLS_GD_PCREL20_S2 125
+#define R_LARCH_TLS_DESC_PCREL20_S2 126
+#define R_LARCH_CALL30 127
+#define R_LARCH_PCADD_HI20 128
+#define R_LARCH_PCADD_LO12 129
+#define R_LARCH_GOT_PCADD_HI20 130
+#define R_LARCH_GOT_PCADD_LO12 131
+#define R_LARCH_TLS_IE_PCADD_HI20 132
+#define R_LARCH_TLS_IE_PCADD_LO12 133
+#define R_LARCH_TLS_LD_PCADD_HI20 134
+#define R_LARCH_TLS_LD_PCADD_LO12 135
+#define R_LARCH_TLS_GD_PCADD_HI20 136
+#define R_LARCH_TLS_GD_PCADD_LO12 137
+#define R_LARCH_TLS_DESC_PCADD_HI20 138
+#define R_LARCH_TLS_DESC_PCADD_LO12 139
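If the new *_PCADD_HI20/*_PCADD_LO12 pairs follow the usual LoongArch
hi20+lo12 decomposition of a PC-relative offset (as the existing
R_LARCH_PCALA_HI20/R_LARCH_PCALA_LO12 pair does), the +0x800 bias in
the high part compensates for the sign extension of the low 12 bits by
the consuming instruction. A minimal stand-alone sketch of that split
(helper names are illustrative, not part of this patch):

        #include <stdint.h>

        /* High 20 bits, biased so (hi20 << 12) + sign_extend(lo12) == offset. */
        static uint32_t reloc_hi20(int32_t offset)
        {
                return (uint32_t)(offset + 0x800) >> 12;
        }

        /* Low 12 bits, sign-extended the way the consuming instruction sees them. */
        static int32_t reloc_lo12(int32_t offset)
        {
                return (int32_t)((uint32_t)offset << 20) >> 20;
        }

For example, offset 0x12fff splits into hi20 = 0x13 and lo12 = -1, and
(0x13 << 12) + (-1) == 0x12fff.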
#ifndef ELF_ARCH
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+void loongarch_dump_regs32(u32 *uregs, const struct pt_regs *regs);
void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs);
#ifdef CONFIG_32BIT
static inline bool is_ra_save_ins(union loongarch_instruction *ip)
{
- /* st.d $ra, $sp, offset */
- return ip->reg2i12_format.opcode == std_op &&
+ const u32 opcode = IS_ENABLED(CONFIG_32BIT) ? stw_op : std_op;
+
+ /* st.w / st.d $ra, $sp, offset */
+ return ip->reg2i12_format.opcode == opcode &&
ip->reg2i12_format.rj == LOONGARCH_GPR_SP &&
ip->reg2i12_format.rd == LOONGARCH_GPR_RA &&
!is_imm12_negative(ip->reg2i12_format.immediate);
static inline bool is_stack_alloc_ins(union loongarch_instruction *ip)
{
- /* addi.d $sp, $sp, -imm */
- return ip->reg2i12_format.opcode == addid_op &&
+ const u32 opcode = IS_ENABLED(CONFIG_32BIT) ? addiw_op : addid_op;
+
+ /* addi.w / addi.d $sp, $sp, -imm */
+ return ip->reg2i12_format.opcode == opcode &&
ip->reg2i12_format.rj == LOONGARCH_GPR_SP &&
ip->reg2i12_format.rd == LOONGARCH_GPR_SP &&
is_imm12_negative(ip->reg2i12_format.immediate);
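Both predicates decode the reg2i12 instruction format, which packs rd
in bits [4:0], rj in bits [9:5], a 12-bit immediate in bits [21:10]
and a 10-bit opcode in bits [31:22]. A stand-alone C model of that
decode (a sketch; the bitfield layout mirrors union
loongarch_instruction):

        #include <stdbool.h>
        #include <stdint.h>

        struct reg2i12 {
                uint32_t rd  : 5;       /* bits [4:0]   */
                uint32_t rj  : 5;       /* bits [9:5]   */
                uint32_t imm : 12;      /* bits [21:10] */
                uint32_t op  : 10;      /* bits [31:22] */
        };

        /* The immediate is stored raw; bit 11 is its sign bit. */
        static bool imm12_negative(uint32_t imm)
        {
                return imm & 0x800;
        }

For `addi.w $sp, $sp, -16` the unwinder therefore sees rd == rj == 3
($sp) and imm == 0xff0, whose bit 11 is set.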
struct user_watch_state {
__u64 dbg_info;
struct {
+#if __BITS_PER_LONG == 32
+ __u32 addr;
+ __u32 mask;
+#else
__u64 addr;
__u64 mask;
+#endif
__u32 ctrl;
__u32 pad;
} dbg_regs[8];
struct user_watch_state_v2 {
__u64 dbg_info;
struct {
+#if __BITS_PER_LONG == 32
+ __u32 addr;
+ __u32 mask;
+#else
__u64 addr;
__u64 mask;
+#endif
__u32 ctrl;
__u32 pad;
} dbg_regs[14];
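For reference, the conditional layout shrinks each dbg_regs[] entry
from 24 bytes on a 64-bit ABI to 16 bytes on a 32-bit one. A
compile-time sketch of the implied sizes (illustrative, not part of
the patch):

        #include <stdint.h>

        struct dbg_reg_la32 { uint32_t addr, mask, ctrl, pad; };
        struct dbg_reg_la64 { uint64_t addr, mask; uint32_t ctrl, pad; };

        _Static_assert(sizeof(struct dbg_reg_la32) == 16, "LA32 entry");
        _Static_assert(sizeof(struct dbg_reg_la64) == 24, "LA64 entry");

With 14 entries, user_watch_state_v2 accordingly shrinks from
8 + 14 * 24 = 344 bytes to 8 + 14 * 16 = 232 bytes.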
EX fld.d $f31, \base, (31 * FPU_REG_WIDTH)
.endm
+#ifdef CONFIG_32BIT
+ .macro sc_save_fcc thread, tmp0, tmp1
+ movcf2gr \tmp0, $fcc0
+ move \tmp1, \tmp0
+ movcf2gr \tmp0, $fcc1
+ bstrins.w \tmp1, \tmp0, 15, 8
+ movcf2gr \tmp0, $fcc2
+ bstrins.w \tmp1, \tmp0, 23, 16
+ movcf2gr \tmp0, $fcc3
+ bstrins.w \tmp1, \tmp0, 31, 24
+ EX st.w \tmp1, \thread, THREAD_FCC
+ movcf2gr \tmp0, $fcc4
+ move \tmp1, \tmp0
+ movcf2gr \tmp0, $fcc5
+ bstrins.w \tmp1, \tmp0, 15, 8
+ movcf2gr \tmp0, $fcc6
+ bstrins.w \tmp1, \tmp0, 23, 16
+ movcf2gr \tmp0, $fcc7
+ bstrins.w \tmp1, \tmp0, 31, 24
+ EX st.w \tmp1, \thread, (THREAD_FCC + 4)
+ .endm
+
+ .macro sc_restore_fcc thread, tmp0, tmp1
+ EX ld.w \tmp0, \thread, THREAD_FCC
+ bstrpick.w \tmp1, \tmp0, 7, 0
+ movgr2cf $fcc0, \tmp1
+ bstrpick.w \tmp1, \tmp0, 15, 8
+ movgr2cf $fcc1, \tmp1
+ bstrpick.w \tmp1, \tmp0, 23, 16
+ movgr2cf $fcc2, \tmp1
+ bstrpick.w \tmp1, \tmp0, 31, 24
+ movgr2cf $fcc3, \tmp1
+ EX ld.w \tmp0, \thread, (THREAD_FCC + 4)
+ bstrpick.w \tmp1, \tmp0, 7, 0
+ movgr2cf $fcc4, \tmp1
+ bstrpick.w \tmp1, \tmp0, 15, 8
+ movgr2cf $fcc5, \tmp1
+ bstrpick.w \tmp1, \tmp0, 23, 16
+ movgr2cf $fcc6, \tmp1
+ bstrpick.w \tmp1, \tmp0, 31, 24
+ movgr2cf $fcc7, \tmp1
+ .endm
+#else
.macro sc_save_fcc base, tmp0, tmp1
movcf2gr \tmp0, $fcc0
move \tmp1, \tmp0
bstrpick.d \tmp1, \tmp0, 63, 56
movgr2cf $fcc7, \tmp1
.endm
+#endif
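Since the 64-bit bit-string instructions are unavailable on LA32, the
eight 1-bit condition flags are packed one per byte across two 32-bit
stores instead of one 64-bit store: fcc0-fcc3 at THREAD_FCC, fcc4-fcc7
at THREAD_FCC + 4. A C model of that packing (sketch only):

        #include <stdint.h>

        /* Pack four condition-flag bytes into one word, fcc[0] lowest;
         * this mirrors the bstrins.w inserts above. */
        static uint32_t pack_fcc_half(const uint8_t fcc[4])
        {
                return (uint32_t)fcc[0] |
                       (uint32_t)fcc[1] << 8 |
                       (uint32_t)fcc[2] << 16 |
                       (uint32_t)fcc[3] << 24;
        }

        /* Inverse of the above, mirroring the bstrpick.w extracts. */
        static void unpack_fcc_half(uint32_t word, uint8_t fcc[4])
        {
                for (int i = 0; i < 4; i++)
                        fcc[i] = (word >> (8 * i)) & 0xff;
        }

On a little-endian kernel the two word stores leave the THREAD_FCC
slot with the same byte order a single 64-bit store would produce.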
.macro sc_save_fcsr base, tmp0
movfcsr2gr \tmp0, fcsr0
li.w t1, -1 # SNaN
+#ifdef CONFIG_32BIT
+ movgr2fr.w $f0, t1
+ movgr2frh.w $f0, t1
+ movgr2fr.w $f1, t1
+ movgr2frh.w $f1, t1
+ movgr2fr.w $f2, t1
+ movgr2frh.w $f2, t1
+ movgr2fr.w $f3, t1
+ movgr2frh.w $f3, t1
+ movgr2fr.w $f4, t1
+ movgr2frh.w $f4, t1
+ movgr2fr.w $f5, t1
+ movgr2frh.w $f5, t1
+ movgr2fr.w $f6, t1
+ movgr2frh.w $f6, t1
+ movgr2fr.w $f7, t1
+ movgr2frh.w $f7, t1
+ movgr2fr.w $f8, t1
+ movgr2frh.w $f8, t1
+ movgr2fr.w $f9, t1
+ movgr2frh.w $f9, t1
+ movgr2fr.w $f10, t1
+ movgr2frh.w $f10, t1
+ movgr2fr.w $f11, t1
+ movgr2frh.w $f11, t1
+ movgr2fr.w $f12, t1
+ movgr2frh.w $f12, t1
+ movgr2fr.w $f13, t1
+ movgr2frh.w $f13, t1
+ movgr2fr.w $f14, t1
+ movgr2frh.w $f14, t1
+ movgr2fr.w $f15, t1
+ movgr2frh.w $f15, t1
+ movgr2fr.w $f16, t1
+ movgr2frh.w $f16, t1
+ movgr2fr.w $f17, t1
+ movgr2frh.w $f17, t1
+ movgr2fr.w $f18, t1
+ movgr2frh.w $f18, t1
+ movgr2fr.w $f19, t1
+ movgr2frh.w $f19, t1
+ movgr2fr.w $f20, t1
+ movgr2frh.w $f20, t1
+ movgr2fr.w $f21, t1
+ movgr2frh.w $f21, t1
+ movgr2fr.w $f22, t1
+ movgr2frh.w $f22, t1
+ movgr2fr.w $f23, t1
+ movgr2frh.w $f23, t1
+ movgr2fr.w $f24, t1
+ movgr2frh.w $f24, t1
+ movgr2fr.w $f25, t1
+ movgr2frh.w $f25, t1
+ movgr2fr.w $f26, t1
+ movgr2frh.w $f26, t1
+ movgr2fr.w $f27, t1
+ movgr2frh.w $f27, t1
+ movgr2fr.w $f28, t1
+ movgr2frh.w $f28, t1
+ movgr2fr.w $f29, t1
+ movgr2frh.w $f29, t1
+ movgr2fr.w $f30, t1
+ movgr2frh.w $f30, t1
+ movgr2fr.w $f31, t1
+ movgr2frh.w $f31, t1
+#else
movgr2fr.d $f0, t1
movgr2fr.d $f1, t1
movgr2fr.d $f2, t1
movgr2fr.d $f29, t1
movgr2fr.d $f30, t1
movgr2fr.d $f31, t1
+#endif
jr ra
SYM_FUNC_END(_init_fpu)
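On LA32 each 64-bit FPR is initialised in two halves because
movgr2fr.d is unavailable; writing -1 through movgr2fr.w and
movgr2frh.w composes the same all-ones pattern a single movgr2fr.d of
-1 produces on LA64. A sketch of the resulting value (illustrative):

        #include <stdint.h>
        #include <string.h>

        static double init_fpr_value(void)
        {
                uint32_t half = (uint32_t)-1;           /* li.w t1, -1 */
                uint64_t bits = (uint64_t)half << 32 | half;
                double d;

                memcpy(&d, &bits, sizeof(d));           /* all ones */
                return d;                               /* a NaN encoding */
        }

All-ones has the exponent field saturated with a non-zero mantissa, so
it decodes as a NaN and any use of a never-written register is
conspicuous rather than silently leaking stale data.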
nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace);
}
-#ifdef CONFIG_64BIT
+#ifdef CONFIG_32BIT
+void loongarch_dump_regs32(u32 *uregs, const struct pt_regs *regs)
+#else
void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs)
+#endif
{
unsigned int i;
uregs[LOONGARCH_EF_CSR_ECFG] = regs->csr_ecfg;
uregs[LOONGARCH_EF_CSR_ESTAT] = regs->csr_estat;
}
-#endif /* CONFIG_64BIT */
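A caller that must work on both ABIs could dispatch on the
configuration, mirroring the pair of declarations added earlier (a
hypothetical wrapper, not part of this patch):

        static void loongarch_dump_regs(unsigned long *uregs,
                                        const struct pt_regs *regs)
        {
                if (IS_ENABLED(CONFIG_32BIT))
                        loongarch_dump_regs32((u32 *)uregs, regs);
                else
                        loongarch_dump_regs64((u64 *)uregs, regs);
        }

Since unsigned long matches the register width on either ABI, the
casts are layout-preserving and the dead branch is discarded at
compile time.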
struct perf_event_attr attr;
/* Kernel-space address cannot be monitored by user-space */
+#ifdef CONFIG_32BIT
+ if ((unsigned long)addr >= KPRANGE0)
+ return -EINVAL;
+#else
if ((unsigned long)addr >= XKPRANGE)
return -EINVAL;
+#endif
bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
if (IS_ERR(bp))
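The guard's boundary is simply the start of the first kernel address
range for the ABI: KPRANGE0 on LA32, XKPRANGE on LA64. Expressed as a
stand-alone predicate (the helper name is hypothetical):

        /* True if addr is visible to user space and may be watched. */
        static bool hw_bp_addr_is_user(unsigned long addr)
        {
        #ifdef CONFIG_32BIT
                return addr < KPRANGE0;
        #else
                return addr < XKPRANGE;
        #endif
        }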
*/
.align 5
SYM_FUNC_START(__switch_to)
- csrrd t1, LOONGARCH_CSR_PRMD
- stptr.d t1, a0, THREAD_CSRPRMD
+#ifdef CONFIG_32BIT
+ PTR_ADDI a0, a0, TASK_STRUCT_OFFSET
+ PTR_ADDI a1, a1, TASK_STRUCT_OFFSET
+#endif
+ csrrd t1, LOONGARCH_CSR_PRMD
+ LONG_SPTR t1, a0, (THREAD_CSRPRMD - TASK_STRUCT_OFFSET)
cpu_save_nonscratch a0
- stptr.d ra, a0, THREAD_REG01
- stptr.d a3, a0, THREAD_SCHED_RA
- stptr.d a4, a0, THREAD_SCHED_CFA
+ LONG_SPTR ra, a0, (THREAD_REG01 - TASK_STRUCT_OFFSET)
+ LONG_SPTR a3, a0, (THREAD_SCHED_RA - TASK_STRUCT_OFFSET)
+ LONG_SPTR a4, a0, (THREAD_SCHED_CFA - TASK_STRUCT_OFFSET)
+
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
- la t7, __stack_chk_guard
- ldptr.d t8, a1, TASK_STACK_CANARY
- stptr.d t8, t7, 0
+ la t7, __stack_chk_guard
+ LONG_LPTR t8, a1, (TASK_STACK_CANARY - TASK_STRUCT_OFFSET)
+ LONG_SPTR t8, t7, 0
#endif
+
move tp, a2
cpu_restore_nonscratch a1
PTR_ADD t0, t0, tp
set_saved_sp t0, t1, t2
- ldptr.d t1, a1, THREAD_CSRPRMD
- csrwr t1, LOONGARCH_CSR_PRMD
+ LONG_LPTR t1, a1, (THREAD_CSRPRMD - TASK_STRUCT_OFFSET)
+ csrwr t1, LOONGARCH_CSR_PRMD
+#ifdef CONFIG_32BIT
+ PTR_ADDI a0, a0, -TASK_STRUCT_OFFSET
+#endif
jr ra
SYM_FUNC_END(__switch_to)
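The TASK_STRUCT_OFFSET rebias exists because the LA32 accessors behind
LONG_LPTR/LONG_SPTR reach a shorter signed displacement than the
64-bit ldptr.d/stptr.d forms, so large struct task_struct offsets are
folded into the base register once and subtracted back out of each
displacement. A sketch of the arithmetic, assuming a 12-bit signed
displacement and made-up offsets for illustration:

        #include <stdbool.h>
        #include <stdint.h>

        static bool fits_simm12(int32_t disp)
        {
                return disp >= -2048 && disp <= 2047;
        }

        /*
         * With hypothetical values TASK_STRUCT_OFFSET = 0x800 and
         * THREAD_CSRPRMD = 0xfd0: the raw displacement 0xfd0 (4048)
         * does not fit, but 0xfd0 - 0x800 = 0x7d0 (2000) does once
         * the base register has been advanced by 0x800.
         */

Rebasing a0/a1 on entry and undoing it on a0 before `jr ra` keeps the
returned previous-task pointer correct.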