Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

i#3544 RV64: Refine fault translation and signal handling #6461

Merged
merged 11 commits into from
Nov 28, 2023
8 changes: 8 additions & 0 deletions core/arch/arch.c
Original file line number Diff line number Diff line change
Expand Up @@ -3866,6 +3866,14 @@ set_stolen_reg_val(priv_mcontext_t *mc, reg_t newval)
{
*(reg_t *)(((byte *)mc) + opnd_get_reg_dcontext_offs(dr_reg_stolen)) = newval;
}

# ifdef RISCV64
void
set_tp_reg_val(priv_mcontext_t *mc, reg_t newval)
{
    /* Store newval into the mcontext slot that holds the app's thread
     * pointer (tp) register, located via its dcontext offset.
     */
    byte *mc_base = (byte *)mc;
    reg_t *slot = (reg_t *)(mc_base + opnd_get_reg_dcontext_offs(DR_REG_TP));
    *slot = newval;
}
# endif
#endif

#ifdef PROFILE_RDTSC
Expand Down
2 changes: 1 addition & 1 deletion core/arch/arch.h
Original file line number Diff line number Diff line change
Expand Up @@ -1569,7 +1569,7 @@ translate_x86_to_x64(dcontext_t *dcontext, instrlist_t *ilist,
DR_PARAM_INOUT instr_t **instr);
#endif

#ifdef AARCHXX
#if defined(AARCHXX) || defined(RISCV64)
bool
instr_is_ldstex_mangling(dcontext_t *dcontext, instr_t *inst);
#endif
Expand Down
4 changes: 4 additions & 0 deletions core/arch/arch_exports.h
Original file line number Diff line number Diff line change
Expand Up @@ -470,6 +470,10 @@ reg_t
get_stolen_reg_val(priv_mcontext_t *context);
void
set_stolen_reg_val(priv_mcontext_t *mc, reg_t newval);
# ifdef RISCV64
void
set_tp_reg_val(priv_mcontext_t *mc, reg_t newval);
# endif
#endif
const char *
get_branch_type_name(ibl_branch_type_t branch_type);
Expand Down
45 changes: 43 additions & 2 deletions core/arch/riscv64/mangle.c
Original file line number Diff line number Diff line change
Expand Up @@ -272,8 +272,7 @@ patch_mov_immed_arch(dcontext_t *dcontext, ptr_int_t val, byte *pc, instr_t *fir
bool
instr_check_xsp_mangling(dcontext_t *dcontext, instr_t *inst, int *xsp_adjust)
{
    /* XSP (stack-pointer) mangling does not apply to RISC-V: DR's mangling
     * on this ISA introduces no push/pop-style stack adjustments that a
     * translation walk would need to track, so there is never a match.
     */
    return false;
}

Expand Down Expand Up @@ -648,6 +647,48 @@ mangle_special_registers(dcontext_t *dcontext, instrlist_t *ilist, instr_t *inst
return next_instr;
}

/***************************************************************************
* LR/SC sequence mangling.
*/

bool
instr_is_ldstex_mangling(dcontext_t *dcontext, instr_t *instr)
{
    /* Returns whether instr is part of DR's LR/SC sequence mangling.
     * This should be kept in sync with mangle_exclusive_monitor_op().
     */
    if (!instr_is_our_mangling(instr))
        return false;

    int opc = instr_get_opcode(instr);

    /* Loads/stores of the LR/SC TLS slots through the stolen register. */
    opnd_t memop = opnd_create_null();
    if (opc == OP_sd)
        memop = instr_get_src(instr, 0);
    else if (opc == OP_ld)
        memop = instr_get_dst(instr, 0);
    if (opnd_is_base_disp(memop)) {
        ASSERT(opnd_get_index(memop) == DR_REG_NULL && opnd_get_scale(memop) == 0);
        uint offs = opnd_get_disp(memop);
        if (opnd_get_base(memop) == dr_reg_stolen && offs >= TLS_LRSC_ADDR_SLOT &&
            offs <= TLS_LRSC_SIZE_SLOT)
            return true;
    }

    ptr_int_t val;
    if (opc == OP_fence || opc == OP_bne ||
        /* Check for sc.w/d+bne+jal pattern. */
        (opc == OP_jal && instr_get_prev(instr) != NULL &&
         instr_get_opcode(instr_get_prev(instr)) == OP_bne &&
         instr_get_prev(instr_get_prev(instr)) != NULL &&
         instr_is_exclusive_store(instr_get_prev(instr_get_prev(instr)))) ||
        instr_is_exclusive_load(instr) || instr_is_exclusive_store(instr) ||
        (instr_is_mov_constant(instr, &val) &&
         /* XXX: These constant checks are fragile; should we look backward a
          * bit to check for more specific patterns?
          */
         (val == 1 /* cas fail */ || val == -1 /* reservation invalidation */ ||
          val == 4 /* lr.w/sc.w size */ || val == 8 /* lr.d/sc.d size */)))
        return true;

    return false;
}

static instr_t *
mangle_exclusive_load(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
Expand Down
2 changes: 1 addition & 1 deletion core/arch/x86_code.c
Original file line number Diff line number Diff line change
Expand Up @@ -306,7 +306,7 @@ new_thread_setup(priv_mcontext_t *mc)
ASSERT(rc != -1); /* this better be a new thread */
dcontext = get_thread_private_dcontext();
ASSERT(dcontext != NULL);
# ifdef AARCHXX
# if defined(AARCHXX) || defined(RISCV64)
set_app_lib_tls_base_from_clone_record(dcontext, crec);
# endif
# ifdef ARM
Expand Down
27 changes: 17 additions & 10 deletions core/translate.c
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,8 @@ instr_is_inline_syscall_jmp(dcontext_t *dcontext, instr_t *inst)
/* A32 uses a regular jump */
instr_get_opcode(inst) == OP_b) &&
opnd_is_instr(instr_get_target(inst)));
# elif defined(RISCV64)
return (instr_get_opcode(inst) == OP_jal && opnd_is_instr(instr_get_target(inst)));
# else
ASSERT_NOT_IMPLEMENTED(false);
return false;
Expand Down Expand Up @@ -304,14 +306,14 @@ translate_walk_track_pre_instr(dcontext_t *tdcontext, instr_t *inst,
walk->unsupported_mangle = false;
walk->xsp_adjust = 0;
for (reg_id_t r = 0; r < REG_SPILL_NUM; r++) {
#ifndef AARCHXX
#ifdef X86
/* we should have seen a restore for every spill, unless at
* fragment-ending jump to ibl, which shouldn't come here
*/
ASSERT(walk->reg_spill_offs[r] == UINT_MAX);
walk->reg_spill_offs[r] = UINT_MAX; /* be paranoid */
#else
/* On AArchXX we do spill registers across app instrs and mangle
/* On AArchXX/RISCV64 we do spill registers across app instrs and mangle
* regions, though right now only the following routines do this:
* - mangle_stolen_reg()
* - mangle_gpr_list_read()
Expand Down Expand Up @@ -399,9 +401,6 @@ translate_walk_track_post_instr(dcontext_t *tdcontext, instr_t *inst,
* comment above for post-mangling traces), and so for local
* spills like rip-rel and ind branches this is fine.
*/
#if defined(RISCV64)
ASSERT_NOT_IMPLEMENTED(false);
#endif
if (instr_is_cti(inst) &&
#ifdef X86
/* Do not reset for a trace-cmp jecxz or jmp (32-bit) or
Expand All @@ -420,10 +419,7 @@ translate_walk_track_post_instr(dcontext_t *tdcontext, instr_t *inst,
(!opnd_is_pc(instr_get_target(inst)) ||
(opnd_get_pc(instr_get_target(inst)) >= walk->start_cache &&
opnd_get_pc(instr_get_target(inst)) < walk->end_cache))))
#elif defined(RISCV64)
/* FIXME i#3544: Not implemented */
false
#else
#elif defined(AARCHXX)
/* Do not reset for cbnz/bne in ldstex mangling, nor for the b after strex. */
!(instr_get_opcode(inst) == OP_cbnz ||
(instr_get_opcode(inst) == OP_b &&
Expand All @@ -432,6 +428,17 @@ translate_walk_track_post_instr(dcontext_t *tdcontext, instr_t *inst,
(instr_get_opcode(inst) == OP_b &&
(instr_get_prev(inst) != NULL &&
instr_is_exclusive_store(instr_get_prev(inst)))))
#elif defined(RISCV64)
/* Do not reset for bne in LR/SC mangling, nor for the jal after SC.
* This should be kept in sync with mangle_exclusive_monitor_op().
*/
!(instr_get_opcode(inst) == OP_bne ||
(instr_get_opcode(inst) == OP_jal && instr_get_prev(inst) != NULL &&
instr_get_opcode(instr_get_prev(inst)) == OP_bne &&
instr_get_prev(instr_get_prev(inst)) != NULL &&
instr_is_exclusive_store(instr_get_prev(instr_get_prev(inst)))))
#else
# error Unsupported architecture
#endif
) {
/* FIXME i#1551: add ARM version of the series of trace cti checks above */
Expand Down Expand Up @@ -547,7 +554,7 @@ translate_walk_track_post_instr(dcontext_t *tdcontext, instr_t *inst,
/* nothing to do */
}
#endif
#ifdef AARCHXX
#if defined(AARCHXX) || defined(RISCV64)
else if (instr_is_ldstex_mangling(tdcontext, inst)) {
/* nothing to do */
}
Expand Down
1 change: 1 addition & 0 deletions core/unix/os_public.h
Original file line number Diff line number Diff line change
Expand Up @@ -205,6 +205,7 @@ typedef kernel_sigcontext_t sigcontext_t;
# define SC_SYSNUM_REG SC_R7
# define SC_RETURN_REG SC_R0
#elif defined(RISCV64)
# define SC_TP SC_FIELD(sc_regs.tp)
# define SC_A0 SC_FIELD(sc_regs.a0)
# define SC_A1 SC_FIELD(sc_regs.a1)
# define SC_A2 SC_FIELD(sc_regs.a2)
Expand Down
64 changes: 47 additions & 17 deletions core/unix/signal.c
Original file line number Diff line number Diff line change
Expand Up @@ -3051,20 +3051,36 @@ mcontext_to_ucontext(kernel_ucontext_t *uc, priv_mcontext_t *mc)
mcontext_to_sigcontext(&sc_full, mc, DR_MC_ALL);
}

#ifdef AARCHXX
#if defined(AARCHXX) || defined(RISCV64)
static void
set_sigcxt_stolen_reg(sigcontext_t *sc, reg_t val)
{
    /* The GPRs are laid out contiguously in the sigcontext; index from the
     * first numbered register (r0 on AArchXX, a0 on RISC-V) to reach the
     * stolen register's slot.
     */
    reg_t *gprs = &sc->IF_AARCHXX_ELSE(SC_R0, SC_A0);
    gprs[dr_reg_stolen - IF_AARCHXX_ELSE(DR_REG_R0, DR_REG_A0)] = val;
}

static reg_t
get_sigcxt_stolen_reg(sigcontext_t *sc)
{
    /* Mirror of set_sigcxt_stolen_reg(): read the stolen register's slot
     * out of the contiguous GPR array in the sigcontext.
     */
    reg_t *gprs = &sc->IF_AARCHXX_ELSE(SC_R0, SC_A0);
    return gprs[dr_reg_stolen - IF_AARCHXX_ELSE(DR_REG_R0, DR_REG_A0)];
}

# ifdef RISCV64
/* Writes val into the thread-pointer (tp) register slot of the signal context. */
static void
set_sigcxt_tp_reg(sigcontext_t *sc, reg_t val)
{
    sc->SC_TP = val;
}

# ifndef AARCH64
/* Returns the thread-pointer (tp) register value stored in the signal context. */
static reg_t
get_sigcxt_tp_reg(sigcontext_t *sc)
{
    return sc->SC_TP;
}
# endif

# ifdef ARM
static dr_isa_mode_t
get_pc_mode_from_cpsr(sigcontext_t *sc)
{
Expand Down Expand Up @@ -3251,7 +3267,11 @@ thread_set_self_context(void *cxt)
#elif defined(ARM)
asm("ldr " ASM_XSP ", %0" : : "m"(xsp_for_sigreturn));
asm("b dynamorio_sigreturn");
#endif /* X86/AARCH64/ARM */
#elif defined(RISCV)
ASSERT_NOT_TESTED();
asm("addi " ASM_XSP ", %0, 0" : : "r"(xsp_for_sigreturn));
asm("j dynamorio_sigreturn");
#endif /* X86/AARCH64/ARM/RISCV64 */
ASSERT_NOT_REACHED();
}

Expand Down Expand Up @@ -3370,7 +3390,6 @@ sig_has_restorer(thread_sig_info_t *info, int sig)
# elif defined(RISCV64)
static const byte SIGRET_NONRT[8] = { 0 }; /* unused */
static const byte SIGRET_RT[8] = { 0 }; /* unused */
;
# endif
byte buf[MAX(sizeof(SIGRET_NONRT), sizeof(SIGRET_RT))] = { 0 };
if (d_r_safe_read(info->sighand->action[sig]->restorer, sizeof(buf), buf) &&
Expand Down Expand Up @@ -4048,7 +4067,7 @@ transfer_from_sig_handler_to_fcache_return(dcontext_t *dcontext, kernel_ucontext
* still go to the private fcache_return for simplicity.
*/
sc->SC_XIP = (ptr_uint_t)fcache_return_routine(dcontext);
#if defined(AARCHXX)
#if defined(AARCHXX) || defined(RISCV64)
/* We do not have to set dr_reg_stolen in dcontext's mcontext here
* because dcontext's mcontext is stale and we used the mcontext
* created from recreate_app_state_internal with the original sigcontext.
Expand All @@ -4062,12 +4081,14 @@ transfer_from_sig_handler_to_fcache_return(dcontext_t *dcontext, kernel_ucontext
dcontext->local_state->spill_space.reg_stolen = get_sigcxt_stolen_reg(sc);
/* Now put DR's base in the sigcontext. */
set_sigcxt_stolen_reg(sc, (reg_t)*get_dr_tls_base_addr());
# ifndef AARCH64
# ifdef RISCV64
set_sigcxt_tp_reg(sc, (reg_t)read_thread_register(TLS_REG_LIB));
# endif

# ifdef ARM
/* We're going to our fcache_return gencode which uses DEFAULT_ISA_MODE */
set_pc_mode_in_cpsr(sc, DEFAULT_ISA_MODE);
# endif
#elif defined(RISCV64)
ASSERT_NOT_IMPLEMENTED(false);
#endif

#if defined(X64) || defined(ARM)
Expand Down Expand Up @@ -4626,7 +4647,7 @@ adjust_syscall_for_restart(dcontext_t *dcontext, thread_sig_info_t *info, int si
} else {
ASSERT_NOT_REACHED(); /* Inlined syscalls no longer come here. */
}
#ifdef AARCHXX
#if defined(AARCHXX) || defined(RISCV64)
/* dr_reg_stolen is holding DR's TLS on receiving a signal,
* so we need to put the app's reg value into the ucontext instead.
* The translation process normally does this for us, but here we're doing
Expand Down Expand Up @@ -4734,7 +4755,8 @@ find_next_fragment_from_gencode(dcontext_t *dcontext, sigcontext_t *sc)
if (f == NULL && sc->SC_XCX != 0)
f = fragment_lookup(dcontext, (app_pc)sc->SC_XCX);
#elif defined(RISCV64)
/* FIXME i#3544: Not implemented */
/* FIXME i#3544: Not implemented */
ASSERT_NOT_IMPLEMENTED(false);
#else
# error Unsupported arch.
#endif
Expand Down Expand Up @@ -6292,7 +6314,9 @@ execute_handler_from_dispatch(dcontext_t *dcontext, int sig)
dump_sigcontext(dcontext, sc);
LOG(THREAD, LOG_ASYNCH, 3, "\n");
}
IF_AARCHXX(ASSERT(get_sigcxt_stolen_reg(sc) != (reg_t)*get_dr_tls_base_addr()));
# if defined(AARCHXX) || defined(RISCV64)
ASSERT(get_sigcxt_stolen_reg(sc) != (reg_t)*get_dr_tls_base_addr());
# endif
#endif
/* FIXME: other state? debug regs?
* if no syscall allowed between main_ (when frame created) and
Expand Down Expand Up @@ -7370,26 +7394,32 @@ handle_sigreturn(dcontext_t *dcontext, void *ucxt_param, int style)
* look like whatever would happen to the app...
*/
ASSERT((app_pc)sc->SC_XIP != next_pc);
# if defined(AARCHXX)
# if defined(AARCHXX) || defined(RISCV64)
ASSERT(get_sigcxt_stolen_reg(sc) != (reg_t)*get_dr_tls_base_addr());
/* We're called from DR and are not yet in the cache, so we want to set the
* mcontext slot, not the TLS slot, to set the stolen reg value.
*/
set_stolen_reg_val(get_mcontext(dcontext), get_sigcxt_stolen_reg(sc));
/* The linkstub expects DR's TLS to be in the actual register. */
set_sigcxt_stolen_reg(sc, (reg_t)*get_dr_tls_base_addr());
# ifdef AARCH64

# ifdef RISCV64
set_tp_reg_val(get_mcontext(dcontext), get_sigcxt_tp_reg(sc));
set_sigcxt_tp_reg(sc, (reg_t)read_thread_register(TLS_REG_LIB));
# endif

# if defined(AARCH64)
/* On entry to the do_syscall gencode, we save X1 into TLS_REG1_SLOT.
* Then the sigreturn would redirect the flow to the fcache_return gencode.
* In fcache_return it recovers the values of x0 and x1 from TLS_SLOT 0 and 1.
*/
get_mcontext(dcontext)->r1 = sc->SC_FIELD_AARCH64(1);
# elif defined(RISCV64)
get_mcontext(dcontext)->a1 = sc->SC_FIELD(sc_regs.a1);
# else
/* We're going to our fcache_return gencode which uses DEFAULT_ISA_MODE */
set_pc_mode_in_cpsr(sc, DEFAULT_ISA_MODE);
# endif
# elif defined(RISCV64)
ASSERT_NOT_IMPLEMENTED(false);
# endif
#endif

Expand Down
34 changes: 33 additions & 1 deletion core/unix/signal_linux_riscv64.c
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,39 @@ save_fpstate(dcontext_t *dcontext, sigframe_rt_t *frame)
void
dump_sigcontext(dcontext_t *dcontext, sigcontext_t *sc)
{
/* Log one general-purpose register from the signal context; the stringized
 * name yields output identical to writing each LOG call out by hand.
 */
#define DUMP_SC_GPR(name) \
    LOG(THREAD, LOG_ASYNCH, 1, "\t" #name " = " PFX "\n", sc->sc_regs.name)
    DUMP_SC_GPR(pc);
    DUMP_SC_GPR(ra);
    DUMP_SC_GPR(sp);
    DUMP_SC_GPR(gp);
    DUMP_SC_GPR(tp);
    DUMP_SC_GPR(t0);
    DUMP_SC_GPR(t1);
    DUMP_SC_GPR(t2);
    DUMP_SC_GPR(s0);
    DUMP_SC_GPR(s1);
    DUMP_SC_GPR(a0);
    DUMP_SC_GPR(a1);
    DUMP_SC_GPR(a2);
    DUMP_SC_GPR(a3);
    DUMP_SC_GPR(a4);
    DUMP_SC_GPR(a5);
    DUMP_SC_GPR(a6);
    DUMP_SC_GPR(a7);
    DUMP_SC_GPR(s2);
    DUMP_SC_GPR(s3);
    DUMP_SC_GPR(s4);
    DUMP_SC_GPR(s5);
    DUMP_SC_GPR(s6);
    DUMP_SC_GPR(s7);
    DUMP_SC_GPR(s8);
    DUMP_SC_GPR(s9);
    DUMP_SC_GPR(s10);
    DUMP_SC_GPR(s11);
    DUMP_SC_GPR(t3);
    DUMP_SC_GPR(t4);
    DUMP_SC_GPR(t5);
    DUMP_SC_GPR(t6);
#undef DUMP_SC_GPR
}
#endif /* DEBUG */

Expand Down
Loading
Loading