plugins: Use emit_before_op for PLUGIN_GEN_FROM_INSN

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson
Date:   2024-03-14 11:27:35 -10:00
parent 74bb8acc6a
commit ac977170bf
3 changed files with 67 additions and 228 deletions

accel/tcg/plugin-gen.c

@@ -98,30 +98,6 @@ void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
void *userdata)
{ }
static void gen_empty_udata_cb(void (*gen_helper)(TCGv_i32, TCGv_ptr))
{
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
TCGv_ptr udata = tcg_temp_ebb_new_ptr();
tcg_gen_movi_ptr(udata, 0);
tcg_gen_ld_i32(cpu_index, tcg_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
gen_helper(cpu_index, udata);
tcg_temp_free_ptr(udata);
tcg_temp_free_i32(cpu_index);
}
static void gen_empty_udata_cb_no_wg(void)
{
gen_empty_udata_cb(gen_helper_plugin_vcpu_udata_cb_no_wg);
}
static void gen_empty_udata_cb_no_rwg(void)
{
gen_empty_udata_cb(gen_helper_plugin_vcpu_udata_cb_no_rwg);
}
/*
* For now we only support addi_i64.
* When we support more ops, we can generate one empty inline cb for each.
@@ -170,51 +146,19 @@ static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)
tcg_temp_free_i32(cpu_index);
}
/*
* Share the same function for enable/disable. When enabling, the NULL
* pointer will be overwritten later.
*/
static void gen_empty_mem_helper(void)
{
TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
tcg_gen_movi_ptr(ptr, 0);
tcg_gen_st_ptr(ptr, tcg_env, offsetof(CPUState, plugin_mem_cbs) -
offsetof(ArchCPU, env));
tcg_temp_free_ptr(ptr);
}
static void gen_plugin_cb_start(enum plugin_gen_from from,
enum plugin_gen_cb type, unsigned wr)
{
tcg_gen_plugin_cb_start(from, type, wr);
}
static void gen_wrapped(enum plugin_gen_from from,
enum plugin_gen_cb type, void (*func)(void))
{
gen_plugin_cb_start(from, type, 0);
func();
tcg_gen_plugin_cb_end();
}
static void plugin_gen_empty_callback(enum plugin_gen_from from)
{
switch (from) {
case PLUGIN_GEN_AFTER_INSN:
case PLUGIN_GEN_FROM_TB:
tcg_gen_plugin_cb(from);
break;
case PLUGIN_GEN_FROM_INSN:
/*
* Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being
* the first callback of an instruction
*/
gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER,
gen_empty_mem_helper);
gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb_no_rwg);
gen_wrapped(from, PLUGIN_GEN_CB_UDATA_R, gen_empty_udata_cb_no_wg);
gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
tcg_gen_plugin_cb(from);
break;
default:
g_assert_not_reached();
@@ -368,18 +312,6 @@ static TCGOp *copy_mul_i32(TCGOp **begin_op, TCGOp *op, uint32_t v)
return op;
}
static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
{
if (UINTPTR_MAX == UINT32_MAX) {
/* st_i32 */
op = copy_op(begin_op, op, INDEX_op_st_i32);
} else {
/* st_i64 */
op = copy_st_i64(begin_op, op);
}
return op;
}
static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *func, int *cb_idx)
{
TCGOp *old_op;
@@ -403,32 +335,6 @@ static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *func, int *cb_idx)
return op;
}
/*
* When we append/replace ops here we are sensitive to changing patterns of
* TCGOps generated by the tcg_gen_FOO calls when we generated the
* empty callbacks. This will assert very quickly in a debug build as
* we assert the ops we are replacing are the correct ones.
*/
static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
TCGOp *begin_op, TCGOp *op, int *cb_idx)
{
/* const_ptr */
op = copy_const_ptr(&begin_op, op, cb->userp);
/* copy the ld_i32, but note that we only have to copy it once */
if (*cb_idx == -1) {
op = copy_op(&begin_op, op, INDEX_op_ld_i32);
} else {
begin_op = QTAILQ_NEXT(begin_op, link);
tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
}
/* call */
op = copy_call(&begin_op, op, cb->regular.f.vcpu_udata, cb_idx);
return op;
}
static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
TCGOp *begin_op, TCGOp *op,
int *unused)
@@ -482,11 +388,6 @@ typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb,
TCGOp *begin_op, TCGOp *op, int *intp);
typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb);
static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
return true;
}
static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
{
int w;
@@ -524,12 +425,6 @@ static void inject_cb_type(const GArray *cbs, TCGOp *begin_op,
rm_ops_range(begin_op, end_op);
}
static void
inject_udata_cb(const GArray *cbs, TCGOp *begin_op)
{
inject_cb_type(cbs, begin_op, append_udata_cb, op_ok);
}
static void
inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok)
{
@@ -542,71 +437,6 @@ inject_mem_cb(const GArray *cbs, TCGOp *begin_op)
inject_cb_type(cbs, begin_op, append_mem_cb, op_rw);
}
/* we could change the ops in place, but we can reuse more code by copying */
static void inject_mem_helper(TCGOp *begin_op, GArray *arr)
{
TCGOp *orig_op = begin_op;
TCGOp *end_op;
TCGOp *op;
end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
tcg_debug_assert(end_op);
/* const ptr */
op = copy_const_ptr(&begin_op, end_op, arr);
/* st_ptr */
op = copy_st_ptr(&begin_op, op);
rm_ops_range(orig_op, end_op);
}
/*
* Tracking memory accesses performed from helpers requires extra work.
* If an instruction is emulated with helpers, we do two things:
* (1) copy the CB descriptors, and keep track of it so that they can be
* freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors, so
* that we can read them at run-time (i.e. when the helper executes).
* This run-time access is performed from qemu_plugin_vcpu_mem_cb.
*
* Note that plugin_gen_disable_mem_helpers undoes (2). Since it
* is possible that the code we generate after the instruction is
* dead, we also add checks before generating tb_exit etc.
*/
static void inject_mem_enable_helper(struct qemu_plugin_tb *ptb,
struct qemu_plugin_insn *plugin_insn,
TCGOp *begin_op)
{
GArray *cbs[2];
GArray *arr;
size_t n_cbs, i;
cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
n_cbs = 0;
for (i = 0; i < ARRAY_SIZE(cbs); i++) {
n_cbs += cbs[i]->len;
}
plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs;
if (likely(!plugin_insn->mem_helper)) {
rm_ops(begin_op);
return;
}
ptb->mem_helper = true;
arr = g_array_sized_new(false, false,
sizeof(struct qemu_plugin_dyn_cb), n_cbs);
for (i = 0; i < ARRAY_SIZE(cbs); i++) {
g_array_append_vals(arr, cbs[i]->data, cbs[i]->len);
}
qemu_plugin_add_dyn_cb_arr(arr);
inject_mem_helper(begin_op, arr);
}
/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
@@ -615,30 +445,6 @@ void plugin_gen_disable_mem_helpers(void)
}
}
static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
TCGOp *begin_op, int insn_idx)
{
struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
}
static void plugin_gen_insn_udata_r(const struct qemu_plugin_tb *ptb,
TCGOp *begin_op, int insn_idx)
{
struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR_R], begin_op);
}
static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
TCGOp *begin_op, int insn_idx)
{
struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
begin_op, op_ok);
}
static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb,
TCGOp *begin_op, int insn_idx)
{
@@ -656,11 +462,51 @@ static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb,
inject_inline_cb(cbs, begin_op, op_rw);
}
static void plugin_gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
TCGOp *begin_op, int insn_idx)
static void gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
struct qemu_plugin_insn *insn)
{
struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
inject_mem_enable_helper(ptb, insn, begin_op);
GArray *cbs[2];
GArray *arr;
size_t n_cbs;
/*
* Tracking memory accesses performed from helpers requires extra work.
* If an instruction is emulated with helpers, we do two things:
* (1) copy the CB descriptors, and keep track of it so that they can be
* freed later on, and (2) point CPUState.plugin_mem_cbs to the
* descriptors, so that we can read them at run-time
* (i.e. when the helper executes).
* This run-time access is performed from qemu_plugin_vcpu_mem_cb.
*
* Note that plugin_gen_disable_mem_helpers undoes (2). Since it
* is possible that the code we generate after the instruction is
* dead, we also add checks before generating tb_exit etc.
*/
if (!insn->calls_helpers) {
return;
}
cbs[0] = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
cbs[1] = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
n_cbs = cbs[0]->len + cbs[1]->len;
if (n_cbs == 0) {
insn->mem_helper = false;
return;
}
insn->mem_helper = true;
ptb->mem_helper = true;
arr = g_array_sized_new(false, false,
sizeof(struct qemu_plugin_dyn_cb), n_cbs);
g_array_append_vals(arr, cbs[0]->data, cbs[0]->len);
g_array_append_vals(arr, cbs[1]->data, cbs[1]->len);
qemu_plugin_add_dyn_cb_arr(arr);
tcg_gen_st_ptr(tcg_constant_ptr((intptr_t)arr), tcg_env,
offsetof(CPUState, plugin_mem_cbs) -
offsetof(ArchCPU, env));
}
static void gen_disable_mem_helper(void)
@@ -824,6 +670,26 @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
}
break;
case PLUGIN_GEN_FROM_INSN:
assert(insn != NULL);
gen_enable_mem_helper(plugin_tb, insn);
cbs = insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR];
for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
struct qemu_plugin_dyn_cb *cb =
&g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
gen_udata_cb(cb);
}
cbs = insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE];
for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
struct qemu_plugin_dyn_cb *cb =
&g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
gen_inline_cb(cb);
}
break;
default:
g_assert_not_reached();
}
@@ -839,28 +705,6 @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
enum plugin_gen_cb type = op->args[1];
switch (from) {
case PLUGIN_GEN_FROM_INSN:
{
g_assert(insn_idx >= 0);
switch (type) {
case PLUGIN_GEN_CB_UDATA:
plugin_gen_insn_udata(plugin_tb, op, insn_idx);
break;
case PLUGIN_GEN_CB_UDATA_R:
plugin_gen_insn_udata_r(plugin_tb, op, insn_idx);
break;
case PLUGIN_GEN_CB_INLINE:
plugin_gen_insn_inline(plugin_tb, op, insn_idx);
break;
case PLUGIN_GEN_ENABLE_MEM_HELPER:
plugin_gen_enable_mem_helper(plugin_tb, op, insn_idx);
break;
default:
g_assert_not_reached();
}
break;
}
case PLUGIN_GEN_FROM_MEM:
{
g_assert(insn_idx >= 0);

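The new FROM_INSN path above calls gen_udata_cb(), gen_inline_cb() and gen_disable_mem_helper(), whose definitions fall outside these hunks. For orientation, here is a rough sketch of two of them, reconstructed from the surrounding series rather than quoted from this commit:

/* Sketch (not verbatim): emit the callback directly at translation time,
 * instead of patching previously generated "empty" ops with copy_call(). */
static void gen_udata_cb(struct qemu_plugin_dyn_cb *cb)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();

    /* Load cpu_index from CPUState, as gen_empty_udata_cb used to. */
    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    tcg_gen_call2(cb->regular.f.vcpu_udata, cb->regular.info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
}

/* Sketch (not verbatim): the disable side of gen_enable_mem_helper,
 * clearing the pointer that gen_enable_mem_helper stored. */
static void gen_disable_mem_helper(void)
{
    tcg_gen_st_ptr(tcg_constant_ptr(0), tcg_env,
                   offsetof(CPUState, plugin_mem_cbs) -
                   offsetof(ArchCPU, env));
}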
include/qemu/plugin.h

@@ -74,7 +74,6 @@ enum plugin_dyn_cb_type {
enum plugin_dyn_cb_subtype {
PLUGIN_CB_REGULAR,
PLUGIN_CB_REGULAR_R,
PLUGIN_CB_INLINE,
PLUGIN_N_CB_SUBTYPES,
};

plugins/api.c

@@ -115,12 +115,8 @@ void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
void *udata)
{
if (!insn->mem_only) {
int index = flags == QEMU_PLUGIN_CB_R_REGS ||
flags == QEMU_PLUGIN_CB_RW_REGS ?
PLUGIN_CB_REGULAR_R : PLUGIN_CB_REGULAR;
plugin_register_dyn_cb__udata(&insn->cbs[PLUGIN_CB_INSN][index],
cb, flags, udata);
plugin_register_dyn_cb__udata(
&insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], cb, flags, udata);
}
}
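For reference, the plugin-facing API is unchanged by this cleanup: per-instruction execution callbacks are still registered from the tb_trans hook, and the flags argument (QEMU_PLUGIN_CB_NO_REGS vs QEMU_PLUGIN_CB_R_REGS/QEMU_PLUGIN_CB_RW_REGS) is what used to select the now-removed PLUGIN_CB_REGULAR_R subtype. A minimal sketch of such a plugin, using only the public <qemu-plugin.h> API:

#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

/* Executed once per executed instruction. */
static void insn_exec(unsigned int vcpu_index, void *udata)
{
}

/* Called at translation time; each registration lands in
 * insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR] per the diff above. */
static void tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    size_t n = qemu_plugin_tb_n_insns(tb);

    for (size_t i = 0; i < n; i++) {
        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
        qemu_plugin_register_vcpu_insn_exec_cb(insn, insn_exec,
                                               QEMU_PLUGIN_CB_NO_REGS, NULL);
    }
}

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    qemu_plugin_register_vcpu_tb_trans_cb(id, tb_trans);
    return 0;
}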