selftests/rseq: Implement basic percpu ops mm_cid test
Adapt to the rseq.h API changes introduced by commits
"selftests/rseq: <arch>: Template memory ordering and percpu access mode".

Build a new basic_percpu_ops_mm_cid_test to test the new "mm_cid" rseq
field.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20221122203932.231377-19-mathieu.desnoyers@efficios.com
parent 171586a6ab
commit cead720627

3 changed files with 44 additions and 8 deletions
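For orientation before the diff, here is a minimal sketch (not part of the commit) of what the templated API looks like to a caller that indexes per-thread data by mm_cid rather than cpu_id. It assumes the rseq selftests headers and librseq from this tree are available; MAX_NR_CIDS, struct counter_slot and bump_current_slot() are made-up names for the sketch, while everything prefixed rseq_/RSEQ_ comes from the selftests' rseq.h as exercised by the diff below.

/*
 * Illustrative only, not part of the commit: a hypothetical caller of the
 * templated selftests API, indexing a counter array by mm_cid.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include "rseq.h"	/* tools/testing/selftests/rseq/rseq.h */

#define MAX_NR_CIDS	256	/* assumed upper bound on concurrency IDs */

struct counter_slot {
	intptr_t count;
} __attribute__((aligned(128)));	/* pad to avoid false sharing, like the test */

static struct counter_slot slots[MAX_NR_CIDS];

static void bump_current_slot(void)
{
	for (;;) {
		/* mm_cid: compact index among this process's running threads. */
		int cid = rseq_current_mm_cid();
		intptr_t old = RSEQ_READ_ONCE(slots[cid].count);

		/* Commit old + 1 only if count is unchanged and cid still matches. */
		if (rseq_likely(!rseq_cmpeqv_storev(RSEQ_MO_RELAXED,
						    RSEQ_PERCPU_MM_CID,
						    &slots[cid].count,
						    old, old + 1, cid)))
			return;
		/* Comparison failed or the rseq critical section aborted: retry. */
	}
}

int main(void)
{
	if (rseq_register_current_thread()) {
		perror("rseq_register_current_thread");
		return EXIT_FAILURE;
	}
	if (!rseq_mm_cid_available()) {
		fprintf(stderr, "mm_cid not available on this kernel\n");
		return EXIT_FAILURE;
	}
	bump_current_slot();
	printf("slot %d incremented to %ld\n",
	       (int)rseq_current_mm_cid(),
	       (long)slots[rseq_current_mm_cid()].count);
	return 0;
}

Build-wise, the new Makefile rule below compiles the very same basic_percpu_ops_test.c with an extra define, so the new binary exercises this mm_cid path while the existing binary keeps using cpu_id.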
tools/testing/selftests/rseq/.gitignore (vendored):

@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 basic_percpu_ops_test
+basic_percpu_ops_mm_cid_test
 basic_test
 basic_rseq_op_test
 param_test
tools/testing/selftests/rseq/Makefile:

@@ -12,7 +12,7 @@ LDLIBS += -lpthread -ldl
 # still track changes to header files and depend on shared object.
 OVERRIDE_TARGETS = 1
 
-TEST_GEN_PROGS = basic_test basic_percpu_ops_test param_test \
+TEST_GEN_PROGS = basic_test basic_percpu_ops_test basic_percpu_ops_mm_cid_test param_test \
 		param_test_benchmark param_test_compare_twice
 
 TEST_GEN_PROGS_EXTENDED = librseq.so
@@ -29,6 +29,9 @@ $(OUTPUT)/librseq.so: rseq.c rseq.h rseq-*.h
 $(OUTPUT)/%: %.c $(TEST_GEN_PROGS_EXTENDED) rseq.h rseq-*.h
 	$(CC) $(CFLAGS) $< $(LDLIBS) -lrseq -o $@
 
+$(OUTPUT)/basic_percpu_ops_mm_cid_test: basic_percpu_ops_test.c $(TEST_GEN_PROGS_EXTENDED) rseq.h rseq-*.h
+	$(CC) $(CFLAGS) -DBUILDOPT_RSEQ_PERCPU_MM_CID_ID $< $(LDLIBS) -lrseq -o $@
+
 $(OUTPUT)/param_test_benchmark: param_test.c $(TEST_GEN_PROGS_EXTENDED) \
 				rseq.h rseq-*.h
 	$(CC) $(CFLAGS) -DBENCHMARK $< $(LDLIBS) -lrseq -o $@
tools/testing/selftests/rseq/basic_percpu_ops_test.c:

@@ -12,6 +12,32 @@
 #include "../kselftest.h"
 #include "rseq.h"
 
+#ifdef BUILDOPT_RSEQ_PERCPU_MM_CID
+# define RSEQ_PERCPU	RSEQ_PERCPU_MM_CID
+static
+int get_current_cpu_id(void)
+{
+	return rseq_current_mm_cid();
+}
+static
+bool rseq_validate_cpu_id(void)
+{
+	return rseq_mm_cid_available();
+}
+#else
+# define RSEQ_PERCPU	RSEQ_PERCPU_CPU_ID
+static
+int get_current_cpu_id(void)
+{
+	return rseq_cpu_start();
+}
+static
+bool rseq_validate_cpu_id(void)
+{
+	return rseq_current_cpu_raw() >= 0;
+}
+#endif
+
 struct percpu_lock_entry {
 	intptr_t v;
 } __attribute__((aligned(128)));
@@ -51,9 +77,9 @@ int rseq_this_cpu_lock(struct percpu_lock *lock)
 	for (;;) {
 		int ret;
 
-		cpu = rseq_cpu_start();
-		ret = rseq_cmpeqv_storev(&lock->c[cpu].v,
-					 0, 1, cpu);
+		cpu = get_current_cpu_id();
+		ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+					 &lock->c[cpu].v, 0, 1, cpu);
 		if (rseq_likely(!ret))
 			break;
 		/* Retry if comparison fails or rseq aborts. */
@@ -141,13 +167,14 @@ void this_cpu_list_push(struct percpu_list *list,
 		intptr_t *targetptr, newval, expect;
 		int ret;
 
-		cpu = rseq_cpu_start();
+		cpu = get_current_cpu_id();
 		/* Load list->c[cpu].head with single-copy atomicity. */
 		expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
 		newval = (intptr_t)node;
 		targetptr = (intptr_t *)&list->c[cpu].head;
 		node->next = (struct percpu_list_node *)expect;
-		ret = rseq_cmpeqv_storev(targetptr, expect, newval, cpu);
+		ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+					 targetptr, expect, newval, cpu);
 		if (rseq_likely(!ret))
 			break;
 		/* Retry if comparison fails or rseq aborts. */
@@ -170,12 +197,13 @@ struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list,
 	long offset;
 	int ret, cpu;
 
-	cpu = rseq_cpu_start();
+	cpu = get_current_cpu_id();
 	targetptr = (intptr_t *)&list->c[cpu].head;
 	expectnot = (intptr_t)NULL;
 	offset = offsetof(struct percpu_list_node, next);
 	load = (intptr_t *)&head;
-	ret = rseq_cmpnev_storeoffp_load(targetptr, expectnot,
-					 offset, load, cpu);
+	ret = rseq_cmpnev_storeoffp_load(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+					 targetptr, expectnot,
+					 offset, load, cpu);
 	if (rseq_likely(!ret)) {
 		if (_cpu)
@@ -295,6 +323,10 @@ int main(int argc, char **argv)
 			errno, strerror(errno));
 		goto error;
 	}
+	if (!rseq_validate_cpu_id()) {
+		fprintf(stderr, "Error: cpu id getter unavailable\n");
+		goto error;
+	}
 	printf("spinlock\n");
 	test_percpu_spinlock();
 	printf("percpu_list\n");