freebsd-src/sys/dev/hwpmc/hwpmc_uncore.c


/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2010 Fabien Thomas
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Intel Uncore PMCs.
*/
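/*
* This class driver handles the uncore fixed (UCF) and programmable (UCP)
* counters of Core i7 and Westmere class processors and, with a different
* MSR layout, of Sandy Bridge and Haswell parts (see the SELECTSEL() and
* SELECTOFF() macros below).
*/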
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/pmc.h>
#include <sys/pmckern.h>
#include <sys/systm.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#define UCF_PMC_CAPS \
(PMC_CAP_READ | PMC_CAP_WRITE)
#define UCP_PMC_CAPS \
(PMC_CAP_EDGE | PMC_CAP_THRESHOLD | PMC_CAP_READ | PMC_CAP_WRITE | \
PMC_CAP_INVERT | PMC_CAP_QUALIFIER | PMC_CAP_PRECISE)
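/*
* SELECTSEL() gives the base MSR of the programmable event select
* registers and SELECTOFF() the bit offset of the first fixed counter
* enable in the uncore global control register; both differ between the
* Core i7/Westmere and the Sandy Bridge/Haswell uncore layouts.
*/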
#define SELECTSEL(x) \
(((x) == PMC_CPU_INTEL_SANDYBRIDGE || (x) == PMC_CPU_INTEL_HASWELL) ? \
UCP_CB0_EVSEL0 : UCP_EVSEL0)
#define SELECTOFF(x) \
(((x) == PMC_CPU_INTEL_SANDYBRIDGE || (x) == PMC_CPU_INTEL_HASWELL) ? \
UCF_OFFSET_SB : UCF_OFFSET)
static enum pmc_cputype uncore_cputype;
struct uncore_cpu {
volatile uint32_t pc_ucfctrl; /* Fixed function control. */
volatile uint64_t pc_globalctrl; /* Global control register. */
struct pmc_hw pc_uncorepmcs[];
};
static struct uncore_cpu **uncore_pcpu;
static uint64_t uncore_pmcmask;
static int uncore_ucf_ri; /* relative index of fixed counters */
static int uncore_ucf_width;
static int uncore_ucf_npmc;
static int uncore_ucp_width;
static int uncore_ucp_npmc;
static int
uncore_pcpu_noop(struct pmc_mdep *md, int cpu)
{
(void) md;
(void) cpu;
return (0);
}
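/*
* Allocate and wire up the per-CPU state backing both the programmable
* (UCP) and fixed (UCF) rows. This is registered as the UCP class
* pcd_pcpu_init hook; the UCF class uses the no-op hook above, so the
* work is done only once per CPU.
*/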
static int
uncore_pcpu_init(struct pmc_mdep *md, int cpu)
{
struct pmc_cpu *pc;
struct uncore_cpu *cc;
struct pmc_hw *phw;
int uncore_ri, n, npmc;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[ucf,%d] insane cpu number %d", __LINE__, cpu));
PMCDBG1(MDP,INI,1,"uncore-init cpu=%d", cpu);
uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri;
npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num;
npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num;
cc = malloc(sizeof(struct uncore_cpu) + npmc * sizeof(struct pmc_hw),
M_PMC, M_WAITOK | M_ZERO);
uncore_pcpu[cpu] = cc;
pc = pmc_pcpu[cpu];
KASSERT(pc != NULL && cc != NULL,
("[uncore,%d] NULL per-cpu structures cpu=%d", __LINE__, cpu));
for (n = 0, phw = cc->pc_uncorepmcs; n < npmc; n++, phw++) {
phw->phw_state = PMC_PHW_FLAG_IS_ENABLED |
PMC_PHW_CPU_TO_STATE(cpu) |
PMC_PHW_INDEX_TO_STATE(n + uncore_ri);
phw->phw_pmc = NULL;
pc->pc_hwpmcs[n + uncore_ri] = phw;
}
return (0);
}
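/*
* Release the per-CPU state allocated above, clearing the programmable
* event select MSRs and the fixed-function control register on the way
* out.
*/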
static int
uncore_pcpu_fini(struct pmc_mdep *md, int cpu)
{
int uncore_ri, n, npmc;
struct pmc_cpu *pc;
struct uncore_cpu *cc;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[uncore,%d] insane cpu number (%d)", __LINE__, cpu));
PMCDBG1(MDP,INI,1,"uncore-pcpu-fini cpu=%d", cpu);
if ((cc = uncore_pcpu[cpu]) == NULL)
return (0);
uncore_pcpu[cpu] = NULL;
pc = pmc_pcpu[cpu];
KASSERT(pc != NULL, ("[uncore,%d] NULL per-cpu %d state", __LINE__,
cpu));
npmc = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_num;
uncore_ri = md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP].pcd_ri;
for (n = 0; n < npmc; n++)
wrmsr(SELECTSEL(uncore_cputype) + n, 0);
wrmsr(UCF_CTRL, 0);
npmc += md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF].pcd_num;
for (n = 0; n < npmc; n++)
pc->pc_hwpmcs[n + uncore_ri] = NULL;
free(cc, M_PMC);
return (0);
}
/*
* Fixed function counters.
*/
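/*
* In sampling mode hwpmc works with a reload count R while the hardware
* counter is programmed with (2^width - R), so that it overflows after R
* events. The helpers below (and their UCP counterparts further down)
* convert between the two representations.
*/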
static pmc_value_t
ucf_perfctr_value_to_reload_count(pmc_value_t v)
{
/*
* If the PMC has already overflowed when it is read, clamp the reload
* count to zero instead of deriving a huge bogus count from the wrapped
* value.
*/
if ((v & (1ULL << (uncore_ucf_width - 1))) == 0)
return (0);
v &= (1ULL << uncore_ucf_width) - 1;
return (1ULL << uncore_ucf_width) - v;
}
static pmc_value_t
ucf_reload_count_to_perfctr_value(pmc_value_t rlc)
{
return (1ULL << uncore_ucf_width) - rlc;
}
static int
ucf_allocate_pmc(int cpu, int ri, struct pmc *pm,
const struct pmc_op_pmcallocate *a)
{
uint32_t flags;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[uncore,%d] illegal CPU %d", __LINE__, cpu));
PMCDBG2(MDP,ALL,1, "ucf-allocate ri=%d reqcaps=0x%x", ri, pm->pm_caps);
if (ri < 0 || ri > uncore_ucf_npmc)
return (EINVAL);
if (a->pm_class != PMC_CLASS_UCF)
return (EINVAL);
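/*
* Only the pmu-events allocation path is supported: the event must have
* been selected from the pmu-events tables (PMC_F_EV_PMU).
*/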
if ((a->pm_flags & PMC_F_EV_PMU) == 0)
return (EINVAL);
flags = UCF_EN;
pm->pm_md.pm_ucf.pm_ucf_ctrl = (flags << (ri * 4));
PMCDBG1(MDP,ALL,2, "ucf-allocate config=0x%jx",
(uintmax_t) pm->pm_md.pm_ucf.pm_ucf_ctrl);
return (0);
}
static int
ucf_config_pmc(int cpu, int ri, struct pmc *pm)
{
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[uncore,%d] illegal CPU %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
("[uncore,%d] illegal row-index %d", __LINE__, ri));
PMCDBG3(MDP,CFG,1, "ucf-config cpu=%d ri=%d pm=%p", cpu, ri, pm);
KASSERT(uncore_pcpu[cpu] != NULL, ("[uncore,%d] null per-cpu %d", __LINE__,
cpu));
uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc = pm;
return (0);
}
static int
ucf_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
struct pmc_hw *phw;
phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri];
snprintf(pi->pm_name, sizeof(pi->pm_name), "UCF-%d", ri);
pi->pm_class = PMC_CLASS_UCF;
if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
pi->pm_enabled = TRUE;
*ppmc = phw->phw_pmc;
} else {
pi->pm_enabled = FALSE;
*ppmc = NULL;
}
return (0);
}
static int
ucf_get_config(int cpu, int ri, struct pmc **ppm)
{
*ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc;
return (0);
}
static int
ucf_read_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t *v)
{
pmc_value_t tmp;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
("[uncore,%d] illegal row-index %d", __LINE__, ri));
tmp = rdmsr(UCF_CTR0 + ri);
if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
*v = ucf_perfctr_value_to_reload_count(tmp);
else
*v = tmp;
PMCDBG3(MDP,REA,1, "ucf-read cpu=%d ri=%d -> v=%jx", cpu, ri, *v);
return (0);
}
static int
ucf_release_pmc(int cpu, int ri, struct pmc *pmc)
{
PMCDBG3(MDP,REL,1, "ucf-release cpu=%d ri=%d pm=%p", cpu, ri, pmc);
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
("[uncore,%d] illegal row-index %d", __LINE__, ri));
KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri + uncore_ucf_ri].phw_pmc == NULL,
("[uncore,%d] PHW pmc non-NULL", __LINE__));
return (0);
}
static int
ucf_start_pmc(int cpu, int ri, struct pmc *pm)
{
struct uncore_cpu *ucfc;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
("[uncore,%d] illegal row-index %d", __LINE__, ri));
PMCDBG2(MDP,STA,1,"ucf-start cpu=%d ri=%d", cpu, ri);
ucfc = uncore_pcpu[cpu];
ucfc->pc_ucfctrl |= pm->pm_md.pm_ucf.pm_ucf_ctrl;
wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);
ucfc->pc_globalctrl |= (1ULL << (ri + SELECTOFF(uncore_cputype)));
wrmsr(UC_GLOBAL_CTRL, ucfc->pc_globalctrl);
PMCDBG4(MDP,STA,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));
return (0);
}
static int
ucf_stop_pmc(int cpu, int ri, struct pmc *pm __unused)
{
uint32_t fc;
struct uncore_cpu *ucfc;
PMCDBG2(MDP,STO,1,"ucf-stop cpu=%d ri=%d", cpu, ri);
ucfc = uncore_pcpu[cpu];
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
("[uncore,%d] illegal row-index %d", __LINE__, ri));
fc = (UCF_MASK << (ri * 4));
ucfc->pc_ucfctrl &= ~fc;
PMCDBG1(MDP,STO,1,"ucf-stop ucfctrl=%x", ucfc->pc_ucfctrl);
wrmsr(UCF_CTRL, ucfc->pc_ucfctrl);
/* Don't need to write UC_GLOBAL_CTRL, one disable is enough. */
PMCDBG4(MDP,STO,1,"ucfctrl=%x(%x) globalctrl=%jx(%jx)",
ucfc->pc_ucfctrl, (uint32_t) rdmsr(UCF_CTRL),
ucfc->pc_globalctrl, rdmsr(UC_GLOBAL_CTRL));
return (0);
}
static int
ucf_write_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t v)
{
struct uncore_cpu *cc;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < uncore_ucf_npmc,
("[uncore,%d] illegal row-index %d", __LINE__, ri));
cc = uncore_pcpu[cpu];
if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
v = ucf_reload_count_to_perfctr_value(v);
wrmsr(UCF_CTRL, 0); /* Turn off fixed counters */
wrmsr(UCF_CTR0 + ri, v);
wrmsr(UCF_CTRL, cc->pc_ucfctrl);
PMCDBG4(MDP,WRI,1, "ucf-write cpu=%d ri=%d v=%jx ucfctrl=%jx ",
cpu, ri, v, (uintmax_t) rdmsr(UCF_CTRL));
return (0);
}
static void
ucf_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
struct pmc_classdep *pcd;
KASSERT(md != NULL, ("[ucf,%d] md is NULL", __LINE__));
PMCDBG0(MDP,INI,1, "ucf-initialize");
pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCF];
pcd->pcd_caps = UCF_PMC_CAPS;
pcd->pcd_class = PMC_CLASS_UCF;
pcd->pcd_num = npmc;
pcd->pcd_ri = md->pmd_npmc;
pcd->pcd_width = pmcwidth;
pcd->pcd_allocate_pmc = ucf_allocate_pmc;
pcd->pcd_config_pmc = ucf_config_pmc;
pcd->pcd_describe = ucf_describe;
pcd->pcd_get_config = ucf_get_config;
pcd->pcd_get_msr = NULL;
pcd->pcd_pcpu_fini = uncore_pcpu_noop;
pcd->pcd_pcpu_init = uncore_pcpu_noop;
pcd->pcd_read_pmc = ucf_read_pmc;
pcd->pcd_release_pmc = ucf_release_pmc;
pcd->pcd_start_pmc = ucf_start_pmc;
pcd->pcd_stop_pmc = ucf_stop_pmc;
pcd->pcd_write_pmc = ucf_write_pmc;
md->pmd_npmc += npmc;
}
/*
* Intel programmable PMCs.
*/
/*
* Event descriptor tables.
*
* For each event id, we track:
*
* 1. The CPUs that the event is valid for.
*
* 2. If the event uses a fixed UMASK, the value of the umask field.
* If the event doesn't use a fixed UMASK, a mask of legal bits
* to check against.
*/
struct ucp_event_descr {
enum pmc_event ucp_ev;
unsigned char ucp_evcode;
unsigned char ucp_umask;
unsigned char ucp_flags;
};
#define UCP_F_I7 (1 << 0) /* CPU: Core i7 */
#define UCP_F_WM (1 << 1) /* CPU: Westmere */
#define UCP_F_SB (1 << 2) /* CPU: Sandy Bridge */
#define UCP_F_HW (1 << 3) /* CPU: Haswell */
#define UCP_F_FM (1 << 4) /* Fixed mask */
#define UCP_F_ALLCPUS \
(UCP_F_I7 | UCP_F_WM)
#define UCP_F_CMASK 0xFF000000
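/*
* No static event table follows; with PMC_F_EV_PMU required at allocation
* time, event encodings are supplied by the pmu-events tables via the
* class-dependent allocation arguments.
*/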
static pmc_value_t
ucp_perfctr_value_to_reload_count(pmc_value_t v)
{
v &= (1ULL << uncore_ucp_width) - 1;
return (1ULL << uncore_ucp_width) - v;
}
static pmc_value_t
ucp_reload_count_to_perfctr_value(pmc_value_t rlc)
{
return (1ULL << uncore_ucp_width) - rlc;
}
/*
* Counter-specific event constraints for Sandy Bridge and Haswell.
*/
static int
ucp_event_sb_hw_ok_on_counter(uint8_t ev, int ri)
{
uint32_t mask;
switch (ev) {
/*
* Events valid only on counter 0.
*/
case 0x80:
case 0x83:
mask = (1 << 0);
break;
default:
mask = ~0; /* Any row index is ok. */
}
return (mask & (1 << ri));
}
static int
ucp_allocate_pmc(int cpu, int ri, struct pmc *pm,
const struct pmc_op_pmcallocate *a)
{
uint8_t ev;
const struct pmc_md_ucp_op_pmcallocate *ucp;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[uncore,%d] illegal CPU %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
("[uncore,%d] illegal row-index value %d", __LINE__, ri));
if (a->pm_class != PMC_CLASS_UCP)
return (EINVAL);
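/* As for UCF above, only events allocated via the pmu-events tables are accepted. */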
if ((a->pm_flags & PMC_F_EV_PMU) == 0)
return (EINVAL);
ucp = &a->pm_md.pm_ucp;
ev = UCP_EVSEL(ucp->pm_ucp_config);
switch (uncore_cputype) {
case PMC_CPU_INTEL_HASWELL:
case PMC_CPU_INTEL_SANDYBRIDGE:
if (ucp_event_sb_hw_ok_on_counter(ev, ri) == 0)
return (EINVAL);
break;
default:
break;
}
pm->pm_md.pm_ucp.pm_ucp_evsel = ucp->pm_ucp_config | UCP_EN;
return (0);
}
static int
ucp_config_pmc(int cpu, int ri, struct pmc *pm)
{
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[uncore,%d] illegal CPU %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
("[uncore,%d] illegal row-index %d", __LINE__, ri));
PMCDBG3(MDP,CFG,1, "ucp-config cpu=%d ri=%d pm=%p", cpu, ri, pm);
KASSERT(uncore_pcpu[cpu] != NULL, ("[uncore,%d] null per-cpu %d", __LINE__,
cpu));
uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc = pm;
return (0);
}
static int
ucp_describe(int cpu, int ri, struct pmc_info *pi, struct pmc **ppmc)
{
struct pmc_hw *phw;
phw = &uncore_pcpu[cpu]->pc_uncorepmcs[ri];
snprintf(pi->pm_name, sizeof(pi->pm_name), "UCP-%d", ri);
pi->pm_class = PMC_CLASS_UCP;
if (phw->phw_state & PMC_PHW_FLAG_IS_ENABLED) {
pi->pm_enabled = TRUE;
*ppmc = phw->phw_pmc;
} else {
pi->pm_enabled = FALSE;
*ppmc = NULL;
}
return (0);
}
static int
ucp_get_config(int cpu, int ri, struct pmc **ppm)
{
*ppm = uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc;
return (0);
}
static int
ucp_read_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t *v)
{
pmc_value_t tmp;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
("[uncore,%d] illegal row-index %d", __LINE__, ri));
tmp = rdmsr(UCP_PMC0 + ri);
if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
*v = ucp_perfctr_value_to_reload_count(tmp);
else
*v = tmp;
PMCDBG4(MDP,REA,1, "ucp-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
ri, *v);
return (0);
}
static int
ucp_release_pmc(int cpu, int ri, struct pmc *pm)
{
(void) pm;
PMCDBG3(MDP,REL,1, "ucp-release cpu=%d ri=%d pm=%p", cpu, ri,
pm);
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
("[uncore,%d] illegal row-index %d", __LINE__, ri));
KASSERT(uncore_pcpu[cpu]->pc_uncorepmcs[ri].phw_pmc
== NULL, ("[uncore,%d] PHW pmc non-NULL", __LINE__));
return (0);
}
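/*
* Program the event select register for this row and set the matching
* enable bit in the uncore global control register.
*/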
static int
ucp_start_pmc(int cpu, int ri, struct pmc *pm)
{
uint64_t evsel;
struct uncore_cpu *cc;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
("[uncore,%d] illegal row-index %d", __LINE__, ri));
cc = uncore_pcpu[cpu];
PMCDBG2(MDP,STA,1, "ucp-start cpu=%d ri=%d", cpu, ri);
evsel = pm->pm_md.pm_ucp.pm_ucp_evsel;
PMCDBG4(MDP,STA,2,
"ucp-start/2 cpu=%d ri=%d evselmsr=0x%x evsel=0x%x",
cpu, ri, SELECTSEL(uncore_cputype) + ri, evsel);
wrmsr(SELECTSEL(uncore_cputype) + ri, evsel);
cc->pc_globalctrl |= (1ULL << ri);
wrmsr(UC_GLOBAL_CTRL, cc->pc_globalctrl);
return (0);
}
static int
ucp_stop_pmc(int cpu, int ri, struct pmc *pm __unused)
{
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
("[uncore,%d] illegal row index %d", __LINE__, ri));
PMCDBG2(MDP,STO,1, "ucp-stop cpu=%d ri=%d", cpu, ri);
/* stop hw. */
wrmsr(SELECTSEL(uncore_cputype) + ri, 0);
/* Don't need to write UC_GLOBAL_CTRL, one disable is enough. */
return (0);
}
static int
ucp_write_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t v)
{
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
("[uncore,%d] illegal cpu value %d", __LINE__, cpu));
KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
("[uncore,%d] illegal row index %d", __LINE__, ri));
PMCDBG4(MDP,WRI,1, "ucp-write cpu=%d ri=%d msr=0x%x v=%jx", cpu, ri,
UCP_PMC0 + ri, v);
if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
v = ucp_reload_count_to_perfctr_value(v);
/*
* Write the new value to the counter. The counter will be in
* a stopped state when the pcd_write() entry point is called.
*/
wrmsr(UCP_PMC0 + ri, v);
return (0);
}
static void
ucp_initialize(struct pmc_mdep *md, int maxcpu, int npmc, int pmcwidth)
{
struct pmc_classdep *pcd;
KASSERT(md != NULL, ("[ucp,%d] md is NULL", __LINE__));
PMCDBG0(MDP,INI,1, "ucp-initialize");
pcd = &md->pmd_classdep[PMC_MDEP_CLASS_INDEX_UCP];
pcd->pcd_caps = UCP_PMC_CAPS;
pcd->pcd_class = PMC_CLASS_UCP;
pcd->pcd_num = npmc;
pcd->pcd_ri = md->pmd_npmc;
pcd->pcd_width = pmcwidth;
pcd->pcd_allocate_pmc = ucp_allocate_pmc;
pcd->pcd_config_pmc = ucp_config_pmc;
pcd->pcd_describe = ucp_describe;
pcd->pcd_get_config = ucp_get_config;
pcd->pcd_get_msr = NULL;
pcd->pcd_pcpu_fini = uncore_pcpu_fini;
pcd->pcd_pcpu_init = uncore_pcpu_init;
pcd->pcd_read_pmc = ucp_read_pmc;
pcd->pcd_release_pmc = ucp_release_pmc;
pcd->pcd_start_pmc = ucp_start_pmc;
pcd->pcd_stop_pmc = ucp_stop_pmc;
pcd->pcd_write_pmc = ucp_write_pmc;
md->pmd_npmc += npmc;
}
int
pmc_uncore_initialize(struct pmc_mdep *md, int maxcpu)
{
uncore_cputype = md->pmd_cputype;
uncore_pmcmask = 0;
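/*
* The counter geometry is hardwired: eight 48-bit programmable counters
* plus one 48-bit fixed counter, which matches the client uncore PMUs
* this driver supports.
*/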
/*
* Initialize programmable counters.
*/
uncore_ucp_npmc = 8;
uncore_ucp_width = 48;
uncore_pmcmask |= ((1ULL << uncore_ucp_npmc) - 1);
ucp_initialize(md, maxcpu, uncore_ucp_npmc, uncore_ucp_width);
/*
* Initialize fixed function counters, if present.
*/
uncore_ucf_ri = uncore_ucp_npmc;
uncore_ucf_npmc = 1;
uncore_ucf_width = 48;
ucf_initialize(md, maxcpu, uncore_ucf_npmc, uncore_ucf_width);
uncore_pmcmask |= ((1ULL << uncore_ucf_npmc) - 1) << SELECTOFF(uncore_cputype);
PMCDBG2(MDP,INI,1,"uncore-init pmcmask=0x%jx ucfri=%d", uncore_pmcmask,
uncore_ucf_ri);
uncore_pcpu = malloc(sizeof(*uncore_pcpu) * maxcpu, M_PMC,
M_ZERO | M_WAITOK);
return (0);
}
void
pmc_uncore_finalize(struct pmc_mdep *md)
{
PMCDBG0(MDP,INI,1, "uncore-finalize");
for (int i = 0; i < pmc_cpu_max(); i++)
KASSERT(uncore_pcpu[i] == NULL,
("[uncore,%d] non-null pcpu cpu %d", __LINE__, i));
free(uncore_pcpu, M_PMC);
uncore_pcpu = NULL;
}