Skip to content

Commit

Permalink
perf/x86/amd/uncore: Use dynamic events array
Browse files Browse the repository at this point in the history
If AMD Performance Monitoring Version 2 (PerfMonV2) is
supported, the number of available counters for a given
uncore PMU may not be fixed across families and models
and has to be determined at runtime.

The per-cpu uncore PMU data currently uses a fixed-sized
array for event information. Make it dynamic based on the
number of available counters.

Signed-off-by: Sandipan Das <sandipan.das@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/21eea0cb6de9d14f78d52d1d62637ae02bc900f5.1652954372.git.sandipan.das@amd.com
  • Loading branch information
sandip4n authored and Peter Zijlstra committed Jun 13, 2022
1 parent e60b7cb commit 39621c5
Showing 1 changed file with 31 additions and 7 deletions.
38 changes: 31 additions & 7 deletions arch/x86/events/amd/uncore.c
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@
#define NUM_COUNTERS_NB 4
#define NUM_COUNTERS_L2 4
#define NUM_COUNTERS_L3 6
#define MAX_COUNTERS 6

#define RDPMC_BASE_NB 6
#define RDPMC_BASE_LLC 10
Expand All @@ -46,7 +45,7 @@ struct amd_uncore {
u32 msr_base;
cpumask_t *active_mask;
struct pmu *pmu;
struct perf_event *events[MAX_COUNTERS];
struct perf_event **events;
struct hlist_node node;
};

Expand Down Expand Up @@ -370,11 +369,19 @@ static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
cpu_to_node(cpu));
}

/*
 * Allocate the dynamically-sized per-cpu events array for an uncore PMU.
 * With PerfMonV2 the number of available counters is only known at
 * runtime, so the array can no longer be a fixed-size member.
 *
 * @num: number of counters (array slots) to allocate
 * @cpu: cpu whose NUMA node the allocation should come from
 *
 * Returns a zeroed array of @num event pointers, or NULL on allocation
 * failure. Freed with kfree() when the uncore's refcount drops to zero.
 */
static inline struct perf_event **
amd_uncore_events_alloc(unsigned int num, unsigned int cpu)
{
	/*
	 * kcalloc_node() checks num * size for overflow internally,
	 * unlike an open-coded sizeof(...) * num fed to kzalloc_node();
	 * both zero-initialize and honour the NUMA node.
	 */
	return kcalloc_node(num, sizeof(struct perf_event *), GFP_KERNEL,
			    cpu_to_node(cpu));
}

static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
struct amd_uncore *uncore_nb = NULL, *uncore_llc;
struct amd_uncore *uncore_nb = NULL, *uncore_llc = NULL;

if (amd_uncore_nb) {
*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
uncore_nb = amd_uncore_alloc(cpu);
if (!uncore_nb)
goto fail;
Expand All @@ -384,11 +391,15 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
uncore_nb->active_mask = &amd_nb_active_mask;
uncore_nb->pmu = &amd_nb_pmu;
uncore_nb->events = amd_uncore_events_alloc(num_counters_nb, cpu);
if (!uncore_nb->events)
goto fail;
uncore_nb->id = -1;
*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
}

if (amd_uncore_llc) {
*per_cpu_ptr(amd_uncore_llc, cpu) = NULL;
uncore_llc = amd_uncore_alloc(cpu);
if (!uncore_llc)
goto fail;
Expand All @@ -398,16 +409,26 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
uncore_llc->active_mask = &amd_llc_active_mask;
uncore_llc->pmu = &amd_llc_pmu;
uncore_llc->events = amd_uncore_events_alloc(num_counters_llc, cpu);
if (!uncore_llc->events)
goto fail;
uncore_llc->id = -1;
*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
}

return 0;

fail:
if (amd_uncore_nb)
*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
kfree(uncore_nb);
if (uncore_nb) {
kfree(uncore_nb->events);
kfree(uncore_nb);
}

if (uncore_llc) {
kfree(uncore_llc->events);
kfree(uncore_llc);
}

return -ENOMEM;
}

Expand Down Expand Up @@ -540,8 +561,11 @@ static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
if (cpu == uncore->cpu)
cpumask_clear_cpu(cpu, uncore->active_mask);

if (!--uncore->refcnt)
if (!--uncore->refcnt) {
kfree(uncore->events);
kfree(uncore);
}

*per_cpu_ptr(uncores, cpu) = NULL;
}

Expand Down

0 comments on commit 39621c5

Please sign in to comment.