perf/x86/intel: Add CPUID enumeration for the auto counter reload
The counters that support the auto counter reload feature can be
enumerated in the CPUID Leaf 0x23 sub-leaf 0x2.

Add acr_cntr_mask to store the mask of counters which are reloadable.
Add acr_cause_mask to store the mask of counters which can cause reload.
Since the e-core and p-core may have different numbers of counters,
track the masks in the struct x86_hybrid_pmu as well.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Thomas Falcon <thomas.falcon@intel.com>
Link: https://lkml.kernel.org/r/20250327195217.2683619-5-kan.liang@linux.intel.com
parent c9449c8506
commit 1856c6c2f8
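As background for the diff below, here is a minimal userspace sketch of reading the same sub-leaf. It is an illustrative assumption-laden example, not kernel code: it uses GCC/Clang's <cpuid.h> helper __get_cpuid_count(), hard-codes INTEL_PMC_IDX_FIXED (32) from the kernel headers, and only checks that leaf 0x23 exists, whereas the kernel additionally gates on the acr_subleaf capability bit in sub-leaf 0's EAX.

/* Sketch: dump CPUID leaf 0x23 sub-leaf 0x2 from userspace.
 * Register meanings follow the patch below: EAX/EBX = reloadable
 * GP/fixed counters, ECX/EDX = counters that can cause a reload.
 * Build: cc -O2 acr_cpuid.c -o acr_cpuid
 */
#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>

#define ARCH_PERFMON_EXT_LEAF	0x23
#define ARCH_PERFMON_ACR_LEAF	0x2
#define INTEL_PMC_IDX_FIXED	32	/* fixed counters start at bit 32 */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Returns 0 if the leaf is above the CPU's maximum supported leaf.
	 * (The kernel also checks the acr_subleaf bit in sub-leaf 0's EAX;
	 * this sketch skips that for brevity.) */
	if (!__get_cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_ACR_LEAF,
			       &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 0x23 not supported\n");
		return 1;
	}

	uint64_t acr_cntr_mask  = eax | ((uint64_t)ebx << INTEL_PMC_IDX_FIXED);
	uint64_t acr_cause_mask = ecx | ((uint64_t)edx << INTEL_PMC_IDX_FIXED);

	printf("reloadable counters:   %#018llx\n",
	       (unsigned long long)acr_cntr_mask);
	printf("reload-cause counters: %#018llx\n",
	       (unsigned long long)acr_cause_mask);
	return 0;
}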
arch/x86/events/intel/core.c

@@ -5069,6 +5069,16 @@ static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
 		pmu->fixed_cntr_mask64 = fixed_cntr;
 	}
 
+	if (eax.split.acr_subleaf) {
+		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_ACR_LEAF,
+			    &cntr, &fixed_cntr, &ecx, &edx);
+		/* The mask of the counters which can be reloaded */
+		pmu->acr_cntr_mask64 = cntr | ((u64)fixed_cntr << INTEL_PMC_IDX_FIXED);
+
+		/* The mask of the counters which can cause a reload of reloadable counters */
+		pmu->acr_cause_mask64 = ecx | ((u64)edx << INTEL_PMC_IDX_FIXED);
+	}
+
 	if (!intel_pmu_broken_perf_cap()) {
 		/* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */
 		rdmsrl(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities);
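The packing above folds two 32-bit CPUID outputs into one 64-bit counter namespace: general-purpose counters occupy the low bits and fixed counters start at bit INTEL_PMC_IDX_FIXED (32), consistent with how the existing cntr_mask64/fixed_cntr_mask64 fields are built. A small standalone sketch with hypothetical register values (0xf and 0x7 are made up for illustration, not taken from any CPU):

#include <stdint.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED 32

int main(void)
{
	/* Hypothetical CPUID output: 4 reloadable GP counters (EAX = 0xf)
	 * and 3 reloadable fixed counters (EBX = 0x7). */
	uint64_t cntr = 0xf, fixed_cntr = 0x7;

	uint64_t mask = cntr | (fixed_cntr << INTEL_PMC_IDX_FIXED);
	printf("%#llx\n", (unsigned long long)mask);	/* prints 0x70000000f */
	return 0;
}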
arch/x86/events/perf_event.h

@@ -708,6 +708,15 @@ struct x86_hybrid_pmu {
 		u64		fixed_cntr_mask64;
 		unsigned long	fixed_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 	};
+
+	union {
+		u64		acr_cntr_mask64;
+		unsigned long	acr_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+	};
+	union {
+		u64		acr_cause_mask64;
+		unsigned long	acr_cause_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+	};
 	struct event_constraint		unconstrained;
 
 	u64				hw_cache_event_ids
@@ -806,6 +815,14 @@ struct x86_pmu {
 		u64		fixed_cntr_mask64;
 		unsigned long	fixed_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 	};
+	union {
+		u64		acr_cntr_mask64;
+		unsigned long	acr_cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+	};
+	union {
+		u64		acr_cause_mask64;
+		unsigned long	acr_cause_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+	};
 	int		cntval_bits;
 	u64		cntval_mask;
 	union {
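Both hunks reuse the union idiom of the neighboring cntr_mask/fixed_cntr_mask members: the same storage is a u64 for the CPUID arithmetic and an unsigned long array sized by BITS_TO_LONGS(X86_PMC_IDX_MAX) for the kernel's bitmap helpers such as for_each_set_bit(). A standalone sketch of the idea, assuming a 64-bit build (the constants below are simplified stand-ins, not copied verbatim from the kernel):

#include <stdint.h>
#include <stdio.h>

#define X86_PMC_IDX_MAX		64
#define BITS_PER_LONG		64
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

union counter_mask {
	uint64_t	mask64;					/* for arithmetic */
	unsigned long	mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];	/* for bitmap ops */
};

int main(void)
{
	union counter_mask m = { .mask64 = 0x70000000fULL };

	/* Walk set bits the way for_each_set_bit() would in the kernel */
	for (int bit = 0; bit < X86_PMC_IDX_MAX; bit++)
		if (m.mask[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG)))
			printf("counter %d is reloadable\n", bit);
	return 0;
}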
arch/x86/include/asm/perf_event.h

@@ -195,6 +195,7 @@ union cpuid10_edx {
  */
 #define ARCH_PERFMON_EXT_LEAF			0x00000023
 #define ARCH_PERFMON_NUM_COUNTER_LEAF		0x1
+#define ARCH_PERFMON_ACR_LEAF			0x2
 
 union cpuid35_eax {
 	struct {