release 6.12.9

2025-01-09 22:48:22 +03:00
parent 3fb1083e44
commit d372c88caf
67 changed files with 19 additions and 5288 deletions


@@ -1,70 +0,0 @@
From f077f4265c59f5d417aa40eaf82bb632c891b221 Mon Sep 17 00:00:00 2001
From: Perry Yuan <perry.yuan@amd.com>
Date: Fri, 9 Aug 2024 14:09:05 +0800
Subject: cpufreq: amd-pstate: add quirk for Ryzen 3000 series processor
The Ryzen 3000 series processors have been observed lacking the
nominal_freq and lowest_freq parameters in their ACPI tables. This
absence causes issues with loading the amd-pstate driver on these
systems. Introduce a quirk specifically for the Ryzen 3000 series to
resolve the issue (a sketch of the DMI quirk pattern follows this
patch).
Reported-by: David Wang <00107082@163.com>
Signed-off-by: Perry Yuan <perry.yuan@amd.com>
---
drivers/cpufreq/amd-pstate.c | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -136,6 +136,11 @@ static struct quirk_entry quirk_amd_7k62
.lowest_freq = 550,
};
+static struct quirk_entry quirk_amd_mts = {
+ .nominal_freq = 3600,
+ .lowest_freq = 550,
+};
+
static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
{
/**
@@ -152,6 +157,21 @@ static int __init dmi_matched_7k62_bios_
return 0;
}
+static int __init dmi_matched_mts_bios_bug(const struct dmi_system_id *dmi)
+{
+ /**
+ * Match the broken BIOS for Ryzen 3000 series processors that support
+ * CPPC V2; these BIOSes lack the nominal_freq and lowest_freq
+ * definitions in their ACPI tables.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
+ quirks = dmi->driver_data;
+ pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
+ return 1;
+ }
+
+ return 0;
+}
static const struct dmi_system_id amd_pstate_quirks_table[] __initconst = {
{
.callback = dmi_matched_7k62_bios_bug,
@@ -162,6 +182,16 @@ static const struct dmi_system_id amd_ps
},
.driver_data = &quirk_amd_7k62,
},
+ {
+ .callback = dmi_matched_mts_bios_bug,
+ .ident = "AMD Ryzen 3000",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "B450M MORTAR MAX (MS-7B89)"),
+ DMI_MATCH(DMI_BIOS_RELEASE, "06/10/2020"),
+ DMI_MATCH(DMI_BIOS_VERSION, "5.14"),
+ },
+ .driver_data = &quirk_amd_mts,
+ },
{}
};
MODULE_DEVICE_TABLE(dmi, amd_pstate_quirks_table);
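
The quirk plumbing above follows a standard kernel pattern: the driver
declares a DMI match table, dmi_check_system() walks it at init, and the
matching entry's callback adopts dmi->driver_data. A minimal sketch of that
pattern, with illustrative names (my_quirk, board_a_match, and the DMI
strings are not the driver's):

    #include <linux/dmi.h>
    #include <linux/init.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    struct my_quirk { u32 nominal_freq, lowest_freq; };

    static struct my_quirk *active_quirk;   /* consumed later by init code */
    static struct my_quirk board_a_quirk = {
            .nominal_freq = 3600,
            .lowest_freq  = 550,
    };

    static int __init board_a_match(const struct dmi_system_id *dmi)
    {
            active_quirk = dmi->driver_data;   /* adopt the entry's data */
            pr_info("Applying frequency quirk for %s\n", dmi->ident);
            return 1;                          /* stop scanning the table */
    }

    static const struct dmi_system_id my_quirk_table[] __initconst = {
            {
                    .callback    = board_a_match,
                    .ident       = "Example Board A",
                    .matches     = { DMI_MATCH(DMI_BOARD_NAME, "EXAMPLE-A") },
                    .driver_data = &board_a_quirk,
            },
            {}
    };

    /* in the driver's init path: dmi_check_system(my_quirk_table); */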


@@ -1,24 +0,0 @@
From da2702cb43c82219d4624135d85e2fc8768aeb09 Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Thu, 5 Sep 2024 11:23:51 -0500
Subject: cpufreq/amd-pstate: Fix non kerneldoc comment
The comment for amd_cppc_supported() isn't meant to be kernel-doc (a
short example of the distinction follows this patch).
Fixes: cb817ec6673b7 ("cpufreq: amd-pstate: show CPPC debug message if CPPC is not supported")
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1782,7 +1782,7 @@ static int __init amd_pstate_set_driver(
return -EINVAL;
}
-/**
+/*
* CPPC function is not supported for family ID 17H with model_ID ranging from 0x10 to 0x2F.
* show the debug message that helps to check if the CPU has CPPC support for loading issue.
*/
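
For context on this one-character fix: a comment opening with "/**" is
claimed by the kernel-doc tooling (scripts/kernel-doc) and must follow its
grammar, while "/*" is free-form. Roughly, with foo_add() as a made-up
example:

    /**
     * foo_add() - Add two integers.
     * @a: first addend
     * @b: second addend
     *
     * Return: the sum of @a and @b.
     */
    int foo_add(int a, int b);

    /* A plain comment: ignored by kernel-doc, no required structure. */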


@@ -1,108 +0,0 @@
From a7b86a6057ccc8f5b5ab4d08e753b2a034fa7d28 Mon Sep 17 00:00:00 2001
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Date: Tue, 17 Sep 2024 09:14:35 +0000
Subject: cpufreq/amd-pstate: Rename MSR and shared memory specific functions
The existing function names "cppc_*" and "pstate_*" for shared memory and
MSR based systems are not intuitive enough; replace them with "shmem_*"
and "msr_*" respectively (a sketch of the static-call dispatch behind
this split follows the patch).
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
---
drivers/cpufreq/amd-pstate.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -263,7 +263,7 @@ static int amd_pstate_get_energy_pref_in
return index;
}
-static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+static void msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
u32 des_perf, u32 max_perf, bool fast_switch)
{
if (fast_switch)
@@ -273,7 +273,7 @@ static void pstate_update_perf(struct am
READ_ONCE(cpudata->cppc_req_cached));
}
-DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
+DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
u32 min_perf, u32 des_perf,
@@ -336,7 +336,7 @@ static int amd_pstate_set_energy_pref_in
return ret;
}
-static inline int pstate_enable(bool enable)
+static inline int msr_enable(bool enable)
{
int ret, cpu;
unsigned long logical_proc_id_mask = 0;
@@ -362,7 +362,7 @@ static inline int pstate_enable(bool ena
return 0;
}
-static int cppc_enable(bool enable)
+static int shmem_enable(bool enable)
{
int cpu, ret = 0;
struct cppc_perf_ctrls perf_ctrls;
@@ -389,14 +389,14 @@ static int cppc_enable(bool enable)
return ret;
}
-DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);
+DEFINE_STATIC_CALL(amd_pstate_enable, msr_enable);
static inline int amd_pstate_enable(bool enable)
{
return static_call(amd_pstate_enable)(enable);
}
-static int pstate_init_perf(struct amd_cpudata *cpudata)
+static int msr_init_perf(struct amd_cpudata *cpudata)
{
u64 cap1;
@@ -415,7 +415,7 @@ static int pstate_init_perf(struct amd_c
return 0;
}
-static int cppc_init_perf(struct amd_cpudata *cpudata)
+static int shmem_init_perf(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
@@ -450,14 +450,14 @@ static int cppc_init_perf(struct amd_cpu
return ret;
}
-DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);
+DEFINE_STATIC_CALL(amd_pstate_init_perf, msr_init_perf);
static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
{
return static_call(amd_pstate_init_perf)(cpudata);
}
-static void cppc_update_perf(struct amd_cpudata *cpudata,
+static void shmem_update_perf(struct amd_cpudata *cpudata,
u32 min_perf, u32 des_perf,
u32 max_perf, bool fast_switch)
{
@@ -1905,9 +1905,9 @@ static int __init amd_pstate_init(void)
current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
} else {
pr_debug("AMD CPPC shared memory based functionality is supported\n");
- static_call_update(amd_pstate_enable, cppc_enable);
- static_call_update(amd_pstate_init_perf, cppc_init_perf);
- static_call_update(amd_pstate_update_perf, cppc_update_perf);
+ static_call_update(amd_pstate_enable, shmem_enable);
+ static_call_update(amd_pstate_init_perf, shmem_init_perf);
+ static_call_update(amd_pstate_update_perf, shmem_update_perf);
}
if (amd_pstate_prefcore) {
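
The msr_*/shmem_* naming introduced here sits on top of the kernel's
static-call mechanism: a call site compiled as a direct call whose target
can be patched once at boot, avoiding an indirect branch on every
invocation. A stripped-down sketch of the pattern, with illustrative names
(do_thing and friends are not from the driver):

    #include <linux/init.h>
    #include <linux/static_call.h>

    static int msr_do_thing(int arg)   { return arg; /* MSR path */ }
    static int shmem_do_thing(int arg) { return arg; /* shared-memory path */ }

    /* Default target, chosen at compile time. */
    DEFINE_STATIC_CALL(do_thing, msr_do_thing);

    static void __init pick_backend(bool has_msr)
    {
            /* Retargeted at most once, during driver init. */
            if (!has_msr)
                    static_call_update(do_thing, shmem_do_thing);
    }

    /* Callers always write: static_call(do_thing)(42); */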


@@ -1,49 +0,0 @@
From 4e784acad1b275ddcb105b053922ab2179ab3f86 Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Sat, 12 Oct 2024 12:45:18 -0500
Subject: cpufreq/amd-pstate: Use amd_pstate_update_min_max_limit() for EPP
limits
When EPP limits are updated, the maximum capable frequency of the CPU
is used as the upper limit instead of the policy's maximum.
Adjust amd_pstate_epp_update_limit() to reuse the policy calculation
code from amd_pstate_update_min_max_limit() (a worked example of the
limit math follows this patch).
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 19 +++----------------
1 file changed, 3 insertions(+), 16 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1515,26 +1515,13 @@ static void amd_pstate_epp_cpu_exit(stru
static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
- u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
+ u32 max_perf, min_perf;
u64 value;
s16 epp;
- if (cpudata->boost_supported && !policy->boost_enabled)
- max_perf = READ_ONCE(cpudata->nominal_perf);
- else
- max_perf = READ_ONCE(cpudata->highest_perf);
+ max_perf = READ_ONCE(cpudata->highest_perf);
min_perf = READ_ONCE(cpudata->lowest_perf);
- max_limit_perf = div_u64(policy->max * max_perf, policy->cpuinfo.max_freq);
- min_limit_perf = div_u64(policy->min * max_perf, policy->cpuinfo.max_freq);
-
- if (min_limit_perf < min_perf)
- min_limit_perf = min_perf;
-
- if (max_limit_perf < min_limit_perf)
- max_limit_perf = min_limit_perf;
-
- WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
- WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
+ amd_pstate_update_min_max_limit(policy);
max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
cpudata->max_limit_perf);
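
The limit math being consolidated maps a policy frequency onto the 0-255
CPPC perf scale. A worked example with hypothetical values (highest_perf =
255, cpuinfo.max_freq = 4000 MHz):

    /* max_limit_perf = policy->max * max_perf / cpuinfo.max_freq
     *                = 3000 MHz * 255 / 4000 MHz = 191
     * min_limit_perf = policy->min * max_perf / cpuinfo.max_freq
     *                = 1400 MHz * 255 / 4000 MHz = 89
     * The results are then clamped so that min_limit_perf >= lowest_perf
     * and max_limit_perf >= min_limit_perf, as in the hunk above.
     */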


@@ -1,29 +0,0 @@
From ed39a400d49baf070571bb1da475297a8495aa67 Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Sat, 12 Oct 2024 12:45:19 -0500
Subject: cpufreq/amd-pstate: Drop needless EPP initialization
The EPP value doesn't need to be cached into the CPPC request in
amd_pstate_epp_update_limit() because it's passed as an argument at
the end to amd_pstate_set_epp() and stored at that time (the bit
layout involved is sketched after this patch).
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 6 ------
1 file changed, 6 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1558,12 +1558,6 @@ static int amd_pstate_epp_update_limit(s
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
epp = 0;
- /* Set initial EPP value */
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- value &= ~GENMASK_ULL(31, 24);
- value |= (u64)epp << 24;
- }
-
WRITE_ONCE(cpudata->cppc_req_cached, value);
return amd_pstate_set_epp(cpudata, epp);
}
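
The lines deleted here manipulated bits 31:24 of the cached CPPC request,
which hold the energy-performance preference (EPP). A sketch of that field
handling, using the same mask as the driver (the helper names are made up):

    #include <linux/bits.h>
    #include <linux/types.h>

    #define CPPC_REQ_EPP_MASK  GENMASK_ULL(31, 24)  /* EPP lives in bits 31:24 */

    static u64 cppc_req_with_epp(u64 req, u8 epp)
    {
            req &= ~CPPC_REQ_EPP_MASK;  /* clear the old EPP */
            req |= (u64)epp << 24;      /* splice in the new one */
            return req;
    }

    static u8 cppc_req_epp(u64 req)
    {
            return (req >> 24) & 0xFF;
    }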


@@ -1,54 +0,0 @@
From 2a012fdcef91db6e380000b8042f90544ea49fd7 Mon Sep 17 00:00:00 2001
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Date: Thu, 17 Oct 2024 05:39:28 +0000
Subject: cpufreq/amd-pstate: Remove the redundant verify() function
Merge the two verify() callback functions and rename the
cpufreq_policy_data argument for better readability.
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
---
drivers/cpufreq/amd-pstate.c | 15 ++++-----------
1 file changed, 4 insertions(+), 11 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -557,10 +557,10 @@ cpufreq_policy_put:
cpufreq_cpu_put(policy);
}
-static int amd_pstate_verify(struct cpufreq_policy_data *policy)
+static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
{
- cpufreq_verify_within_cpu_limits(policy);
-
+ cpufreq_verify_within_cpu_limits(policy_data);
+ pr_debug("policy_max =%d, policy_min=%d\n", policy_data->max, policy_data->min);
return 0;
}
@@ -1668,13 +1668,6 @@ static int amd_pstate_epp_cpu_offline(st
return 0;
}
-static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy)
-{
- cpufreq_verify_within_cpu_limits(policy);
- pr_debug("policy_max =%d, policy_min=%d\n", policy->max, policy->min);
- return 0;
-}
-
static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
@@ -1730,7 +1723,7 @@ static struct cpufreq_driver amd_pstate_
static struct cpufreq_driver amd_pstate_epp_driver = {
.flags = CPUFREQ_CONST_LOOPS,
- .verify = amd_pstate_epp_verify_policy,
+ .verify = amd_pstate_verify,
.setpolicy = amd_pstate_epp_set_policy,
.init = amd_pstate_epp_cpu_init,
.exit = amd_pstate_epp_cpu_exit,


@@ -1,74 +0,0 @@
From ba9f0c8ca71a1c44e35dd8d3221abda0bd4e465a Mon Sep 17 00:00:00 2001
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Date: Thu, 17 Oct 2024 05:39:30 +0000
Subject: cpufreq/amd-pstate: Set the initial min_freq to lowest_nonlinear_freq
According to the AMD architectural programmer's manual volume 2 [1], in
section "17.6.4.1 CPPC_CAPABILITY_1" lowest_nonlinear_perf is described
as "Reports the most energy efficient performance level (in terms of
performance per watt). Above this threshold, lower performance levels
generally result in increased energy efficiency. Reducing performance
below this threshold does not result in total energy savings for a given
computation, although it reduces instantaneous power consumption". So
lowest_nonlinear_perf is the most power efficient performance level, and
going below that would lead to a worse performance/watt.
Also, setting the minimum frequency to lowest_nonlinear_freq (instead of
lowest_freq) allows the CPU to idle at a higher frequency which leads
to more time being spent in a deeper idle state (as trivial idle tasks
are completed sooner). This has shown a power benefit in some systems;
in other systems, power consumption has increased, but so has the
throughput/watt.
Modify the initial policy_data->min set by the cpufreq core to
lowest_nonlinear_freq, in the ->verify() callback. Also set
cpudata->req[0] to FREQ_QOS_MIN_DEFAULT_VALUE (i.e. 0), so that it also
gets overridden by the check in the verify function (a sketch of the
QoS request lifecycle follows this patch).
Link: https://www.amd.com/content/dam/amd/en/documents/processor-tech-docs/programmer-references/24593.pdf [1]
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 21 ++++++++++++++++++++-
1 file changed, 20 insertions(+), 1 deletion(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -559,8 +559,27 @@ cpufreq_policy_put:
static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
{
+ /*
+ * Initialize lower frequency limit (i.e. policy->min) with
+ * lowest_nonlinear_frequency which is the most energy efficient
+ * frequency. Override the initial value set by cpufreq core and
+ * amd-pstate qos_requests.
+ */
+ if (policy_data->min == FREQ_QOS_MIN_DEFAULT_VALUE) {
+ struct cpufreq_policy *policy = cpufreq_cpu_get(policy_data->cpu);
+ struct amd_cpudata *cpudata;
+
+ if (!policy)
+ return -EINVAL;
+
+ cpudata = policy->driver_data;
+ policy_data->min = cpudata->lowest_nonlinear_freq;
+ cpufreq_cpu_put(policy);
+ }
+
cpufreq_verify_within_cpu_limits(policy_data);
pr_debug("policy_max =%d, policy_min=%d\n", policy_data->max, policy_data->min);
+
return 0;
}
@@ -1009,7 +1028,7 @@ static int amd_pstate_cpu_init(struct cp
policy->fast_switch_possible = true;
ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
- FREQ_QOS_MIN, policy->cpuinfo.min_freq);
+ FREQ_QOS_MIN, FREQ_QOS_MIN_DEFAULT_VALUE);
if (ret < 0) {
dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
goto free_cpudata1;
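
The FREQ_QOS_MIN_DEFAULT_VALUE change works because a QoS request left at
the default sentinel imposes no floor on policy->min, so the ->verify()
override above takes effect; any later update to the request reasserts a
real floor. A sketch of that lifecycle (the example_* names are
illustrative):

    #include <linux/cpufreq.h>
    #include <linux/pm_qos.h>

    static struct freq_qos_request example_min_req;

    static int example_init(struct cpufreq_policy *policy)
    {
            /* Sentinel: imposes nothing yet, leaving verify() in charge. */
            return freq_qos_add_request(&policy->constraints,
                                        &example_min_req, FREQ_QOS_MIN,
                                        FREQ_QOS_MIN_DEFAULT_VALUE);
    }

    static int example_raise_floor(s32 khz)
    {
            /* A real request later overrides the verify()-chosen default. */
            return freq_qos_update_request(&example_min_req, khz);
    }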


@@ -1,44 +0,0 @@
From e50aedf7fcd345c6f2e22a942deae007d71582df Mon Sep 17 00:00:00 2001
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Date: Thu, 17 Oct 2024 10:05:27 +0000
Subject: cpufreq/amd-pstate: Call amd_pstate_register_driver() in amd_pstate_init()
Replace a similar chunk of code in amd_pstate_init() with an
amd_pstate_register_driver() call.
Suggested-by: Mario Limonciello <mario.limonciello@amd.com>
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
---
drivers/cpufreq/amd-pstate.c | 12 ++----------
1 file changed, 2 insertions(+), 10 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1909,17 +1909,10 @@ static int __init amd_pstate_init(void)
return ret;
}
- /* enable amd pstate feature */
- ret = amd_pstate_enable(true);
- if (ret) {
- pr_err("failed to enable driver mode(%d)\n", cppc_state);
- return ret;
- }
-
- ret = cpufreq_register_driver(current_pstate_driver);
+ ret = amd_pstate_register_driver(cppc_state);
if (ret) {
pr_err("failed to register with return %d\n", ret);
- goto disable_driver;
+ return ret;
}
dev_root = bus_get_dev_root(&cpu_subsys);
@@ -1936,7 +1929,6 @@ static int __init amd_pstate_init(void)
global_attr_free:
cpufreq_unregister_driver(current_pstate_driver);
-disable_driver:
amd_pstate_enable(false);
return ret;
}


@@ -1,81 +0,0 @@
From 658af2bf561ee3883369b2caca4d688983f943de Mon Sep 17 00:00:00 2001
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Date: Thu, 17 Oct 2024 10:05:29 +0000
Subject: cpufreq/amd-pstate: Call amd_pstate_set_driver() in
amd_pstate_register_driver()
Replace a similar chunk of code in amd_pstate_register_driver() with an
amd_pstate_set_driver() call.
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
---
drivers/cpufreq/amd-pstate.c | 47 +++++++++++++++++-------------------
1 file changed, 22 insertions(+), 25 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1221,16 +1221,32 @@ static void amd_pstate_driver_cleanup(vo
current_pstate_driver = NULL;
}
+static int amd_pstate_set_driver(int mode_idx)
+{
+ if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
+ cppc_state = mode_idx;
+ if (cppc_state == AMD_PSTATE_DISABLE)
+ pr_info("driver is explicitly disabled\n");
+
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ current_pstate_driver = &amd_pstate_epp_driver;
+
+ if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
+ current_pstate_driver = &amd_pstate_driver;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static int amd_pstate_register_driver(int mode)
{
int ret;
- if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED)
- current_pstate_driver = &amd_pstate_driver;
- else if (mode == AMD_PSTATE_ACTIVE)
- current_pstate_driver = &amd_pstate_epp_driver;
- else
- return -EINVAL;
+ ret = amd_pstate_set_driver(mode);
+ if (ret)
+ return ret;
cppc_state = mode;
@@ -1756,25 +1772,6 @@ static struct cpufreq_driver amd_pstate_
.attr = amd_pstate_epp_attr,
};
-static int __init amd_pstate_set_driver(int mode_idx)
-{
- if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
- cppc_state = mode_idx;
- if (cppc_state == AMD_PSTATE_DISABLE)
- pr_info("driver is explicitly disabled\n");
-
- if (cppc_state == AMD_PSTATE_ACTIVE)
- current_pstate_driver = &amd_pstate_epp_driver;
-
- if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
- current_pstate_driver = &amd_pstate_driver;
-
- return 0;
- }
-
- return -EINVAL;
-}
-
/*
* CPPC function is not supported for family ID 17H with model_ID ranging from 0x10 to 0x2F.
* show the debug message that helps to check if the CPU has CPPC support for loading issue.


@@ -1,41 +0,0 @@
From 0f2929e03651c101f1a6fa8ccf40167eb48c1789 Mon Sep 17 00:00:00 2001
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Date: Thu, 17 Oct 2024 10:05:31 +0000
Subject: cpufreq/amd-pstate: Remove the switch case in amd_pstate_init()
Replace the switch case with a more readable if condition.
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
---
drivers/cpufreq/amd-pstate.c | 16 +++++-----------
1 file changed, 5 insertions(+), 11 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1873,21 +1873,15 @@ static int __init amd_pstate_init(void)
cppc_state = CONFIG_X86_AMD_PSTATE_DEFAULT_MODE;
}
- switch (cppc_state) {
- case AMD_PSTATE_DISABLE:
+ if (cppc_state == AMD_PSTATE_DISABLE) {
pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
- case AMD_PSTATE_PASSIVE:
- case AMD_PSTATE_ACTIVE:
- case AMD_PSTATE_GUIDED:
- ret = amd_pstate_set_driver(cppc_state);
- if (ret)
- return ret;
- break;
- default:
- return -EINVAL;
}
+ ret = amd_pstate_set_driver(cppc_state);
+ if (ret)
+ return ret;
+
/* capability check */
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
pr_debug("AMD CPPC MSR based functionality is supported\n");


@@ -1,43 +0,0 @@
From bec664db265db933a107bac290601eb89acb938b Mon Sep 17 00:00:00 2001
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Date: Thu, 17 Oct 2024 10:05:33 +0000
Subject: cpufreq/amd-pstate: Remove the redundant amd_pstate_set_driver() call
amd_pstate_set_driver() is called twice, once in amd_pstate_init() and once
as part of amd_pstate_register_driver(). Move code around and eliminate
the redundancy.
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
---
drivers/cpufreq/amd-pstate.c | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1878,9 +1878,11 @@ static int __init amd_pstate_init(void)
return -ENODEV;
}
- ret = amd_pstate_set_driver(cppc_state);
- if (ret)
+ ret = amd_pstate_register_driver(cppc_state);
+ if (ret) {
+ pr_err("failed to register with return %d\n", ret);
return ret;
+ }
/* capability check */
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
@@ -1900,12 +1902,6 @@ static int __init amd_pstate_init(void)
return ret;
}
- ret = amd_pstate_register_driver(cppc_state);
- if (ret) {
- pr_err("failed to register with return %d\n", ret);
- return ret;
- }
-
dev_root = bus_get_dev_root(&cpu_subsys);
if (dev_root) {
ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group);


@@ -1,33 +0,0 @@
From 82d4b19d01912cd9c2cfb3c638a43cc2b04ffc63 Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Thu, 17 Oct 2024 12:34:39 -0500
Subject: cpufreq/amd-pstate-ut: Add fix for min freq unit test
commit 642aff3964b0f ("cpufreq/amd-pstate: Set the initial min_freq to
lowest_nonlinear_freq") changed the iniital minimum frequency to lowest
nonlinear frequency, but the unit tests weren't updated and now fail.
Update them to match this same change.
Fixes: 642aff3964b0f ("cpufreq/amd-pstate: Set the initial min_freq to lowest_nonlinear_freq")
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate-ut.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -227,10 +227,10 @@ static void amd_pstate_ut_check_freq(u32
goto skip_test;
}
- if (cpudata->min_freq != policy->min) {
+ if (cpudata->lowest_nonlinear_freq != policy->min) {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
- pr_err("%s cpu%d cpudata_min_freq=%d policy_min=%d, they should be equal!\n",
- __func__, cpu, cpudata->min_freq, policy->min);
+ pr_err("%s cpu%d cpudata_lowest_nonlinear_freq=%d policy_min=%d, they should be equal!\n",
+ __func__, cpu, cpudata->lowest_nonlinear_freq, policy->min);
goto skip_test;
}


@@ -1,44 +0,0 @@
From b1bc26a3ecd5afc4d6106332910fbdc3f6718acd Mon Sep 17 00:00:00 2001
From: Swapnil Sapkal <swapnil.sapkal@amd.com>
Date: Mon, 21 Oct 2024 15:48:36 +0530
Subject: amd-pstate: Switch to amd-pstate by default on some Server platforms
Currently the default cpufreq driver for all the AMD EPYC servers is
acpi-cpufreq. Going forward, switch to amd-pstate as the default
driver on the AMD EPYC server platforms with CPU family 0x1A or
higher. The default mode will be active mode.
Testing shows that amd-pstate with active mode and performance
governor provides comparable or better performance per-watt against
acpi-cpufreq + performance governor.
Likewise, amd-pstate with active mode and powersave governor with the
energy_performance_preference=power (EPP=255) provides comparable or
better performance per-watt against acpi-cpufreq + schedutil governor
for a wide range of workloads.
Users can still revert to using the acpi-cpufreq driver on these
platforms with the "amd_pstate=disable" kernel command-line parameter.
Signed-off-by: Swapnil Sapkal <swapnil.sapkal@amd.com>
Signed-off-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1862,10 +1862,10 @@ static int __init amd_pstate_init(void)
if (cppc_state == AMD_PSTATE_UNDEFINED) {
/* Disable on the following configs by default:
* 1. Undefined platforms
- * 2. Server platforms
+ * 2. Server platforms with CPUs older than Family 0x1A.
*/
if (amd_pstate_acpi_pm_profile_undefined() ||
- amd_pstate_acpi_pm_profile_server()) {
+ (amd_pstate_acpi_pm_profile_server() && boot_cpu_data.x86 < 0x1A)) {
pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
}


@@ -1,121 +0,0 @@
From ca32f306d11e95ca133a2f90249bb4555c2742c3 Mon Sep 17 00:00:00 2001
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Date: Wed, 23 Oct 2024 10:21:06 +0000
Subject: cpufreq/amd-pstate: Rename functions that enable CPPC
Explicitly rename functions that enable CPPC as *_cppc_*.
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Reviewed-by: Perry Yuan <perry.yuan@amd.com>
Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -336,7 +336,7 @@ static int amd_pstate_set_energy_pref_in
return ret;
}
-static inline int msr_enable(bool enable)
+static inline int msr_cppc_enable(bool enable)
{
int ret, cpu;
unsigned long logical_proc_id_mask = 0;
@@ -362,7 +362,7 @@ static inline int msr_enable(bool enable
return 0;
}
-static int shmem_enable(bool enable)
+static int shmem_cppc_enable(bool enable)
{
int cpu, ret = 0;
struct cppc_perf_ctrls perf_ctrls;
@@ -389,11 +389,11 @@ static int shmem_enable(bool enable)
return ret;
}
-DEFINE_STATIC_CALL(amd_pstate_enable, msr_enable);
+DEFINE_STATIC_CALL(amd_pstate_cppc_enable, msr_cppc_enable);
-static inline int amd_pstate_enable(bool enable)
+static inline int amd_pstate_cppc_enable(bool enable)
{
- return static_call(amd_pstate_enable)(enable);
+ return static_call(amd_pstate_cppc_enable)(enable);
}
static int msr_init_perf(struct amd_cpudata *cpudata)
@@ -1072,7 +1072,7 @@ static int amd_pstate_cpu_resume(struct
{
int ret;
- ret = amd_pstate_enable(true);
+ ret = amd_pstate_cppc_enable(true);
if (ret)
pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
@@ -1083,7 +1083,7 @@ static int amd_pstate_cpu_suspend(struct
{
int ret;
- ret = amd_pstate_enable(false);
+ ret = amd_pstate_cppc_enable(false);
if (ret)
pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
@@ -1216,7 +1216,7 @@ static ssize_t show_energy_performance_p
static void amd_pstate_driver_cleanup(void)
{
- amd_pstate_enable(false);
+ amd_pstate_cppc_enable(false);
cppc_state = AMD_PSTATE_DISABLE;
current_pstate_driver = NULL;
}
@@ -1250,7 +1250,7 @@ static int amd_pstate_register_driver(in
cppc_state = mode;
- ret = amd_pstate_enable(true);
+ ret = amd_pstate_cppc_enable(true);
if (ret) {
pr_err("failed to enable cppc during amd-pstate driver registration, return %d\n",
ret);
@@ -1629,7 +1629,7 @@ static void amd_pstate_epp_reenable(stru
u64 value, max_perf;
int ret;
- ret = amd_pstate_enable(true);
+ ret = amd_pstate_cppc_enable(true);
if (ret)
pr_err("failed to enable amd pstate during resume, return %d\n", ret);
@@ -1716,7 +1716,7 @@ static int amd_pstate_epp_suspend(struct
cpudata->suspended = true;
/* disable CPPC in lowlevel firmware */
- ret = amd_pstate_enable(false);
+ ret = amd_pstate_cppc_enable(false);
if (ret)
pr_err("failed to suspend, return %d\n", ret);
@@ -1891,7 +1891,7 @@ static int __init amd_pstate_init(void)
current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
} else {
pr_debug("AMD CPPC shared memory based functionality is supported\n");
- static_call_update(amd_pstate_enable, shmem_enable);
+ static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable);
static_call_update(amd_pstate_init_perf, shmem_init_perf);
static_call_update(amd_pstate_update_perf, shmem_update_perf);
}
@@ -1916,7 +1916,7 @@ static int __init amd_pstate_init(void)
global_attr_free:
cpufreq_unregister_driver(current_pstate_driver);
- amd_pstate_enable(false);
+ amd_pstate_cppc_enable(false);
return ret;
}
device_initcall(amd_pstate_init);


@@ -1,30 +0,0 @@
From de31d4a11e894a73ed7ef2388fb27f0bb4036de3 Mon Sep 17 00:00:00 2001
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Date: Wed, 23 Oct 2024 10:21:08 +0000
Subject: cpufreq/amd-pstate: Do not attempt to clear MSR_AMD_CPPC_ENABLE
MSR_AMD_CPPC_ENABLE is a write-once register, i.e. attempting to clear
it is futile; it will not take effect. Hence, return early if the
disable (0) argument is passed to msr_cppc_enable().
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 6 ++++++
1 file changed, 6 insertions(+)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -341,6 +341,12 @@ static inline int msr_cppc_enable(bool e
int ret, cpu;
unsigned long logical_proc_id_mask = 0;
+ /*
+ * MSR_AMD_CPPC_ENABLE is write-once, once set it cannot be cleared.
+ */
+ if (!enable)
+ return 0;
+
if (enable == cppc_enabled)
return 0;


@@ -1,39 +0,0 @@
From c5eadae39ccdf6bada95c834147e285e445b12ad Mon Sep 17 00:00:00 2001
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Date: Wed, 23 Oct 2024 10:21:10 +0000
Subject: cpufreq/amd-pstate: Call cppc_set_epp_perf in the reenable function
The EPP value being set in perf_ctrls.energy_perf is not being
propagated to the shared memory region; fix that.
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Reviewed-by: Perry Yuan <perry.yuan@amd.com>
Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1646,8 +1646,9 @@ static void amd_pstate_epp_reenable(stru
wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
} else {
perf_ctrls.max_perf = max_perf;
- perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
+ cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
}
}
@@ -1688,8 +1689,9 @@ static void amd_pstate_epp_offline(struc
} else {
perf_ctrls.desired_perf = 0;
perf_ctrls.max_perf = min_perf;
- perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
+ cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
}
mutex_unlock(&amd_pstate_limits_lock);
}


@@ -1,25 +0,0 @@
From 275a7a8c95ac5d0c0ff586acd882d751478f6a00 Mon Sep 17 00:00:00 2001
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Date: Wed, 23 Oct 2024 10:21:12 +0000
Subject: cpufreq/amd-pstate: Align offline flow of shared memory and MSR based
systems
Set min_perf to lowest_perf for shared memory systems, similar to the MSR
based systems.
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 1 +
1 file changed, 1 insertion(+)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1688,6 +1688,7 @@ static void amd_pstate_epp_offline(struc
wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
} else {
perf_ctrls.desired_perf = 0;
+ perf_ctrls.min_perf = min_perf;
perf_ctrls.max_perf = min_perf;
cppc_set_perf(cpudata->cpu, &perf_ctrls);
perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);


@@ -1,60 +0,0 @@
From 173172a4604a050ccb5a6745acd9fa90d01257a7 Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Fri, 25 Oct 2024 12:14:55 -0500
Subject: x86/cpufeatures: Rename X86_FEATURE_FAST_CPPC to have AMD prefix
This is an AMD-specific feature of some processors, so put AMD into
the name.
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
arch/x86/include/asm/cpufeatures.h | 2 +-
arch/x86/kernel/cpu/scattered.c | 2 +-
drivers/cpufreq/amd-pstate.c | 2 +-
tools/arch/x86/include/asm/cpufeatures.h | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -473,7 +473,7 @@
#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* BHI_DIS_S HW control available */
#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* BHI_DIS_S HW control enabled */
#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */
-#define X86_FEATURE_FAST_CPPC (21*32 + 5) /* AMD Fast CPPC */
+#define X86_FEATURE_AMD_FAST_CPPC (21*32 + 5) /* Fast CPPC */
/*
* BUG word(s)
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -45,7 +45,7 @@ static const struct cpuid_bit cpuid_bits
{ X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
- { X86_FEATURE_FAST_CPPC, CPUID_EDX, 15, 0x80000007, 0 },
+ { X86_FEATURE_AMD_FAST_CPPC, CPUID_EDX, 15, 0x80000007, 0 },
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
{ X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 },
{ X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -880,7 +880,7 @@ static u32 amd_pstate_get_transition_del
transition_delay_ns = cppc_get_transition_latency(cpu);
if (transition_delay_ns == CPUFREQ_ETERNAL) {
- if (cpu_feature_enabled(X86_FEATURE_FAST_CPPC))
+ if (cpu_feature_enabled(X86_FEATURE_AMD_FAST_CPPC))
return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY;
else
return AMD_PSTATE_TRANSITION_DELAY;
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -472,7 +472,7 @@
#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* BHI_DIS_S HW control available */
#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* BHI_DIS_S HW control enabled */
#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */
-#define X86_FEATURE_FAST_CPPC (21*32 + 5) /* AMD Fast CPPC */
+#define X86_FEATURE_AMD_FAST_CPPC (21*32 + 5) /* AMD Fast CPPC */
/*
* BUG word(s)


@@ -1,43 +0,0 @@
From 34ef29430b9a217ef0811f218b8f86c631d6c574 Mon Sep 17 00:00:00 2001
From: Perry Yuan <perry.yuan@amd.com>
Date: Fri, 25 Oct 2024 12:14:56 -0500
Subject: x86/cpufeatures: Add feature bits for AMD heterogeneous processor
CPUID leaf 0x80000026 advertises core types with different efficiency
rankings.
Bit 30 indicates the heterogeneous core topology feature: if the bit is
set, not all instances at the current hierarchical level have the same
core topology.
This is described in the AMD64 Architecture Programmer's Manual Volumes
2 and 3, doc IDs #25493 and #25494 (a userspace sketch for querying the
bit follows this patch).
Signed-off-by: Perry Yuan <perry.yuan@amd.com>
Co-developed-by: Mario Limonciello <mario.limonciello@amd.com>
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
arch/x86/include/asm/cpufeatures.h | 1 +
arch/x86/kernel/cpu/scattered.c | 1 +
2 files changed, 2 insertions(+)
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -474,6 +474,7 @@
#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* BHI_DIS_S HW control enabled */
#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */
#define X86_FEATURE_AMD_FAST_CPPC (21*32 + 5) /* Fast CPPC */
+#define X86_FEATURE_AMD_HETEROGENEOUS_CORES (21*32 + 6) /* Heterogeneous Core Topology */
/*
* BUG word(s)
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -52,6 +52,7 @@ static const struct cpuid_bit cpuid_bits
{ X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 },
{ X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 },
{ X86_FEATURE_AMD_LBR_PMC_FREEZE, CPUID_EAX, 2, 0x80000022, 0 },
+ { X86_FEATURE_AMD_HETEROGENEOUS_CORES, CPUID_EAX, 30, 0x80000026, 0 },
{ 0, 0, 0, 0, 0 }
};
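
To check the bit this patch scatters without kernel help, leaf 0x80000026
can be queried directly. A small userspace sketch using the GCC/clang
cpuid.h builtins (assumes the leaf is implemented on the machine):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* Subleaf 0 = core level of the topology hierarchy. */
            if (!__get_cpuid_count(0x80000026, 0, &eax, &ebx, &ecx, &edx))
                    return 1;   /* leaf not implemented */

            /* EAX bit 30: heterogeneous core topology at this level. */
            printf("heterogeneous cores: %s\n",
                   (eax & (1u << 30)) ? "yes" : "no");
            return 0;
    }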


@@ -1,32 +0,0 @@
From 5bd4c7b75f5588eb81bee35179569e27a4e164e4 Mon Sep 17 00:00:00 2001
From: Perry Yuan <perry.yuan@amd.com>
Date: Fri, 25 Oct 2024 12:14:57 -0500
Subject: x86/cpu: Enable SD_ASYM_PACKING for PKG Domain on AMD Processors
Enable the SD_ASYM_PACKING domain flag for the PKG domain on AMD
heterogeneous processors.
This flag is beneficial for processors with one or more CCDs and
relies on x86_sched_itmt_flags().
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Signed-off-by: Perry Yuan <perry.yuan@amd.com>
Co-developed-by: Mario Limonciello <mario.limonciello@amd.com>
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
arch/x86/kernel/smpboot.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -497,8 +497,9 @@ static int x86_cluster_flags(void)
static int x86_die_flags(void)
{
- if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
- return x86_sched_itmt_flags();
+ if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU) ||
+ cpu_feature_enabled(X86_FEATURE_AMD_HETEROGENEOUS_CORES))
+ return x86_sched_itmt_flags();
return 0;
}


@@ -1,187 +0,0 @@
From c30f11be765d5a5a68e975fbe720a4bdb6900388 Mon Sep 17 00:00:00 2001
From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Date: Fri, 25 Oct 2024 12:14:58 -0500
Subject: x86/cpu: Add CPU type to struct cpuinfo_topology
Sometimes it is required to take actions based on if a CPU is a performance
or efficiency core. As an example, intel_pstate driver uses the Intel
core-type to determine CPU scaling. Also, some CPU vulnerabilities only
affect a specific CPU type, like RFDS only affects Intel Atom. Hybrid
systems that have variants P+E, P-only(Core) and E-only(Atom), it is not
straightforward to identify which variant is affected by a type specific
vulnerability.
Such processors do have CPUID field that can uniquely identify them. Like,
P+E, P-only and E-only enumerates CPUID.1A.CORE_TYPE identification, while
P+E additionally enumerates CPUID.7.HYBRID. Based on this information, it
is possible for boot CPU to identify if a system has mixed CPU types.
Add a new field hw_cpu_type to struct cpuinfo_topology that stores the
hardware specific CPU type. This saves the overhead of IPIs to get the CPU
type of a different CPU. CPU type is populated early in the boot process,
before vulnerabilities are enumerated (a sketch decoding these fields
follows this patch).
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Co-developed-by: Mario Limonciello <mario.limonciello@amd.com>
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
arch/x86/include/asm/intel-family.h | 6 +++++
arch/x86/include/asm/processor.h | 18 ++++++++++++++
arch/x86/include/asm/topology.h | 9 +++++++
arch/x86/kernel/cpu/debugfs.c | 1 +
arch/x86/kernel/cpu/topology_amd.c | 3 +++
arch/x86/kernel/cpu/topology_common.c | 34 +++++++++++++++++++++++++++
6 files changed, 71 insertions(+)
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -183,4 +183,10 @@
/* Family 19 */
#define INTEL_PANTHERCOVE_X IFM(19, 0x01) /* Diamond Rapids */
+/* CPU core types */
+enum intel_cpu_type {
+ INTEL_CPU_TYPE_ATOM = 0x20,
+ INTEL_CPU_TYPE_CORE = 0x40,
+};
+
#endif /* _ASM_X86_INTEL_FAMILY_H */
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -105,6 +105,24 @@ struct cpuinfo_topology {
// Cache level topology IDs
u32 llc_id;
u32 l2c_id;
+
+ // Hardware defined CPU-type
+ union {
+ u32 cpu_type;
+ struct {
+ // CPUID.1A.EAX[23-0]
+ u32 intel_native_model_id :24;
+ // CPUID.1A.EAX[31-24]
+ u32 intel_type :8;
+ };
+ struct {
+ // CPUID 0x80000026.EBX
+ u32 amd_num_processors :16,
+ amd_power_eff_ranking :8,
+ amd_native_model_id :4,
+ amd_type :4;
+ };
+ };
};
struct cpuinfo_x86 {
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -114,6 +114,12 @@ enum x86_topology_domains {
TOPO_MAX_DOMAIN,
};
+enum x86_topology_cpu_type {
+ TOPO_CPU_TYPE_PERFORMANCE,
+ TOPO_CPU_TYPE_EFFICIENCY,
+ TOPO_CPU_TYPE_UNKNOWN,
+};
+
struct x86_topology_system {
unsigned int dom_shifts[TOPO_MAX_DOMAIN];
unsigned int dom_size[TOPO_MAX_DOMAIN];
@@ -149,6 +155,9 @@ extern unsigned int __max_threads_per_co
extern unsigned int __num_threads_per_package;
extern unsigned int __num_cores_per_package;
+const char *get_topology_cpu_type_name(struct cpuinfo_x86 *c);
+enum x86_topology_cpu_type get_topology_cpu_type(struct cpuinfo_x86 *c);
+
static inline unsigned int topology_max_packages(void)
{
return __max_logical_packages;
--- a/arch/x86/kernel/cpu/debugfs.c
+++ b/arch/x86/kernel/cpu/debugfs.c
@@ -22,6 +22,7 @@ static int cpu_debug_show(struct seq_fil
seq_printf(m, "die_id: %u\n", c->topo.die_id);
seq_printf(m, "cu_id: %u\n", c->topo.cu_id);
seq_printf(m, "core_id: %u\n", c->topo.core_id);
+ seq_printf(m, "cpu_type: %s\n", get_topology_cpu_type_name(c));
seq_printf(m, "logical_pkg_id: %u\n", c->topo.logical_pkg_id);
seq_printf(m, "logical_die_id: %u\n", c->topo.logical_die_id);
seq_printf(m, "llc_id: %u\n", c->topo.llc_id);
--- a/arch/x86/kernel/cpu/topology_amd.c
+++ b/arch/x86/kernel/cpu/topology_amd.c
@@ -182,6 +182,9 @@ static void parse_topology_amd(struct to
if (cpu_feature_enabled(X86_FEATURE_TOPOEXT))
has_topoext = cpu_parse_topology_ext(tscan);
+ if (cpu_feature_enabled(X86_FEATURE_AMD_HETEROGENEOUS_CORES))
+ tscan->c->topo.cpu_type = cpuid_ebx(0x80000026);
+
if (!has_topoext && !parse_8000_0008(tscan))
return;
--- a/arch/x86/kernel/cpu/topology_common.c
+++ b/arch/x86/kernel/cpu/topology_common.c
@@ -3,6 +3,7 @@
#include <xen/xen.h>
+#include <asm/intel-family.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/smp.h>
@@ -27,6 +28,36 @@ void topology_set_dom(struct topo_scan *
}
}
+enum x86_topology_cpu_type get_topology_cpu_type(struct cpuinfo_x86 *c)
+{
+ if (c->x86_vendor == X86_VENDOR_INTEL) {
+ switch (c->topo.intel_type) {
+ case INTEL_CPU_TYPE_ATOM: return TOPO_CPU_TYPE_EFFICIENCY;
+ case INTEL_CPU_TYPE_CORE: return TOPO_CPU_TYPE_PERFORMANCE;
+ }
+ }
+ if (c->x86_vendor == X86_VENDOR_AMD) {
+ switch (c->topo.amd_type) {
+ case 0: return TOPO_CPU_TYPE_PERFORMANCE;
+ case 1: return TOPO_CPU_TYPE_EFFICIENCY;
+ }
+ }
+
+ return TOPO_CPU_TYPE_UNKNOWN;
+}
+
+const char *get_topology_cpu_type_name(struct cpuinfo_x86 *c)
+{
+ switch (get_topology_cpu_type(c)) {
+ case TOPO_CPU_TYPE_PERFORMANCE:
+ return "performance";
+ case TOPO_CPU_TYPE_EFFICIENCY:
+ return "efficiency";
+ default:
+ return "unknown";
+ }
+}
+
static unsigned int __maybe_unused parse_num_cores_legacy(struct cpuinfo_x86 *c)
{
struct {
@@ -87,6 +118,7 @@ static void parse_topology(struct topo_s
.cu_id = 0xff,
.llc_id = BAD_APICID,
.l2c_id = BAD_APICID,
+ .cpu_type = TOPO_CPU_TYPE_UNKNOWN,
};
struct cpuinfo_x86 *c = tscan->c;
struct {
@@ -132,6 +164,8 @@ static void parse_topology(struct topo_s
case X86_VENDOR_INTEL:
if (!IS_ENABLED(CONFIG_CPU_SUP_INTEL) || !cpu_parse_topology_ext(tscan))
parse_legacy(tscan);
+ if (c->cpuid_level >= 0x1a)
+ c->topo.cpu_type = cpuid_eax(0x1a);
break;
case X86_VENDOR_HYGON:
if (IS_ENABLED(CONFIG_CPU_SUP_HYGON))
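
Decoding the AMD half of the new union by hand, with the field widths
taken from the struct above (the helper name is made up):

    #include <linux/printk.h>
    #include <linux/types.h>

    static void example_decode_amd_cpu_type(u32 ebx /* CPUID 0x80000026.EBX */)
    {
            u32 num_processors = ebx & 0xffff;        /* bits 15:0  */
            u32 power_ranking  = (ebx >> 16) & 0xff;  /* bits 23:16 */
            u32 native_model   = (ebx >> 24) & 0xf;   /* bits 27:24 */
            u32 core_type      = (ebx >> 28) & 0xf;   /* bits 31:28 */

            /* Per get_topology_cpu_type(): 0 = performance, 1 = efficiency. */
            pr_info("type=%u ranking=%u model=%u nr=%u\n",
                    core_type, power_ranking, native_model, num_processors);
    }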


@@ -1,71 +0,0 @@
From ab9618cbe5e3d55b09b59f5e18e890be80ca1076 Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Fri, 25 Oct 2024 12:14:59 -0500
Subject: x86/amd: Use heterogeneous core topology for identifying boost
numerator
AMD heterogeneous designs include two types of cores:
* Performance
* Efficiency
Each core type has different highest performance values configured by the
platform. Drivers such as `amd_pstate` need to identify the type of
core to correctly set an appropriate boost numerator to calculate the
maximum frequency.
X86_FEATURE_AMD_HETEROGENEOUS_CORES is used to identify whether the SoC
supports heterogeneous core type by reading CPUID leaf Fn_0x80000026.
On performance cores a scaling factor of 196 is used. On efficiency
cores the scaling factor is the value reported as the highest perf.
Efficiency cores have the same preferred core rankings (a worked
example of the resulting ceilings follows this patch).
Tested-by: Eric Naim <dnaim@cachyos.org>
Tested-by: Peter Jung <ptr1337@cachyos.org>
Suggested-by: Perry Yuan <perry.yuan@amd.com>
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
arch/x86/kernel/acpi/cppc.c | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
--- a/arch/x86/kernel/acpi/cppc.c
+++ b/arch/x86/kernel/acpi/cppc.c
@@ -239,8 +239,10 @@ EXPORT_SYMBOL_GPL(amd_detect_prefcore);
*/
int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
{
+ enum x86_topology_cpu_type core_type = get_topology_cpu_type(&cpu_data(cpu));
bool prefcore;
int ret;
+ u32 tmp;
ret = amd_detect_prefcore(&prefcore);
if (ret)
@@ -266,6 +268,27 @@ int amd_get_boost_ratio_numerator(unsign
break;
}
}
+
+ /* detect if running on heterogeneous design */
+ if (cpu_feature_enabled(X86_FEATURE_AMD_HETEROGENEOUS_CORES)) {
+ switch (core_type) {
+ case TOPO_CPU_TYPE_UNKNOWN:
+ pr_warn("Undefined core type found for cpu %d\n", cpu);
+ break;
+ case TOPO_CPU_TYPE_PERFORMANCE:
+ /* use the max scale for performance cores */
+ *numerator = CPPC_HIGHEST_PERF_PERFORMANCE;
+ return 0;
+ case TOPO_CPU_TYPE_EFFICIENCY:
+ /* use the highest perf value for efficiency cores */
+ ret = amd_get_highest_perf(cpu, &tmp);
+ if (ret)
+ return ret;
+ *numerator = tmp;
+ return 0;
+ }
+ }
+
*numerator = CPPC_HIGHEST_PERF_PREFCORE;
return 0;
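
A worked example of why the numerator matters, with hypothetical numbers
and assuming the driver derives its frequency ceiling roughly as
nominal_freq * numerator / nominal_perf (the exact derivation lives
elsewhere in amd-pstate):

    /* nominal_freq = 3000 MHz, nominal_perf = 120
     *
     * performance core: numerator = CPPC_HIGHEST_PERF_PERFORMANCE (196)
     *         ceiling ~ 3000 * 196 / 120 = 4900 MHz
     *
     * efficiency core: numerator = its own highest perf, say 140
     *         ceiling ~ 3000 * 140 / 120 = 3500 MHz
     */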


@@ -1,38 +0,0 @@
From f1a423d6cfc0888638642bea1b8ed5c64770888c Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Mon, 28 Oct 2024 09:55:41 -0500
Subject: cpufreq/amd-pstate: Push adjust_perf vfunc init into cpu_init
As the driver can be changed in and out of different modes, it's possible
that adjust_perf is assigned when it shouldn't be.
This could happen if an MSR design is started up in passive mode and then
switches to active mode.
To solve this explicitly clear `adjust_perf` in amd_pstate_epp_cpu_init().
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1534,6 +1534,8 @@ static int amd_pstate_epp_cpu_init(struc
WRITE_ONCE(cpudata->cppc_cap1_cached, value);
}
+ current_pstate_driver->adjust_perf = NULL;
+
return 0;
free_cpudata1:
@@ -1896,8 +1898,6 @@ static int __init amd_pstate_init(void)
/* capability check */
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
pr_debug("AMD CPPC MSR based functionality is supported\n");
- if (cppc_state != AMD_PSTATE_ACTIVE)
- current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
} else {
pr_debug("AMD CPPC shared memory based functionality is supported\n");
static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable);


@@ -1,47 +0,0 @@
From 242ffb086a15c8aec549096a6ad96d186b670fa9 Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Mon, 28 Oct 2024 09:55:42 -0500
Subject: cpufreq/amd-pstate: Move registration after static function call
update
On shared memory designs the static functions need to work before
registration is done or the system can hang at bootup.
Move the registration later in amd_pstate_init() to solve this.
Fixes: e238968a2087 ("cpufreq/amd-pstate: Remove the redundant amd_pstate_set_driver() call")
Reported-by: Klara Modin <klarasmodin@gmail.com>
Closes: https://lore.kernel.org/linux-pm/cf9c146d-bacf-444e-92e2-15ebf513af96@gmail.com/#t
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1889,12 +1889,6 @@ static int __init amd_pstate_init(void)
return -ENODEV;
}
- ret = amd_pstate_register_driver(cppc_state);
- if (ret) {
- pr_err("failed to register with return %d\n", ret);
- return ret;
- }
-
/* capability check */
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
pr_debug("AMD CPPC MSR based functionality is supported\n");
@@ -1905,6 +1899,12 @@ static int __init amd_pstate_init(void)
static_call_update(amd_pstate_update_perf, shmem_update_perf);
}
+ ret = amd_pstate_register_driver(cppc_state);
+ if (ret) {
+ pr_err("failed to register with return %d\n", ret);
+ return ret;
+ }
+
if (amd_pstate_prefcore) {
ret = amd_detect_prefcore(&amd_pstate_prefcore);
if (ret)


@@ -1,152 +0,0 @@
From f6e6b4ebc5ef3ad05b291928d31f597210a2eb6e Mon Sep 17 00:00:00 2001
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Date: Wed, 4 Dec 2024 14:48:38 +0000
Subject: cpufreq/amd-pstate: Convert the amd_pstate_get/set_epp() to static
calls
MSR and shared memory based systems have different mechanisms to get and
set the EPP value. Split those mechanisms into different functions and
assign them appropriately to the static calls at boot time. This eliminates
the need for the "if(cpu_feature_enabled(X86_FEATURE_CPPC))" checks at
runtime.
Also, propagate the error code from rdmsrl_on_cpu() and cppc_get_epp_perf()
to *_get_epp()'s caller, instead of returning -EIO unconditionally.
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 92 +++++++++++++++++++++++-------------
1 file changed, 60 insertions(+), 32 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -210,26 +210,40 @@ static inline int get_mode_idx_from_str(
static DEFINE_MUTEX(amd_pstate_limits_lock);
static DEFINE_MUTEX(amd_pstate_driver_lock);
-static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
+static s16 msr_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
{
u64 epp;
int ret;
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- if (!cppc_req_cached) {
- epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
- &cppc_req_cached);
- if (epp)
- return epp;
- }
- epp = (cppc_req_cached >> 24) & 0xFF;
- } else {
- ret = cppc_get_epp_perf(cpudata->cpu, &epp);
+ if (!cppc_req_cached) {
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req_cached);
if (ret < 0) {
pr_debug("Could not retrieve energy perf value (%d)\n", ret);
- return -EIO;
+ return ret;
}
}
+ epp = (cppc_req_cached >> 24) & 0xFF;
+
+ return (s16)epp;
+}
+
+DEFINE_STATIC_CALL(amd_pstate_get_epp, msr_get_epp);
+
+static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
+{
+ return static_call(amd_pstate_get_epp)(cpudata, cppc_req_cached);
+}
+
+static s16 shmem_get_epp(struct amd_cpudata *cpudata, u64 dummy)
+{
+ u64 epp;
+ int ret;
+
+ ret = cppc_get_epp_perf(cpudata->cpu, &epp);
+ if (ret < 0) {
+ pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+ return ret;
+ }
return (s16)(epp & 0xff);
}
@@ -283,33 +297,45 @@ static inline void amd_pstate_update_per
max_perf, fast_switch);
}
-static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
{
int ret;
- struct cppc_perf_ctrls perf_ctrls;
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- u64 value = READ_ONCE(cpudata->cppc_req_cached);
+ u64 value = READ_ONCE(cpudata->cppc_req_cached);
- value &= ~GENMASK_ULL(31, 24);
- value |= (u64)epp << 24;
- WRITE_ONCE(cpudata->cppc_req_cached, value);
-
- ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- if (!ret)
- cpudata->epp_cached = epp;
- } else {
- amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
- cpudata->max_limit_perf, false);
+ value &= ~GENMASK_ULL(31, 24);
+ value |= (u64)epp << 24;
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
- perf_ctrls.energy_perf = epp;
- ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
- if (ret) {
- pr_debug("failed to set energy perf value (%d)\n", ret);
- return ret;
- }
+ ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ if (!ret)
cpudata->epp_cached = epp;
+
+ return ret;
+}
+
+DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
+
+static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+{
+ return static_call(amd_pstate_set_epp)(cpudata, epp);
+}
+
+static int shmem_set_epp(struct amd_cpudata *cpudata, u32 epp)
+{
+ int ret;
+ struct cppc_perf_ctrls perf_ctrls;
+
+ amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
+ cpudata->max_limit_perf, false);
+
+ perf_ctrls.energy_perf = epp;
+ ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+ if (ret) {
+ pr_debug("failed to set energy perf value (%d)\n", ret);
+ return ret;
}
+ cpudata->epp_cached = epp;
return ret;
}
@@ -1897,6 +1923,8 @@ static int __init amd_pstate_init(void)
static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable);
static_call_update(amd_pstate_init_perf, shmem_init_perf);
static_call_update(amd_pstate_update_perf, shmem_update_perf);
+ static_call_update(amd_pstate_get_epp, shmem_get_epp);
+ static_call_update(amd_pstate_set_epp, shmem_set_epp);
}
ret = amd_pstate_register_driver(cppc_state);


@@ -1,38 +0,0 @@
From e73d7f9cb6d8f0e79860788371c88f88057ae34b Mon Sep 17 00:00:00 2001
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Date: Wed, 4 Dec 2024 14:48:39 +0000
Subject: cpufreq/amd-pstate: Move the invocation of amd_pstate_update_perf()
amd_pstate_update_perf() should not be a part of the shmem_set_epp()
function, so move it to the amd_pstate_epp_update_limit() function,
where it is needed.
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -326,9 +326,6 @@ static int shmem_set_epp(struct amd_cpud
int ret;
struct cppc_perf_ctrls perf_ctrls;
- amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
- cpudata->max_limit_perf, false);
-
perf_ctrls.energy_perf = epp;
ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
if (ret) {
@@ -1628,6 +1625,10 @@ static int amd_pstate_epp_update_limit(s
epp = 0;
WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
+ cpudata->max_limit_perf, false);
+
return amd_pstate_set_epp(cpudata, epp);
}


@@ -1,82 +0,0 @@
From 021028e977fcd835ed92a4543f7977a8aa0c1dd6 Mon Sep 17 00:00:00 2001
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Date: Wed, 4 Dec 2024 14:48:40 +0000
Subject: cpufreq/amd-pstate: Refactor amd_pstate_epp_reenable() and
amd_pstate_epp_offline()
Replace similar code chunks with amd_pstate_update_perf() and
amd_pstate_set_epp() function calls.
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 38 +++++++-----------------------------
1 file changed, 7 insertions(+), 31 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1660,25 +1660,17 @@ static int amd_pstate_epp_set_policy(str
static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
{
- struct cppc_perf_ctrls perf_ctrls;
- u64 value, max_perf;
+ u64 max_perf;
int ret;
ret = amd_pstate_cppc_enable(true);
if (ret)
pr_err("failed to enable amd pstate during resume, return %d\n", ret);
- value = READ_ONCE(cpudata->cppc_req_cached);
max_perf = READ_ONCE(cpudata->highest_perf);
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- } else {
- perf_ctrls.max_perf = max_perf;
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
- perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
- cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
- }
+ amd_pstate_update_perf(cpudata, 0, 0, max_perf, false);
+ amd_pstate_set_epp(cpudata, cpudata->epp_cached);
}
static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
@@ -1698,31 +1690,15 @@ static int amd_pstate_epp_cpu_online(str
static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
- struct cppc_perf_ctrls perf_ctrls;
int min_perf;
- u64 value;
min_perf = READ_ONCE(cpudata->lowest_perf);
- value = READ_ONCE(cpudata->cppc_req_cached);
mutex_lock(&amd_pstate_limits_lock);
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
- /* Set max perf same as min perf */
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(min_perf);
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- } else {
- perf_ctrls.desired_perf = 0;
- perf_ctrls.min_perf = min_perf;
- perf_ctrls.max_perf = min_perf;
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
- perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
- cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
- }
+ amd_pstate_update_perf(cpudata, min_perf, 0, min_perf, false);
+ amd_pstate_set_epp(cpudata, AMD_CPPC_EPP_BALANCE_POWERSAVE);
+
mutex_unlock(&amd_pstate_limits_lock);
}


@@ -1,43 +0,0 @@
From f94f6415c70f76d885dd542e8631f826f427941e Mon Sep 17 00:00:00 2001
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Date: Wed, 4 Dec 2024 14:48:41 +0000
Subject: cpufreq/amd-pstate: Remove the cppc_state check in offline/online
functions
Only the amd_pstate_epp driver (i.e. cppc_state == AMD_PSTATE_ACTIVE) enters
the amd_pstate_epp_offline() and amd_pstate_epp_cpu_online() functions,
so remove the unnecessary check of whether cppc_state is equal to
AMD_PSTATE_ACTIVE.
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
---
drivers/cpufreq/amd-pstate.c | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1679,10 +1679,8 @@ static int amd_pstate_epp_cpu_online(str
pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
- if (cppc_state == AMD_PSTATE_ACTIVE) {
- amd_pstate_epp_reenable(cpudata);
- cpudata->suspended = false;
- }
+ amd_pstate_epp_reenable(cpudata);
+ cpudata->suspended = false;
return 0;
}
@@ -1711,8 +1709,7 @@ static int amd_pstate_epp_cpu_offline(st
if (cpudata->suspended)
return 0;
- if (cppc_state == AMD_PSTATE_ACTIVE)
- amd_pstate_epp_offline(policy);
+ amd_pstate_epp_offline(policy);
return 0;
}

View File

@@ -1,56 +0,0 @@
From 5aea3e8c4255cb04876e3af714d58ed329376b7f Mon Sep 17 00:00:00 2001
From: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Date: Wed, 4 Dec 2024 14:48:42 +0000
Subject: cpufreq/amd-pstate: Merge amd_pstate_epp_cpu_offline() and
amd_pstate_epp_offline()
amd_pstate_epp_offline() is only called from within
amd_pstate_epp_cpu_offline(), so there is little reason to keep it as a
separate function. Hence, remove it.
Also remove the unnecessary debug print in the offline path while at it.
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 17 ++++-------------
1 file changed, 4 insertions(+), 13 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1685,11 +1685,14 @@ static int amd_pstate_epp_cpu_online(str
return 0;
}
-static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
+static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
int min_perf;
+ if (cpudata->suspended)
+ return 0;
+
min_perf = READ_ONCE(cpudata->lowest_perf);
mutex_lock(&amd_pstate_limits_lock);
@@ -1698,18 +1701,6 @@ static void amd_pstate_epp_offline(struc
amd_pstate_set_epp(cpudata, AMD_CPPC_EPP_BALANCE_POWERSAVE);
mutex_unlock(&amd_pstate_limits_lock);
-}
-
-static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
-{
- struct amd_cpudata *cpudata = policy->driver_data;
-
- pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
-
- if (cpudata->suspended)
- return 0;
-
- amd_pstate_epp_offline(policy);
return 0;
}

View File

@@ -1,132 +0,0 @@
From ea6b500eb38124a59e83254435340e0390117c54 Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Thu, 5 Dec 2024 16:28:33 -0600
Subject: cpufreq/amd-pstate: Add trace event for EPP perf updates
In "active" mode the most important thing for debugging whether
an issue is hardware or software based is to look at the last value
written to the CPPC request MSR or shared memory region.
The 'amd_pstate_epp_perf' trace event shows the values being written
for all CPUs.
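With the event enabled, each update emits one line following the TP_printk()
format in the hunk below; an illustrative sample (made-up values, not from
real hardware) would look like:

  cpu2: [10<->166]/228, epp=128, boost=1

i.e. min_perf 10, max_perf 166, highest_perf 228, plus the current EPP
value and boost state.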
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
Reviewed-by: Perry Yuan <perry.yuan@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
---
drivers/cpufreq/amd-pstate-trace.h | 45 ++++++++++++++++++++++++++++++
drivers/cpufreq/amd-pstate.c | 28 +++++++++++++++++++
2 files changed, 73 insertions(+)
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -88,6 +88,51 @@ TRACE_EVENT(amd_pstate_perf,
)
);
+TRACE_EVENT(amd_pstate_epp_perf,
+
+ TP_PROTO(unsigned int cpu_id,
+ unsigned int highest_perf,
+ unsigned int epp,
+ unsigned int min_perf,
+ unsigned int max_perf,
+ bool boost
+ ),
+
+ TP_ARGS(cpu_id,
+ highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ boost),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu_id)
+ __field(unsigned int, highest_perf)
+ __field(unsigned int, epp)
+ __field(unsigned int, min_perf)
+ __field(unsigned int, max_perf)
+ __field(bool, boost)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = cpu_id;
+ __entry->highest_perf = highest_perf;
+ __entry->epp = epp;
+ __entry->min_perf = min_perf;
+ __entry->max_perf = max_perf;
+ __entry->boost = boost;
+ ),
+
+ TP_printk("cpu%u: [%u<->%u]/%u, epp=%u, boost=%u",
+ (unsigned int)__entry->cpu_id,
+ (unsigned int)__entry->min_perf,
+ (unsigned int)__entry->max_perf,
+ (unsigned int)__entry->highest_perf,
+ (unsigned int)__entry->epp,
+ (bool)__entry->boost
+ )
+);
+
#endif /* _AMD_PSTATE_TRACE_H */
/* This part must be outside protection */
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -354,6 +354,14 @@ static int amd_pstate_set_energy_pref_in
return -EBUSY;
}
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+ epp,
+ AMD_CPPC_MIN_PERF(cpudata->cppc_req_cached),
+ AMD_CPPC_MAX_PERF(cpudata->cppc_req_cached),
+ cpudata->boost_state);
+ }
+
ret = amd_pstate_set_epp(cpudata, epp);
return ret;
@@ -1626,6 +1634,13 @@ static int amd_pstate_epp_update_limit(s
WRITE_ONCE(cpudata->cppc_req_cached, value);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
+ cpudata->min_limit_perf,
+ cpudata->max_limit_perf,
+ cpudata->boost_state);
+ }
+
amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
cpudata->max_limit_perf, false);
@@ -1669,6 +1684,13 @@ static void amd_pstate_epp_reenable(stru
max_perf = READ_ONCE(cpudata->highest_perf);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+ cpudata->epp_cached,
+ AMD_CPPC_MIN_PERF(cpudata->cppc_req_cached),
+ max_perf, cpudata->boost_state);
+ }
+
amd_pstate_update_perf(cpudata, 0, 0, max_perf, false);
amd_pstate_set_epp(cpudata, cpudata->epp_cached);
}
@@ -1697,6 +1719,12 @@ static int amd_pstate_epp_cpu_offline(st
mutex_lock(&amd_pstate_limits_lock);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+ AMD_CPPC_EPP_BALANCE_POWERSAVE,
+ min_perf, min_perf, cpudata->boost_state);
+ }
+
amd_pstate_update_perf(cpudata, min_perf, 0, min_perf, false);
amd_pstate_set_epp(cpudata, AMD_CPPC_EPP_BALANCE_POWERSAVE);

View File

@@ -1,123 +0,0 @@
From 57fdcf14dcd80808fc46edea6d31bb2a699c0fd1 Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Thu, 5 Dec 2024 16:28:34 -0600
Subject: cpufreq/amd-pstate: convert mutex use to guard()
Using a scoped guard() declaration unlocks the mutex automatically when the
guard goes out of scope, removing the need for explicit mutex_unlock() calls
on every exit path.
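The status_show() conversion below (taken from this patch's diff) illustrates
the pattern: the guard releases the lock on every return path, so the
temporary ret variable and the explicit unlock disappear:

static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	guard(mutex)(&amd_pstate_driver_lock);

	/* the mutex is dropped automatically when this function returns */
	return amd_pstate_show_status(buf);
}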
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
---
drivers/cpufreq/amd-pstate.c | 32 ++++++++++++--------------------
1 file changed, 12 insertions(+), 20 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -782,12 +782,12 @@ static int amd_pstate_set_boost(struct c
pr_err("Boost mode is not supported by this processor or SBIOS\n");
return -EOPNOTSUPP;
}
- mutex_lock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
+
ret = amd_pstate_cpu_boost_update(policy, state);
WRITE_ONCE(cpudata->boost_state, !ret ? state : false);
policy->boost_enabled = !ret ? state : false;
refresh_frequency_limits(policy);
- mutex_unlock(&amd_pstate_driver_lock);
return ret;
}
@@ -878,7 +878,8 @@ static void amd_pstate_update_limits(uns
if (!amd_pstate_prefcore)
return;
- mutex_lock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
+
ret = amd_get_highest_perf(cpu, &cur_high);
if (ret)
goto free_cpufreq_put;
@@ -898,7 +899,6 @@ free_cpufreq_put:
if (!highest_perf_changed)
cpufreq_update_policy(cpu);
- mutex_unlock(&amd_pstate_driver_lock);
}
/*
@@ -1231,11 +1231,11 @@ static ssize_t store_energy_performance_
if (ret < 0)
return -EINVAL;
- mutex_lock(&amd_pstate_limits_lock);
+ guard(mutex)(&amd_pstate_limits_lock);
+
ret = amd_pstate_set_energy_pref_index(cpudata, ret);
- mutex_unlock(&amd_pstate_limits_lock);
- return ret ?: count;
+ return ret ? ret : count;
}
static ssize_t show_energy_performance_preference(
@@ -1399,13 +1399,10 @@ EXPORT_SYMBOL_GPL(amd_pstate_update_stat
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- ssize_t ret;
- mutex_lock(&amd_pstate_driver_lock);
- ret = amd_pstate_show_status(buf);
- mutex_unlock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
- return ret;
+ return amd_pstate_show_status(buf);
}
static ssize_t status_store(struct device *a, struct device_attribute *b,
@@ -1414,9 +1411,8 @@ static ssize_t status_store(struct devic
char *p = memchr(buf, '\n', count);
int ret;
- mutex_lock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
ret = amd_pstate_update_status(buf, p ? p - buf : count);
- mutex_unlock(&amd_pstate_driver_lock);
return ret < 0 ? ret : count;
}
@@ -1717,7 +1713,7 @@ static int amd_pstate_epp_cpu_offline(st
min_perf = READ_ONCE(cpudata->lowest_perf);
- mutex_lock(&amd_pstate_limits_lock);
+ guard(mutex)(&amd_pstate_limits_lock);
if (trace_amd_pstate_epp_perf_enabled()) {
trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
@@ -1728,8 +1724,6 @@ static int amd_pstate_epp_cpu_offline(st
amd_pstate_update_perf(cpudata, min_perf, 0, min_perf, false);
amd_pstate_set_epp(cpudata, AMD_CPPC_EPP_BALANCE_POWERSAVE);
- mutex_unlock(&amd_pstate_limits_lock);
-
return 0;
}
@@ -1758,13 +1752,11 @@ static int amd_pstate_epp_resume(struct
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata->suspended) {
- mutex_lock(&amd_pstate_limits_lock);
+ guard(mutex)(&amd_pstate_limits_lock);
/* enable amd pstate from suspend state*/
amd_pstate_epp_reenable(cpudata);
- mutex_unlock(&amd_pstate_limits_lock);
-
cpudata->suspended = false;
}

View File

@@ -1,52 +0,0 @@
From 06c52df851256238253ec286682cafbd5bb832d8 Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Thu, 5 Dec 2024 16:28:35 -0600
Subject: cpufreq/amd-pstate: Drop cached epp_policy variable
epp_policy is not used by any of the current code, so there
is no need to cache it.
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
---
drivers/cpufreq/amd-pstate.c | 3 ---
drivers/cpufreq/amd-pstate.h | 2 --
2 files changed, 5 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1506,7 +1506,6 @@ static int amd_pstate_epp_cpu_init(struc
return -ENOMEM;
cpudata->cpu = policy->cpu;
- cpudata->epp_policy = 0;
ret = amd_pstate_init_perf(cpudata);
if (ret)
@@ -1613,8 +1612,6 @@ static int amd_pstate_epp_update_limit(s
value &= ~AMD_CPPC_DES_PERF(~0L);
value |= AMD_CPPC_DES_PERF(0);
- cpudata->epp_policy = cpudata->policy;
-
/* Get BIOS pre-defined epp value */
epp = amd_pstate_get_epp(cpudata, value);
if (epp < 0) {
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -57,7 +57,6 @@ struct amd_aperf_mperf {
* @hw_prefcore: check whether HW supports preferred core featue.
* Only when hw_prefcore and early prefcore param are true,
* AMD P-State driver supports preferred core featue.
- * @epp_policy: Last saved policy used to set energy-performance preference
* @epp_cached: Cached CPPC energy-performance preference value
* @policy: Cpufreq policy value
* @cppc_cap1_cached Cached MSR_AMD_CPPC_CAP1 register value
@@ -94,7 +93,6 @@ struct amd_cpudata {
bool hw_prefcore;
/* EPP feature related attributes*/
- s16 epp_policy;
s16 epp_cached;
u32 policy;
u64 cppc_cap1_cached;

View File

@@ -1,117 +0,0 @@
From da3e84f3dd424c1b7fa6d86484ddcccac391e49c Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Thu, 5 Dec 2024 16:28:36 -0600
Subject: cpufreq/amd-pstate: Use FIELD_PREP and FIELD_GET macros
The FIELD_PREP and FIELD_GET macros improve readability and help
to avoid shifting bugs.
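As a before/after sketch distilled from the hunks below, both forms are
equivalent for the EPP byte (bits 31:24 of the cached request value), but
the field position now lives in a single mask definition:

	/* before: open-coded shifting and masking */
	epp = (cppc_req_cached >> 24) & 0xFF;
	value &= ~GENMASK_ULL(31, 24);
	value |= (u64)epp << 24;

	/* after: FIELD_GET()/FIELD_PREP() against one mask */
	epp = FIELD_GET(AMD_PSTATE_EPP_PERF_MASK, cppc_req_cached);
	value &= ~AMD_PSTATE_EPP_PERF_MASK;
	value |= FIELD_PREP(AMD_PSTATE_EPP_PERF_MASK, epp);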
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
---
drivers/cpufreq/amd-pstate.c | 45 ++++++++++++++++--------------------
1 file changed, 20 insertions(+), 25 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -22,6 +22,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -88,6 +89,11 @@ static bool cppc_enabled;
static bool amd_pstate_prefcore = true;
static struct quirk_entry *quirks;
+#define AMD_PSTATE_MAX_PERF_MASK GENMASK(7, 0)
+#define AMD_PSTATE_MIN_PERF_MASK GENMASK(15, 8)
+#define AMD_PSTATE_DES_PERF_MASK GENMASK(23, 16)
+#define AMD_PSTATE_EPP_PERF_MASK GENMASK(31, 24)
+
/*
* AMD Energy Preference Performance (EPP)
* The EPP is used in the CCLK DPM controller to drive
@@ -212,7 +218,6 @@ static DEFINE_MUTEX(amd_pstate_driver_lo
static s16 msr_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
{
- u64 epp;
int ret;
if (!cppc_req_cached) {
@@ -222,9 +227,8 @@ static s16 msr_get_epp(struct amd_cpudat
return ret;
}
}
- epp = (cppc_req_cached >> 24) & 0xFF;
- return (s16)epp;
+ return FIELD_GET(AMD_PSTATE_EPP_PERF_MASK, cppc_req_cached);
}
DEFINE_STATIC_CALL(amd_pstate_get_epp, msr_get_epp);
@@ -299,12 +303,11 @@ static inline void amd_pstate_update_per
static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
{
- int ret;
-
u64 value = READ_ONCE(cpudata->cppc_req_cached);
+ int ret;
- value &= ~GENMASK_ULL(31, 24);
- value |= (u64)epp << 24;
+ value &= ~AMD_PSTATE_EPP_PERF_MASK;
+ value |= FIELD_PREP(AMD_PSTATE_EPP_PERF_MASK, epp);
WRITE_ONCE(cpudata->cppc_req_cached, value);
ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
@@ -563,18 +566,15 @@ static void amd_pstate_update(struct amd
des_perf = 0;
}
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
-
- value &= ~AMD_CPPC_DES_PERF(~0L);
- value |= AMD_CPPC_DES_PERF(des_perf);
-
/* limit the max perf when core performance boost feature is disabled */
if (!cpudata->boost_supported)
max_perf = min_t(unsigned long, nominal_perf, max_perf);
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(max_perf);
+ value &= ~(AMD_PSTATE_MAX_PERF_MASK | AMD_PSTATE_MIN_PERF_MASK |
+ AMD_PSTATE_DES_PERF_MASK);
+ value |= FIELD_PREP(AMD_PSTATE_MAX_PERF_MASK, max_perf);
+ value |= FIELD_PREP(AMD_PSTATE_DES_PERF_MASK, des_perf);
+ value |= FIELD_PREP(AMD_PSTATE_MIN_PERF_MASK, min_perf);
if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
@@ -1601,16 +1601,11 @@ static int amd_pstate_epp_update_limit(s
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
min_perf = min(cpudata->nominal_perf, max_perf);
- /* Initial min/max values for CPPC Performance Controls Register */
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
-
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(max_perf);
-
- /* CPPC EPP feature require to set zero to the desire perf bit */
- value &= ~AMD_CPPC_DES_PERF(~0L);
- value |= AMD_CPPC_DES_PERF(0);
+ value &= ~(AMD_PSTATE_MAX_PERF_MASK | AMD_PSTATE_MIN_PERF_MASK |
+ AMD_PSTATE_DES_PERF_MASK);
+ value |= FIELD_PREP(AMD_PSTATE_MAX_PERF_MASK, max_perf);
+ value |= FIELD_PREP(AMD_PSTATE_DES_PERF_MASK, 0);
+ value |= FIELD_PREP(AMD_PSTATE_MIN_PERF_MASK, min_perf);
/* Get BIOS pre-defined epp value */
epp = amd_pstate_get_epp(cpudata, value);

View File

@@ -1,103 +0,0 @@
From b1e3f18eb18f9febe9f4e32a1ef55cf60c97892a Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Thu, 5 Dec 2024 16:28:37 -0600
Subject: cpufreq/amd-pstate: Store the boost numerator as highest perf again
commit ad4caad58d91d ("cpufreq: amd-pstate: Merge
amd_pstate_highest_perf_set() into amd_get_boost_ratio_numerator()")
changed the semantics for highest perf and commit 18d9b52271213
("cpufreq/amd-pstate: Use nominal perf for limits when boost is disabled")
worked around those semantic changes.
This however is a confusing result and furthermore makes it awkward to
change frequency limits and boost due to the scaling differences. Restore
the boost numerator to highest perf again.
Suggested-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Fixes: ad4caad58d91 ("cpufreq: amd-pstate: Merge amd_pstate_highest_perf_set() into amd_get_boost_ratio_numerator()")
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
---
Documentation/admin-guide/pm/amd-pstate.rst | 4 +---
drivers/cpufreq/amd-pstate.c | 25 ++++++++++++---------
2 files changed, 16 insertions(+), 13 deletions(-)
--- a/Documentation/admin-guide/pm/amd-pstate.rst
+++ b/Documentation/admin-guide/pm/amd-pstate.rst
@@ -251,9 +251,7 @@ performance supported in `AMD CPPC Perfo
In some ASICs, the highest CPPC performance is not the one in the ``_CPC``
table, so we need to expose it to sysfs. If boost is not active, but
still supported, this maximum frequency will be larger than the one in
-``cpuinfo``. On systems that support preferred core, the driver will have
-different values for some cores than others and this will reflect the values
-advertised by the platform at bootup.
+``cpuinfo``.
This attribute is read-only.
``amd_pstate_lowest_nonlinear_freq``
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -438,15 +438,19 @@ static inline int amd_pstate_cppc_enable
static int msr_init_perf(struct amd_cpudata *cpudata)
{
- u64 cap1;
+ u64 cap1, numerator;
int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
&cap1);
if (ret)
return ret;
- WRITE_ONCE(cpudata->highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
- WRITE_ONCE(cpudata->max_limit_perf, AMD_CPPC_HIGHEST_PERF(cap1));
+ ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
+ if (ret)
+ return ret;
+
+ WRITE_ONCE(cpudata->highest_perf, numerator);
+ WRITE_ONCE(cpudata->max_limit_perf, numerator);
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
@@ -458,13 +462,18 @@ static int msr_init_perf(struct amd_cpud
static int shmem_init_perf(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
+ u64 numerator;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
- WRITE_ONCE(cpudata->highest_perf, cppc_perf.highest_perf);
- WRITE_ONCE(cpudata->max_limit_perf, cppc_perf.highest_perf);
+ ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
+ if (ret)
+ return ret;
+
+ WRITE_ONCE(cpudata->highest_perf, numerator);
+ WRITE_ONCE(cpudata->max_limit_perf, numerator);
WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
cppc_perf.lowest_nonlinear_perf);
@@ -950,7 +959,6 @@ static int amd_pstate_init_freq(struct a
{
int ret;
u32 min_freq, max_freq;
- u64 numerator;
u32 nominal_perf, nominal_freq;
u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
u32 boost_ratio, lowest_nonlinear_ratio;
@@ -972,10 +980,7 @@ static int amd_pstate_init_freq(struct a
nominal_perf = READ_ONCE(cpudata->nominal_perf);
- ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
- if (ret)
- return ret;
- boost_ratio = div_u64(numerator << SCHED_CAPACITY_SHIFT, nominal_perf);
+ boost_ratio = div_u64(cpudata->highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);

View File

@@ -1,45 +0,0 @@
From fab64249be63c384484cf82f4e08724577ad3b84 Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Thu, 5 Dec 2024 16:28:38 -0600
Subject: cpufreq/amd-pstate: Use boost numerator for upper bound of
frequencies
commit 18d9b5227121 ("cpufreq/amd-pstate: Use nominal perf for limits
when boost is disabled") introduced different semantics for min/max limits
based upon whether the user turned off boost from sysfs.
This however is not necessary when the highest perf value is the boost
numerator.
Suggested-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Fixes: 18d9b5227121 ("cpufreq/amd-pstate: Use nominal perf for limits when boost is disabled")
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
---
drivers/cpufreq/amd-pstate.c | 13 +++++--------
1 file changed, 5 insertions(+), 8 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -631,16 +631,13 @@ static int amd_pstate_verify(struct cpuf
static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
{
- u32 max_limit_perf, min_limit_perf, lowest_perf, max_perf;
+ u32 max_limit_perf, min_limit_perf, lowest_perf, max_perf, max_freq;
struct amd_cpudata *cpudata = policy->driver_data;
- if (cpudata->boost_supported && !policy->boost_enabled)
- max_perf = READ_ONCE(cpudata->nominal_perf);
- else
- max_perf = READ_ONCE(cpudata->highest_perf);
-
- max_limit_perf = div_u64(policy->max * max_perf, policy->cpuinfo.max_freq);
- min_limit_perf = div_u64(policy->min * max_perf, policy->cpuinfo.max_freq);
+ max_perf = READ_ONCE(cpudata->highest_perf);
+ max_freq = READ_ONCE(cpudata->max_freq);
+ max_limit_perf = div_u64(policy->max * max_perf, max_freq);
+ min_limit_perf = div_u64(policy->min * max_perf, max_freq);
lowest_perf = READ_ONCE(cpudata->lowest_perf);
if (min_limit_perf < lowest_perf)

View File

@@ -1,36 +0,0 @@
From a9b210306e861b7c2d3b8532c85e8cd54c3b322a Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Thu, 5 Dec 2024 16:28:39 -0600
Subject: cpufreq/amd-pstate: Only update the cached value in msr_set_epp() on
success
If writing the MSR_AMD_CPPC_REQ MSR fails, then the cached value in the
amd_cpudata structure should not be updated.
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
---
drivers/cpufreq/amd-pstate.c | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -308,11 +308,15 @@ static int msr_set_epp(struct amd_cpudat
value &= ~AMD_PSTATE_EPP_PERF_MASK;
value |= FIELD_PREP(AMD_PSTATE_EPP_PERF_MASK, epp);
- WRITE_ONCE(cpudata->cppc_req_cached, value);
ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- if (!ret)
- cpudata->epp_cached = epp;
+ if (ret) {
+ pr_err("failed to set energy perf value (%d)\n", ret);
+ return ret;
+ }
+
+ cpudata->epp_cached = epp;
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
return ret;
}

View File

@@ -1,134 +0,0 @@
From 27a3d0642a3d26ba54e8a21575e6d2cb0ada55b3 Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Thu, 5 Dec 2024 16:28:40 -0600
Subject: cpufreq/amd-pstate: store all values in cpudata struct in khz
Storing values in the cpudata structure in different units leads
to confusion and hard-coded conversions elsewhere. After the ratios are
calculated, store everything in kHz for any future use. Adjust all
relevant consumers for this change as well.
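As a worked example with illustrative numbers (not taken from real
hardware): with nominal_freq = 2000 MHz, highest_perf = 228 and
nominal_perf = 166, boost_ratio = (228 << SCHED_CAPACITY_SHIFT) / 166 = 1406,
so max_freq = (2000 * 1406) >> SCHED_CAPACITY_SHIFT = 2746 MHz, now stored
as 2746000 kHz instead of being multiplied by 1000 at every use site.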
Suggested-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
---
drivers/cpufreq/amd-pstate-ut.c | 12 +++++-------
drivers/cpufreq/amd-pstate.c | 30 +++++++++++++++---------------
2 files changed, 20 insertions(+), 22 deletions(-)
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -207,7 +207,6 @@ static void amd_pstate_ut_check_freq(u32
int cpu = 0;
struct cpufreq_policy *policy = NULL;
struct amd_cpudata *cpudata = NULL;
- u32 nominal_freq_khz;
for_each_possible_cpu(cpu) {
policy = cpufreq_cpu_get(cpu);
@@ -215,14 +214,13 @@ static void amd_pstate_ut_check_freq(u32
break;
cpudata = policy->driver_data;
- nominal_freq_khz = cpudata->nominal_freq*1000;
- if (!((cpudata->max_freq >= nominal_freq_khz) &&
- (nominal_freq_khz > cpudata->lowest_nonlinear_freq) &&
+ if (!((cpudata->max_freq >= cpudata->nominal_freq) &&
+ (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
(cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
(cpudata->min_freq > 0))) {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
- __func__, cpu, cpudata->max_freq, nominal_freq_khz,
+ __func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
cpudata->lowest_nonlinear_freq, cpudata->min_freq);
goto skip_test;
}
@@ -236,13 +234,13 @@ static void amd_pstate_ut_check_freq(u32
if (cpudata->boost_supported) {
if ((policy->max == cpudata->max_freq) ||
- (policy->max == nominal_freq_khz))
+ (policy->max == cpudata->nominal_freq))
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
else {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
__func__, cpu, policy->max, cpudata->max_freq,
- nominal_freq_khz);
+ cpudata->nominal_freq);
goto skip_test;
}
} else {
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -769,8 +769,8 @@ static int amd_pstate_cpu_boost_update(s
if (on)
policy->cpuinfo.max_freq = max_freq;
- else if (policy->cpuinfo.max_freq > nominal_freq * 1000)
- policy->cpuinfo.max_freq = nominal_freq * 1000;
+ else if (policy->cpuinfo.max_freq > nominal_freq)
+ policy->cpuinfo.max_freq = nominal_freq;
policy->max = policy->cpuinfo.max_freq;
@@ -970,29 +970,29 @@ static int amd_pstate_init_freq(struct a
return ret;
if (quirks && quirks->lowest_freq)
- min_freq = quirks->lowest_freq * 1000;
+ min_freq = quirks->lowest_freq;
else
- min_freq = cppc_perf.lowest_freq * 1000;
+ min_freq = cppc_perf.lowest_freq;
if (quirks && quirks->nominal_freq)
- nominal_freq = quirks->nominal_freq ;
+ nominal_freq = quirks->nominal_freq;
else
nominal_freq = cppc_perf.nominal_freq;
nominal_perf = READ_ONCE(cpudata->nominal_perf);
boost_ratio = div_u64(cpudata->highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
- max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
+ max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT);
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
nominal_perf);
- lowest_nonlinear_freq = (nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
+ lowest_nonlinear_freq = (nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT);
- WRITE_ONCE(cpudata->min_freq, min_freq);
- WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
- WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
- WRITE_ONCE(cpudata->max_freq, max_freq);
+ WRITE_ONCE(cpudata->min_freq, min_freq * 1000);
+ WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq * 1000);
+ WRITE_ONCE(cpudata->nominal_freq, nominal_freq * 1000);
+ WRITE_ONCE(cpudata->max_freq, max_freq * 1000);
/**
* Below values need to be initialized correctly, otherwise driver will fail to load
@@ -1000,15 +1000,15 @@ static int amd_pstate_init_freq(struct a
* lowest_nonlinear_freq is a value between [min_freq, nominal_freq]
* Check _CPC in ACPI table objects if any values are incorrect
*/
- if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) {
+ if (min_freq <= 0 || max_freq <= 0 || cpudata->nominal_freq <= 0 || min_freq > max_freq) {
pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n",
- min_freq, max_freq, nominal_freq * 1000);
+ min_freq, max_freq, cpudata->nominal_freq);
return -EINVAL;
}
- if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq * 1000) {
+ if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > cpudata->nominal_freq) {
pr_err("lowest_nonlinear_freq(%d) value is out of range [min_freq(%d), nominal_freq(%d)]\n",
- lowest_nonlinear_freq, min_freq, nominal_freq * 1000);
+ lowest_nonlinear_freq, min_freq, cpudata->nominal_freq);
return -EINVAL;
}

View File

@@ -1,69 +0,0 @@
From b2a0f625fa30dc907daf1b07bf94b15872da096b Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Thu, 5 Dec 2024 16:28:41 -0600
Subject: cpufreq/amd-pstate: Change amd_pstate_update_perf() to return an int
As msr_update_perf() calls an MSR it's possible that it fails. Pass
this return code up to the caller.
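A minimal sketch of a caller after this change (assumed shape, matching how
later patches in this series use the return value):

	ret = amd_pstate_update_perf(cpudata, min_perf, 0, max_perf, false);
	if (ret)
		return ret;	/* e.g. wrmsrl_on_cpu() failed */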
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
---
drivers/cpufreq/amd-pstate.c | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -281,24 +281,26 @@ static int amd_pstate_get_energy_pref_in
return index;
}
-static void msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
u32 des_perf, u32 max_perf, bool fast_switch)
{
- if (fast_switch)
+ if (fast_switch) {
wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
- else
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
- READ_ONCE(cpudata->cppc_req_cached));
+ return 0;
+ }
+
+ return wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
+ READ_ONCE(cpudata->cppc_req_cached));
}
DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
-static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
+static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
u32 min_perf, u32 des_perf,
u32 max_perf, bool fast_switch)
{
- static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
- max_perf, fast_switch);
+ return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+ max_perf, fast_switch);
}
static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
@@ -510,7 +512,7 @@ static inline int amd_pstate_init_perf(s
return static_call(amd_pstate_init_perf)(cpudata);
}
-static void shmem_update_perf(struct amd_cpudata *cpudata,
+static int shmem_update_perf(struct amd_cpudata *cpudata,
u32 min_perf, u32 des_perf,
u32 max_perf, bool fast_switch)
{
@@ -520,7 +522,7 @@ static void shmem_update_perf(struct amd
perf_ctrls.min_perf = min_perf;
perf_ctrls.desired_perf = des_perf;
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ return cppc_set_perf(cpudata->cpu, &perf_ctrls);
}
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)

View File

@@ -1,85 +0,0 @@
From 8399d57cda79a662800a4cd01bd7db951355f451 Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Thu, 5 Dec 2024 16:28:42 -0600
Subject: cpufreq/amd-pstate: Move limit updating code
The limit updating code in amd_pstate_epp_update_limit() should not
only apply to EPP updates. Move it to amd_pstate_update_min_max_limit()
so other callers can benefit as well.
With this move, the clamp_t calls are no longer necessary because the
verify callback clamps the limits when they are set.
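For reference, the clamping now happens once in the driver's ->verify()
callback, which the cpufreq core runs before any limit change takes effect;
a minimal sketch (assuming the stock cpufreq helper is used) is:

static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
{
	/* clamp policy_data->min/max into the CPU's supported range */
	cpufreq_verify_within_cpu_limits(policy_data);

	return 0;
}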
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 28 +++++-----------------------
1 file changed, 5 insertions(+), 23 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -567,10 +567,6 @@ static void amd_pstate_update(struct amd
u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
u64 value = prev;
- min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
- max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
max_freq = READ_ONCE(cpudata->max_limit_freq);
@@ -637,7 +633,7 @@ static int amd_pstate_verify(struct cpuf
static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
{
- u32 max_limit_perf, min_limit_perf, lowest_perf, max_perf, max_freq;
+ u32 max_limit_perf, min_limit_perf, max_perf, max_freq;
struct amd_cpudata *cpudata = policy->driver_data;
max_perf = READ_ONCE(cpudata->highest_perf);
@@ -645,12 +641,8 @@ static int amd_pstate_update_min_max_lim
max_limit_perf = div_u64(policy->max * max_perf, max_freq);
min_limit_perf = div_u64(policy->min * max_perf, max_freq);
- lowest_perf = READ_ONCE(cpudata->lowest_perf);
- if (min_limit_perf < lowest_perf)
- min_limit_perf = lowest_perf;
-
- if (max_limit_perf < min_limit_perf)
- max_limit_perf = min_limit_perf;
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ min_limit_perf = min(cpudata->nominal_perf, max_limit_perf);
WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
@@ -1592,28 +1584,18 @@ static void amd_pstate_epp_cpu_exit(stru
static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
- u32 max_perf, min_perf;
u64 value;
s16 epp;
- max_perf = READ_ONCE(cpudata->highest_perf);
- min_perf = READ_ONCE(cpudata->lowest_perf);
amd_pstate_update_min_max_limit(policy);
- max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
- min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
value = READ_ONCE(cpudata->cppc_req_cached);
- if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
- min_perf = min(cpudata->nominal_perf, max_perf);
-
value &= ~(AMD_PSTATE_MAX_PERF_MASK | AMD_PSTATE_MIN_PERF_MASK |
AMD_PSTATE_DES_PERF_MASK);
- value |= FIELD_PREP(AMD_PSTATE_MAX_PERF_MASK, max_perf);
+ value |= FIELD_PREP(AMD_PSTATE_MAX_PERF_MASK, cpudata->max_limit_perf);
value |= FIELD_PREP(AMD_PSTATE_DES_PERF_MASK, 0);
- value |= FIELD_PREP(AMD_PSTATE_MIN_PERF_MASK, min_perf);
+ value |= FIELD_PREP(AMD_PSTATE_MIN_PERF_MASK, cpudata->min_limit_perf);
/* Get BIOS pre-defined epp value */
epp = amd_pstate_get_epp(cpudata, value);

View File

@@ -1,233 +0,0 @@
From fe7f993593ee1891acc875dfb1e091cf17a43f7a Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Thu, 5 Dec 2024 16:28:43 -0600
Subject: cpufreq/amd-pstate: Cache EPP value and use that everywhere
Cache the value in cpudata->epp_cached, and use it in all callers.
As all callers now use the cached value, merge
amd_pstate_get_energy_pref_index() into show_energy_performance_preference().
Check if the EPP value is changed before writing it to MSR or
shared memory region.
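The change detection itself is a simple compare against the cached state,
as sketched here (mirroring the shmem_set_epp() hunk below):

	/* skip the MSR/shared-memory write when nothing changed */
	if (epp == cpudata->epp_cached)
		return 0;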
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 107 +++++++++++++++--------------------
1 file changed, 45 insertions(+), 62 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -216,29 +216,28 @@ static inline int get_mode_idx_from_str(
static DEFINE_MUTEX(amd_pstate_limits_lock);
static DEFINE_MUTEX(amd_pstate_driver_lock);
-static s16 msr_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
+static s16 msr_get_epp(struct amd_cpudata *cpudata)
{
+ u64 value;
int ret;
- if (!cppc_req_cached) {
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req_cached);
- if (ret < 0) {
- pr_debug("Could not retrieve energy perf value (%d)\n", ret);
- return ret;
- }
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
+ if (ret < 0) {
+ pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+ return ret;
}
- return FIELD_GET(AMD_PSTATE_EPP_PERF_MASK, cppc_req_cached);
+ return FIELD_GET(AMD_PSTATE_EPP_PERF_MASK, value);
}
DEFINE_STATIC_CALL(amd_pstate_get_epp, msr_get_epp);
-static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
+static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata)
{
- return static_call(amd_pstate_get_epp)(cpudata, cppc_req_cached);
+ return static_call(amd_pstate_get_epp)(cpudata);
}
-static s16 shmem_get_epp(struct amd_cpudata *cpudata, u64 dummy)
+static s16 shmem_get_epp(struct amd_cpudata *cpudata)
{
u64 epp;
int ret;
@@ -252,35 +251,6 @@ static s16 shmem_get_epp(struct amd_cpud
return (s16)(epp & 0xff);
}
-static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
-{
- s16 epp;
- int index = -EINVAL;
-
- epp = amd_pstate_get_epp(cpudata, 0);
- if (epp < 0)
- return epp;
-
- switch (epp) {
- case AMD_CPPC_EPP_PERFORMANCE:
- index = EPP_INDEX_PERFORMANCE;
- break;
- case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
- index = EPP_INDEX_BALANCE_PERFORMANCE;
- break;
- case AMD_CPPC_EPP_BALANCE_POWERSAVE:
- index = EPP_INDEX_BALANCE_POWERSAVE;
- break;
- case AMD_CPPC_EPP_POWERSAVE:
- index = EPP_INDEX_POWERSAVE;
- break;
- default:
- break;
- }
-
- return index;
-}
-
static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
u32 des_perf, u32 max_perf, bool fast_switch)
{
@@ -305,19 +275,23 @@ static inline int amd_pstate_update_perf
static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
{
- u64 value = READ_ONCE(cpudata->cppc_req_cached);
+ u64 value, prev;
int ret;
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
value &= ~AMD_PSTATE_EPP_PERF_MASK;
value |= FIELD_PREP(AMD_PSTATE_EPP_PERF_MASK, epp);
+ if (value == prev)
+ return 0;
+
ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
if (ret) {
pr_err("failed to set energy perf value (%d)\n", ret);
return ret;
}
- cpudata->epp_cached = epp;
+ WRITE_ONCE(cpudata->epp_cached, epp);
WRITE_ONCE(cpudata->cppc_req_cached, value);
return ret;
@@ -335,13 +309,16 @@ static int shmem_set_epp(struct amd_cpud
int ret;
struct cppc_perf_ctrls perf_ctrls;
+ if (epp == cpudata->epp_cached)
+ return 0;
+
perf_ctrls.energy_perf = epp;
ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
if (ret) {
pr_debug("failed to set energy perf value (%d)\n", ret);
return ret;
}
- cpudata->epp_cached = epp;
+ WRITE_ONCE(cpudata->epp_cached, epp);
return ret;
}
@@ -1244,9 +1221,22 @@ static ssize_t show_energy_performance_p
struct amd_cpudata *cpudata = policy->driver_data;
int preference;
- preference = amd_pstate_get_energy_pref_index(cpudata);
- if (preference < 0)
- return preference;
+ switch (cpudata->epp_cached) {
+ case AMD_CPPC_EPP_PERFORMANCE:
+ preference = EPP_INDEX_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
+ preference = EPP_INDEX_BALANCE_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_POWERSAVE:
+ preference = EPP_INDEX_BALANCE_POWERSAVE;
+ break;
+ case AMD_CPPC_EPP_POWERSAVE:
+ preference = EPP_INDEX_POWERSAVE;
+ break;
+ default:
+ return -EINVAL;
+ }
return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
}
@@ -1531,7 +1521,7 @@ static int amd_pstate_epp_cpu_init(struc
policy->driver_data = cpudata;
- cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata, 0);
+ cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata);
policy->min = policy->cpuinfo.min_freq;
policy->max = policy->cpuinfo.max_freq;
@@ -1585,35 +1575,26 @@ static int amd_pstate_epp_update_limit(s
{
struct amd_cpudata *cpudata = policy->driver_data;
u64 value;
- s16 epp;
amd_pstate_update_min_max_limit(policy);
value = READ_ONCE(cpudata->cppc_req_cached);
value &= ~(AMD_PSTATE_MAX_PERF_MASK | AMD_PSTATE_MIN_PERF_MASK |
- AMD_PSTATE_DES_PERF_MASK);
+ AMD_PSTATE_DES_PERF_MASK | AMD_PSTATE_EPP_PERF_MASK);
value |= FIELD_PREP(AMD_PSTATE_MAX_PERF_MASK, cpudata->max_limit_perf);
value |= FIELD_PREP(AMD_PSTATE_DES_PERF_MASK, 0);
value |= FIELD_PREP(AMD_PSTATE_MIN_PERF_MASK, cpudata->min_limit_perf);
- /* Get BIOS pre-defined epp value */
- epp = amd_pstate_get_epp(cpudata, value);
- if (epp < 0) {
- /**
- * This return value can only be negative for shared_memory
- * systems where EPP register read/write not supported.
- */
- return epp;
- }
-
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
- epp = 0;
+ WRITE_ONCE(cpudata->epp_cached, 0);
+ value |= FIELD_PREP(AMD_PSTATE_EPP_PERF_MASK, cpudata->epp_cached);
WRITE_ONCE(cpudata->cppc_req_cached, value);
if (trace_amd_pstate_epp_perf_enabled()) {
- trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+ cpudata->epp_cached,
cpudata->min_limit_perf,
cpudata->max_limit_perf,
cpudata->boost_state);
@@ -1622,7 +1603,7 @@ static int amd_pstate_epp_update_limit(s
amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
cpudata->max_limit_perf, false);
- return amd_pstate_set_epp(cpudata, epp);
+ return amd_pstate_set_epp(cpudata, READ_ONCE(cpudata->epp_cached));
}
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@@ -1638,6 +1619,8 @@ static int amd_pstate_epp_set_policy(str
cpudata->policy = policy->policy;
+ guard(mutex)(&amd_pstate_limits_lock);
+
ret = amd_pstate_epp_update_limit(policy);
if (ret)
return ret;

View File

@@ -1,182 +0,0 @@
From 98f0f9202cd0fc549f5beaaaf8750658d8ee2140 Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Thu, 5 Dec 2024 16:28:44 -0600
Subject: cpufreq/amd-pstate: Always write EPP value when updating perf
For MSR systems the EPP value is in the same register as the perf targets,
so dividing them into two separate MSR writes is wasteful.
In msr_update_perf(), update both EPP and perf values in one write to
MSR_AMD_CPPC_REQ, and cache them if successful.
To accomplish this plumb the EPP value into the update_perf call and modify
all its callers to check the return value.
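For reference, the MSR_AMD_CPPC_REQ layout implied by the masks defined
earlier in this series is:

	/* bits  7:0  max_perf
	 * bits 15:8  min_perf
	 * bits 23:16 des_perf
	 * bits 31:24 epp
	 */

so a single register write can update all four fields at once.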
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 71 ++++++++++++++++++++++--------------
1 file changed, 43 insertions(+), 28 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -252,25 +252,36 @@ static s16 shmem_get_epp(struct amd_cpud
}
static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, bool fast_switch)
+ u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
{
+ u64 value;
+
+ value = READ_ONCE(cpudata->cppc_req_cached);
if (fast_switch) {
wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
return 0;
+ } else {
+ int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
+ READ_ONCE(cpudata->cppc_req_cached));
+ if (ret)
+ return ret;
}
- return wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
- READ_ONCE(cpudata->cppc_req_cached));
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+ WRITE_ONCE(cpudata->epp_cached, epp);
+
+ return 0;
}
DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
u32 min_perf, u32 des_perf,
- u32 max_perf, bool fast_switch)
+ u32 max_perf, u32 epp,
+ bool fast_switch)
{
return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
- max_perf, fast_switch);
+ max_perf, epp, fast_switch);
}
static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
@@ -489,12 +500,19 @@ static inline int amd_pstate_init_perf(s
return static_call(amd_pstate_init_perf)(cpudata);
}
-static int shmem_update_perf(struct amd_cpudata *cpudata,
- u32 min_perf, u32 des_perf,
- u32 max_perf, bool fast_switch)
+static int shmem_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+ u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
{
struct cppc_perf_ctrls perf_ctrls;
+ if (cppc_state == AMD_PSTATE_ACTIVE) {
+ int ret = shmem_set_epp(cpudata, epp);
+
+ if (ret)
+ return ret;
+ WRITE_ONCE(cpudata->epp_cached, epp);
+ }
+
perf_ctrls.max_perf = max_perf;
perf_ctrls.min_perf = min_perf;
perf_ctrls.desired_perf = des_perf;
@@ -575,10 +593,10 @@ static void amd_pstate_update(struct amd
WRITE_ONCE(cpudata->cppc_req_cached, value);
- amd_pstate_update_perf(cpudata, min_perf, des_perf,
- max_perf, fast_switch);
+ amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
cpufreq_policy_put:
+
cpufreq_cpu_put(policy);
}
@@ -1575,6 +1593,7 @@ static int amd_pstate_epp_update_limit(s
{
struct amd_cpudata *cpudata = policy->driver_data;
u64 value;
+ u32 epp;
amd_pstate_update_min_max_limit(policy);
@@ -1587,23 +1606,19 @@ static int amd_pstate_epp_update_limit(s
value |= FIELD_PREP(AMD_PSTATE_MIN_PERF_MASK, cpudata->min_limit_perf);
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
- WRITE_ONCE(cpudata->epp_cached, 0);
- value |= FIELD_PREP(AMD_PSTATE_EPP_PERF_MASK, cpudata->epp_cached);
-
- WRITE_ONCE(cpudata->cppc_req_cached, value);
+ epp = 0;
+ else
+ epp = READ_ONCE(cpudata->epp_cached);
if (trace_amd_pstate_epp_perf_enabled()) {
- trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
- cpudata->epp_cached,
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
cpudata->min_limit_perf,
cpudata->max_limit_perf,
cpudata->boost_state);
}
- amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
- cpudata->max_limit_perf, false);
-
- return amd_pstate_set_epp(cpudata, READ_ONCE(cpudata->epp_cached));
+ return amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
+ cpudata->max_limit_perf, epp, false);
}
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@@ -1634,7 +1649,7 @@ static int amd_pstate_epp_set_policy(str
return 0;
}
-static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+static int amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
{
u64 max_perf;
int ret;
@@ -1652,17 +1667,19 @@ static void amd_pstate_epp_reenable(stru
max_perf, cpudata->boost_state);
}
- amd_pstate_update_perf(cpudata, 0, 0, max_perf, false);
- amd_pstate_set_epp(cpudata, cpudata->epp_cached);
+ return amd_pstate_update_perf(cpudata, 0, 0, max_perf, cpudata->epp_cached, false);
}
static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
- amd_pstate_epp_reenable(cpudata);
+ ret = amd_pstate_epp_reenable(cpudata);
+ if (ret)
+ return ret;
cpudata->suspended = false;
return 0;
@@ -1686,10 +1703,8 @@ static int amd_pstate_epp_cpu_offline(st
min_perf, min_perf, cpudata->boost_state);
}
- amd_pstate_update_perf(cpudata, min_perf, 0, min_perf, false);
- amd_pstate_set_epp(cpudata, AMD_CPPC_EPP_BALANCE_POWERSAVE);
-
- return 0;
+ return amd_pstate_update_perf(cpudata, min_perf, 0, min_perf,
+ AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
}
static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)

View File

@@ -1,158 +0,0 @@
From acf3f432287638044e472501a1d5969abee15043 Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Thu, 5 Dec 2024 16:28:45 -0600
Subject: cpufreq/amd-pstate: Check if CPPC request has changed before writing
to the MSR or shared memory
Move the common MSR field formatting code to msr_update_perf() from
its callers.
Check that the value has actually changed before flushing a write out.
Also drop the comparison from the passive flow tracing.
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate-trace.h | 7 +----
drivers/cpufreq/amd-pstate.c | 47 +++++++++++-------------------
2 files changed, 18 insertions(+), 36 deletions(-)
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -32,7 +32,6 @@ TRACE_EVENT(amd_pstate_perf,
u64 aperf,
u64 tsc,
unsigned int cpu_id,
- bool changed,
bool fast_switch
),
@@ -44,7 +43,6 @@ TRACE_EVENT(amd_pstate_perf,
aperf,
tsc,
cpu_id,
- changed,
fast_switch
),
@@ -57,7 +55,6 @@ TRACE_EVENT(amd_pstate_perf,
__field(unsigned long long, aperf)
__field(unsigned long long, tsc)
__field(unsigned int, cpu_id)
- __field(bool, changed)
__field(bool, fast_switch)
),
@@ -70,11 +67,10 @@ TRACE_EVENT(amd_pstate_perf,
__entry->aperf = aperf;
__entry->tsc = tsc;
__entry->cpu_id = cpu_id;
- __entry->changed = changed;
__entry->fast_switch = fast_switch;
),
- TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u changed=%s fast_switch=%s",
+ TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s",
(unsigned long)__entry->min_perf,
(unsigned long)__entry->target_perf,
(unsigned long)__entry->capacity,
@@ -83,7 +79,6 @@ TRACE_EVENT(amd_pstate_perf,
(unsigned long long)__entry->aperf,
(unsigned long long)__entry->tsc,
(unsigned int)__entry->cpu_id,
- (__entry->changed) ? "true" : "false",
(__entry->fast_switch) ? "true" : "false"
)
);
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -254,15 +254,26 @@ static s16 shmem_get_epp(struct amd_cpud
static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
{
- u64 value;
+ u64 value, prev;
+
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~(AMD_PSTATE_MAX_PERF_MASK | AMD_PSTATE_MIN_PERF_MASK |
+ AMD_PSTATE_DES_PERF_MASK | AMD_PSTATE_EPP_PERF_MASK);
+ value |= FIELD_PREP(AMD_PSTATE_MAX_PERF_MASK, max_perf);
+ value |= FIELD_PREP(AMD_PSTATE_DES_PERF_MASK, des_perf);
+ value |= FIELD_PREP(AMD_PSTATE_MIN_PERF_MASK, min_perf);
+ value |= FIELD_PREP(AMD_PSTATE_EPP_PERF_MASK, epp);
+
+ if (value == prev)
+ return 0;
- value = READ_ONCE(cpudata->cppc_req_cached);
if (fast_switch) {
- wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
+ wrmsrl(MSR_AMD_CPPC_REQ, value);
return 0;
} else {
- int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
- READ_ONCE(cpudata->cppc_req_cached));
+ int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+
if (ret)
return ret;
}
@@ -558,9 +569,7 @@ static void amd_pstate_update(struct amd
{
unsigned long max_freq;
struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu);
- u64 prev = READ_ONCE(cpudata->cppc_req_cached);
u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
- u64 value = prev;
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
@@ -576,27 +585,14 @@ static void amd_pstate_update(struct amd
if (!cpudata->boost_supported)
max_perf = min_t(unsigned long, nominal_perf, max_perf);
- value &= ~(AMD_PSTATE_MAX_PERF_MASK | AMD_PSTATE_MIN_PERF_MASK |
- AMD_PSTATE_DES_PERF_MASK);
- value |= FIELD_PREP(AMD_PSTATE_MAX_PERF_MASK, max_perf);
- value |= FIELD_PREP(AMD_PSTATE_DES_PERF_MASK, des_perf);
- value |= FIELD_PREP(AMD_PSTATE_MIN_PERF_MASK, min_perf);
-
if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
- cpudata->cpu, (value != prev), fast_switch);
+ cpudata->cpu, fast_switch);
}
- if (value == prev)
- goto cpufreq_policy_put;
-
- WRITE_ONCE(cpudata->cppc_req_cached, value);
-
amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
-cpufreq_policy_put:
-
cpufreq_cpu_put(policy);
}
@@ -1592,19 +1588,10 @@ static void amd_pstate_epp_cpu_exit(stru
static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
- u64 value;
u32 epp;
amd_pstate_update_min_max_limit(policy);
- value = READ_ONCE(cpudata->cppc_req_cached);
-
- value &= ~(AMD_PSTATE_MAX_PERF_MASK | AMD_PSTATE_MIN_PERF_MASK |
- AMD_PSTATE_DES_PERF_MASK | AMD_PSTATE_EPP_PERF_MASK);
- value |= FIELD_PREP(AMD_PSTATE_MAX_PERF_MASK, cpudata->max_limit_perf);
- value |= FIELD_PREP(AMD_PSTATE_DES_PERF_MASK, 0);
- value |= FIELD_PREP(AMD_PSTATE_MIN_PERF_MASK, cpudata->min_limit_perf);
-
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
epp = 0;
else

View File

@@ -1,42 +0,0 @@
From 55bcae033b08b1b926f927616cca17242cfcfe59 Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Thu, 5 Dec 2024 16:28:46 -0600
Subject: cpufreq/amd-pstate: Drop ret variable from
amd_pstate_set_energy_pref_index()
The ret variable is not necessary.
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 10 +++-------
1 file changed, 3 insertions(+), 7 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -348,13 +348,11 @@ static int shmem_set_epp(struct amd_cpud
static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
int pref_index)
{
- int epp = -EINVAL;
- int ret;
+ int epp;
if (!pref_index)
epp = cpudata->epp_default;
-
- if (epp == -EINVAL)
+ else
epp = epp_values[pref_index];
if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
@@ -370,9 +368,7 @@ static int amd_pstate_set_energy_pref_in
cpudata->boost_state);
}
- ret = amd_pstate_set_epp(cpudata, epp);
-
- return ret;
+ return amd_pstate_set_epp(cpudata, epp);
}
static inline int msr_cppc_enable(bool enable)

View File

@@ -1,58 +0,0 @@
From f1edcca6a96f087f5862a14012fafd4eb9738601 Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Thu, 5 Dec 2024 16:28:47 -0600
Subject: cpufreq/amd-pstate: Set different default EPP policy for Epyc and
Ryzen
For Ryzen systems the EPP policy set by the BIOS is generally configured
to performance as this is the default register value for the CPPC request
MSR.
If a user doesn't use additional software to configure EPP, the system
will be biased towards performance by default and consume extra battery.
Instead, configure the default to "balanced_performance" for this case.
Suggested-by: Artem S. Tashkinov <aros@gmx.com>
Closes: https://bugzilla.kernel.org/show_bug.cgi?id=219526
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
drivers/cpufreq/amd-pstate.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1531,8 +1531,6 @@ static int amd_pstate_epp_cpu_init(struc
policy->driver_data = cpudata;
- cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata);
-
policy->min = policy->cpuinfo.min_freq;
policy->max = policy->cpuinfo.max_freq;
@@ -1543,10 +1541,13 @@ static int amd_pstate_epp_cpu_init(struc
* the default cpufreq governor is neither powersave nor performance.
*/
if (amd_pstate_acpi_pm_profile_server() ||
- amd_pstate_acpi_pm_profile_undefined())
+ amd_pstate_acpi_pm_profile_undefined()) {
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
- else
+ cpudata->epp_default = amd_pstate_get_epp(cpudata);
+ } else {
policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ cpudata->epp_default = AMD_CPPC_EPP_BALANCE_PERFORMANCE;
+ }
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
@@ -1559,6 +1560,9 @@ static int amd_pstate_epp_cpu_init(struc
return ret;
WRITE_ONCE(cpudata->cppc_cap1_cached, value);
}
+ ret = amd_pstate_set_epp(cpudata, cpudata->epp_default);
+ if (ret)
+ return ret;
current_pstate_driver->adjust_perf = NULL;