328 lines
8.6 KiB
Diff
From bbb0d5ec2d1d757fc7b71086f505113845cc2aab Mon Sep 17 00:00:00 2001
|
|
From: Mario Limonciello <mario.limonciello@amd.com>
|
|
Date: Wed, 26 Feb 2025 01:49:32 -0600
|
|
Subject: cpufreq/amd-pstate: Rework CPPC enabling
|
|
|
|
The CPPC enable register is configured as "write once"; that is,
any future writes don't actually do anything.
|
|
|
|
Because of this, all the cleanup paths that currently exist for
CPPC disable are ineffective.
|
|
|
|
Rework CPPC enable to only enable after all the CAP registers have
|
|
been read to avoid enabling CPPC on CPUs with invalid _CPC or
|
|
unpopulated MSRs.
|
|
|
|
As the register is write-once, remove all cleanup paths as well.
|
|
|
|
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
|
|
---
|
|
drivers/cpufreq/amd-pstate.c | 179 +++++++----------------------------
|
|
1 file changed, 35 insertions(+), 144 deletions(-)
|
|
|
|
--- a/drivers/cpufreq/amd-pstate.c
|
|
+++ b/drivers/cpufreq/amd-pstate.c
|
|
@@ -85,7 +85,6 @@ static struct cpufreq_driver *current_ps
|
|
static struct cpufreq_driver amd_pstate_driver;
|
|
static struct cpufreq_driver amd_pstate_epp_driver;
|
|
static int cppc_state = AMD_PSTATE_UNDEFINED;
|
|
-static bool cppc_enabled;
|
|
static bool amd_pstate_prefcore = true;
|
|
static struct quirk_entry *quirks;
|
|
|
|
@@ -371,89 +370,21 @@ static int shmem_set_epp(struct cpufreq_
|
|
return ret;
|
|
}
|
|
|
|
-static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy,
|
|
- int pref_index)
|
|
+static inline int msr_cppc_enable(struct cpufreq_policy *policy)
|
|
{
|
|
- struct amd_cpudata *cpudata = policy->driver_data;
|
|
- u8 epp;
|
|
-
|
|
- if (!pref_index)
|
|
- epp = cpudata->epp_default;
|
|
- else
|
|
- epp = epp_values[pref_index];
|
|
-
|
|
- if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
|
|
- pr_debug("EPP cannot be set under performance policy\n");
|
|
- return -EBUSY;
|
|
- }
|
|
-
|
|
- return amd_pstate_set_epp(policy, epp);
|
|
-}
|
|
-
|
|
-static inline int msr_cppc_enable(bool enable)
|
|
-{
|
|
- int ret, cpu;
|
|
- unsigned long logical_proc_id_mask = 0;
|
|
-
|
|
- /*
|
|
- * MSR_AMD_CPPC_ENABLE is write-once, once set it cannot be cleared.
|
|
- */
|
|
- if (!enable)
|
|
- return 0;
|
|
-
|
|
- if (enable == cppc_enabled)
|
|
- return 0;
|
|
-
|
|
- for_each_present_cpu(cpu) {
|
|
- unsigned long logical_id = topology_logical_package_id(cpu);
|
|
-
|
|
- if (test_bit(logical_id, &logical_proc_id_mask))
|
|
- continue;
|
|
-
|
|
- set_bit(logical_id, &logical_proc_id_mask);
|
|
-
|
|
- ret = wrmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_ENABLE,
|
|
- enable);
|
|
- if (ret)
|
|
- return ret;
|
|
- }
|
|
-
|
|
- cppc_enabled = enable;
|
|
- return 0;
|
|
+ return wrmsrl_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1);
|
|
}
|
|
|
|
-static int shmem_cppc_enable(bool enable)
|
|
+static int shmem_cppc_enable(struct cpufreq_policy *policy)
|
|
{
|
|
- int cpu, ret = 0;
|
|
- struct cppc_perf_ctrls perf_ctrls;
|
|
-
|
|
- if (enable == cppc_enabled)
|
|
- return 0;
|
|
-
|
|
- for_each_present_cpu(cpu) {
|
|
- ret = cppc_set_enable(cpu, enable);
|
|
- if (ret)
|
|
- return ret;
|
|
-
|
|
- /* Enable autonomous mode for EPP */
|
|
- if (cppc_state == AMD_PSTATE_ACTIVE) {
|
|
- /* Set desired perf as zero to allow EPP firmware control */
|
|
- perf_ctrls.desired_perf = 0;
|
|
- ret = cppc_set_perf(cpu, &perf_ctrls);
|
|
- if (ret)
|
|
- return ret;
|
|
- }
|
|
- }
|
|
-
|
|
- cppc_enabled = enable;
|
|
- return ret;
|
|
+ return cppc_set_enable(policy->cpu, 1);
|
|
}
|
|
|
|
DEFINE_STATIC_CALL(amd_pstate_cppc_enable, msr_cppc_enable);
|
|
|
|
-static inline int amd_pstate_cppc_enable(bool enable)
|
|
+static inline int amd_pstate_cppc_enable(struct cpufreq_policy *policy)
|
|
{
|
|
- return static_call(amd_pstate_cppc_enable)(enable);
|
|
+ return static_call(amd_pstate_cppc_enable)(policy);
|
|
}
|
|
|
|
static int msr_init_perf(struct amd_cpudata *cpudata)
|
|
@@ -1063,6 +994,10 @@ static int amd_pstate_cpu_init(struct cp
|
|
cpudata->nominal_freq,
|
|
perf.highest_perf);
|
|
|
|
+ ret = amd_pstate_cppc_enable(policy);
|
|
+ if (ret)
|
|
+ goto free_cpudata1;
|
|
+
|
|
policy->boost_enabled = READ_ONCE(cpudata->boost_supported);
|
|
|
|
/* It will be updated by governor */
|
|
@@ -1110,28 +1045,6 @@ static void amd_pstate_cpu_exit(struct c
|
|
kfree(cpudata);
|
|
}
|
|
|
|
-static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
|
|
-{
|
|
- int ret;
|
|
-
|
|
- ret = amd_pstate_cppc_enable(true);
|
|
- if (ret)
|
|
- pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
|
|
-
|
|
- return ret;
|
|
-}
|
|
-
|
|
-static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
|
|
-{
|
|
- int ret;
|
|
-
|
|
- ret = amd_pstate_cppc_enable(false);
|
|
- if (ret)
|
|
- pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
|
|
-
|
|
- return ret;
|
|
-}
|
|
-
|
|
/* Sysfs attributes */
|
|
|
|
/*
|
|
@@ -1223,8 +1136,10 @@ static ssize_t show_energy_performance_a
|
|
static ssize_t store_energy_performance_preference(
|
|
struct cpufreq_policy *policy, const char *buf, size_t count)
|
|
{
|
|
+ struct amd_cpudata *cpudata = policy->driver_data;
|
|
char str_preference[21];
|
|
ssize_t ret;
|
|
+ u8 epp;
|
|
|
|
ret = sscanf(buf, "%20s", str_preference);
|
|
if (ret != 1)
|
|
@@ -1234,7 +1149,17 @@ static ssize_t store_energy_performance_
|
|
if (ret < 0)
|
|
return -EINVAL;
|
|
|
|
- ret = amd_pstate_set_energy_pref_index(policy, ret);
|
|
+ if (!ret)
|
|
+ epp = cpudata->epp_default;
|
|
+ else
|
|
+ epp = epp_values[ret];
|
|
+
|
|
+ if (epp > 0 && policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
|
|
+ pr_debug("EPP cannot be set under performance policy\n");
|
|
+ return -EBUSY;
|
|
+ }
|
|
+
|
|
+ ret = amd_pstate_set_epp(policy, epp);
|
|
|
|
return ret ? ret : count;
|
|
}
|
|
@@ -1267,7 +1192,6 @@ static ssize_t show_energy_performance_p
|
|
|
|
static void amd_pstate_driver_cleanup(void)
|
|
{
|
|
- amd_pstate_cppc_enable(false);
|
|
cppc_state = AMD_PSTATE_DISABLE;
|
|
current_pstate_driver = NULL;
|
|
}
|
|
@@ -1301,14 +1225,6 @@ static int amd_pstate_register_driver(in
|
|
|
|
cppc_state = mode;
|
|
|
|
- ret = amd_pstate_cppc_enable(true);
|
|
- if (ret) {
|
|
- pr_err("failed to enable cppc during amd-pstate driver registration, return %d\n",
|
|
- ret);
|
|
- amd_pstate_driver_cleanup();
|
|
- return ret;
|
|
- }
|
|
-
|
|
/* at least one CPU supports CPB */
|
|
current_pstate_driver->boost_enabled = cpu_feature_enabled(X86_FEATURE_CPB);
|
|
|
|
@@ -1548,11 +1464,15 @@ static int amd_pstate_epp_cpu_init(struc
|
|
policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf,
|
|
cpudata->nominal_freq,
|
|
perf.highest_perf);
|
|
+ policy->driver_data = cpudata;
|
|
+
|
|
+ ret = amd_pstate_cppc_enable(policy);
|
|
+ if (ret)
|
|
+ goto free_cpudata1;
|
|
|
|
/* It will be updated by governor */
|
|
policy->cur = policy->cpuinfo.min_freq;
|
|
|
|
- policy->driver_data = cpudata;
|
|
|
|
policy->boost_enabled = READ_ONCE(cpudata->boost_supported);
|
|
|
|
@@ -1644,31 +1564,11 @@ static int amd_pstate_epp_set_policy(str
|
|
return 0;
|
|
}
|
|
|
|
-static int amd_pstate_epp_reenable(struct cpufreq_policy *policy)
|
|
-{
|
|
- int ret;
|
|
-
|
|
- ret = amd_pstate_cppc_enable(true);
|
|
- if (ret)
|
|
- pr_err("failed to enable amd pstate during resume, return %d\n", ret);
|
|
-
|
|
-
|
|
- return amd_pstate_epp_update_limit(policy);
|
|
-}
|
|
-
|
|
static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
|
|
{
|
|
- struct amd_cpudata *cpudata = policy->driver_data;
|
|
- int ret;
|
|
-
|
|
- pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
|
|
+ pr_debug("AMD CPU Core %d going online\n", policy->cpu);
|
|
|
|
- ret = amd_pstate_epp_reenable(policy);
|
|
- if (ret)
|
|
- return ret;
|
|
- cpudata->suspended = false;
|
|
-
|
|
- return 0;
|
|
+ return amd_pstate_cppc_enable(policy);
|
|
}
|
|
|
|
static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
|
|
@@ -1686,11 +1586,6 @@ static int amd_pstate_epp_cpu_offline(st
|
|
static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
|
|
{
|
|
struct amd_cpudata *cpudata = policy->driver_data;
|
|
- int ret;
|
|
-
|
|
- /* avoid suspending when EPP is not enabled */
|
|
- if (cppc_state != AMD_PSTATE_ACTIVE)
|
|
- return 0;
|
|
|
|
/* invalidate to ensure it's rewritten during resume */
|
|
cpudata->cppc_req_cached = 0;
|
|
@@ -1698,11 +1593,6 @@ static int amd_pstate_epp_suspend(struct
|
|
/* set this flag to avoid setting core offline*/
|
|
cpudata->suspended = true;
|
|
|
|
- /* disable CPPC in lowlevel firmware */
|
|
- ret = amd_pstate_cppc_enable(false);
|
|
- if (ret)
|
|
- pr_err("failed to suspend, return %d\n", ret);
|
|
-
|
|
return 0;
|
|
}
|
|
|
|
@@ -1711,8 +1601,12 @@ static int amd_pstate_epp_resume(struct
|
|
struct amd_cpudata *cpudata = policy->driver_data;
|
|
|
|
if (cpudata->suspended) {
|
|
+ int ret;
|
|
+
|
|
/* enable amd pstate from suspend state*/
|
|
- amd_pstate_epp_reenable(policy);
|
|
+ ret = amd_pstate_epp_update_limit(policy);
|
|
+ if (ret)
|
|
+ return ret;
|
|
|
|
cpudata->suspended = false;
|
|
}
|
|
@@ -1727,8 +1621,6 @@ static struct cpufreq_driver amd_pstate_
|
|
.fast_switch = amd_pstate_fast_switch,
|
|
.init = amd_pstate_cpu_init,
|
|
.exit = amd_pstate_cpu_exit,
|
|
- .suspend = amd_pstate_cpu_suspend,
|
|
- .resume = amd_pstate_cpu_resume,
|
|
.set_boost = amd_pstate_set_boost,
|
|
.update_limits = amd_pstate_update_limits,
|
|
.name = "amd-pstate",
|
|
@@ -1895,7 +1787,6 @@ static int __init amd_pstate_init(void)
|
|
|
|
global_attr_free:
|
|
cpufreq_unregister_driver(current_pstate_driver);
|
|
- amd_pstate_cppc_enable(false);
|
|
return ret;
|
|
}
|
|
device_initcall(amd_pstate_init);
|