From 98f0f9202cd0fc549f5beaaaf8750658d8ee2140 Mon Sep 17 00:00:00 2001
From: Mario Limonciello <mario.limonciello@amd.com>
Date: Thu, 5 Dec 2024 16:28:44 -0600
Subject: cpufreq/amd-pstate: Always write EPP value when updating perf

For MSR systems the EPP value is in the same register as the perf targets,
and so dividing them into two separate MSR writes is wasteful.

In msr_update_perf(), update both EPP and perf values in one write to
MSR_AMD_CPPC_REQ, and cache them if successful.

To accomplish this, plumb the EPP value into the update_perf call and
modify all its callers to check the return value.

Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
---
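A note for reviewers, not part of the change itself: on MSR systems the
min/des/max perf targets and the EPP value all live in MSR_AMD_CPPC_REQ,
which is what makes a single combined write possible. A rough sketch of
the idea follows, using the AMD_PSTATE_*_PERF_MASK field masks from the
driver; the MAX/DES mask names are assumed by analogy with the MIN/EPP
ones visible in the hunks below, and example_msr_update_perf() is a
hypothetical helper, not code from this patch:

	static int example_msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
					   u32 des_perf, u32 max_perf, u32 epp)
	{
		/* Repack all four CPPC request fields into one register image. */
		u64 value = READ_ONCE(cpudata->cppc_req_cached);

		value &= ~(AMD_PSTATE_MIN_PERF_MASK | AMD_PSTATE_MAX_PERF_MASK |
			   AMD_PSTATE_DES_PERF_MASK | AMD_PSTATE_EPP_PERF_MASK);
		value |= FIELD_PREP(AMD_PSTATE_MIN_PERF_MASK, min_perf);
		value |= FIELD_PREP(AMD_PSTATE_MAX_PERF_MASK, max_perf);
		value |= FIELD_PREP(AMD_PSTATE_DES_PERF_MASK, des_perf);
		value |= FIELD_PREP(AMD_PSTATE_EPP_PERF_MASK, epp);

		/* One MSR write covers both the perf targets and EPP ... */
		wrmsrl(MSR_AMD_CPPC_REQ, value);

		/* ... and the cached copies are refreshed afterwards. */
		WRITE_ONCE(cpudata->cppc_req_cached, value);
		WRITE_ONCE(cpudata->epp_cached, epp);
		return 0;
	}

Shared memory (shmem) systems have no such combined register, so
shmem_update_perf() still sets EPP via shmem_set_epp() first and then
programs the perf controls separately.
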
 drivers/cpufreq/amd-pstate.c | 71 ++++++++++++++++++++++--------------
 1 file changed, 43 insertions(+), 28 deletions(-)

--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -252,25 +252,36 @@ static s16 shmem_get_epp(struct amd_cpud
 }
 
 static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
-			   u32 des_perf, u32 max_perf, bool fast_switch)
+			   u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
 {
+	u64 value;
+
+	value = READ_ONCE(cpudata->cppc_req_cached);
 	if (fast_switch) {
 		wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
 		return 0;
+	} else {
+		int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
+					READ_ONCE(cpudata->cppc_req_cached));
+		if (ret)
+			return ret;
 	}
 
-	return wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
-			     READ_ONCE(cpudata->cppc_req_cached));
+	WRITE_ONCE(cpudata->cppc_req_cached, value);
+	WRITE_ONCE(cpudata->epp_cached, epp);
+
+	return 0;
 }
 
 DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
 
 static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
 					 u32 min_perf, u32 des_perf,
-					 u32 max_perf, bool fast_switch)
+					 u32 max_perf, u32 epp,
+					 bool fast_switch)
 {
 	return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
-						   max_perf, fast_switch);
+						   max_perf, epp, fast_switch);
 }
 
 static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
@@ -489,12 +500,19 @@ static inline int amd_pstate_init_perf(s
 	return static_call(amd_pstate_init_perf)(cpudata);
 }
 
-static int shmem_update_perf(struct amd_cpudata *cpudata,
-			     u32 min_perf, u32 des_perf,
-			     u32 max_perf, bool fast_switch)
+static int shmem_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+			     u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
 {
 	struct cppc_perf_ctrls perf_ctrls;
 
+	if (cppc_state == AMD_PSTATE_ACTIVE) {
+		int ret = shmem_set_epp(cpudata, epp);
+
+		if (ret)
+			return ret;
+		WRITE_ONCE(cpudata->epp_cached, epp);
+	}
+
 	perf_ctrls.max_perf = max_perf;
 	perf_ctrls.min_perf = min_perf;
 	perf_ctrls.desired_perf = des_perf;
@@ -575,10 +593,10 @@ static void amd_pstate_update(struct amd
 
 	WRITE_ONCE(cpudata->cppc_req_cached, value);
 
-	amd_pstate_update_perf(cpudata, min_perf, des_perf,
-			       max_perf, fast_switch);
+	amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
 
 cpufreq_policy_put:
+
 	cpufreq_cpu_put(policy);
 }
 
@@ -1575,6 +1593,7 @@ static int amd_pstate_epp_update_limit(s
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
 	u64 value;
+	u32 epp;
 
 	amd_pstate_update_min_max_limit(policy);
 
@@ -1587,23 +1606,19 @@ static int amd_pstate_epp_update_limit(s
 	value |= FIELD_PREP(AMD_PSTATE_MIN_PERF_MASK, cpudata->min_limit_perf);
 
 	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
-		WRITE_ONCE(cpudata->epp_cached, 0);
-	value |= FIELD_PREP(AMD_PSTATE_EPP_PERF_MASK, cpudata->epp_cached);
-
-	WRITE_ONCE(cpudata->cppc_req_cached, value);
+		epp = 0;
+	else
+		epp = READ_ONCE(cpudata->epp_cached);
 
 	if (trace_amd_pstate_epp_perf_enabled()) {
-		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
-					  cpudata->epp_cached,
+		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
 					  cpudata->min_limit_perf,
 					  cpudata->max_limit_perf,
 					  cpudata->boost_state);
 	}
 
-	amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
-			       cpudata->max_limit_perf, false);
-
-	return amd_pstate_set_epp(cpudata, READ_ONCE(cpudata->epp_cached));
+	return amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
+				      cpudata->max_limit_perf, epp, false);
 }
 
 static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@@ -1634,7 +1649,7 @@ static int amd_pstate_epp_set_policy(str
 	return 0;
 }
 
-static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+static int amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
 {
 	u64 max_perf;
 	int ret;
@@ -1652,17 +1667,19 @@ static void amd_pstate_epp_reenable(stru
 					  max_perf, cpudata->boost_state);
 	}
 
-	amd_pstate_update_perf(cpudata, 0, 0, max_perf, false);
-	amd_pstate_set_epp(cpudata, cpudata->epp_cached);
+	return amd_pstate_update_perf(cpudata, 0, 0, max_perf, cpudata->epp_cached, false);
 }
 
 static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
+	int ret;
 
 	pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
 
-	amd_pstate_epp_reenable(cpudata);
+	ret = amd_pstate_epp_reenable(cpudata);
+	if (ret)
+		return ret;
 	cpudata->suspended = false;
 
 	return 0;
@@ -1686,10 +1703,8 @@ static int amd_pstate_epp_cpu_offline(st
 					  min_perf, min_perf, cpudata->boost_state);
 	}
 
-	amd_pstate_update_perf(cpudata, min_perf, 0, min_perf, false);
-	amd_pstate_set_epp(cpudata, AMD_CPPC_EPP_BALANCE_POWERSAVE);
-
-	return 0;
+	return amd_pstate_update_perf(cpudata, min_perf, 0, min_perf,
+				      AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
 }
 
 static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)