Merge back earlier cpufreq material for 6.18

Rafael J. Wysocki 2025-09-24 21:32:28 +02:00
commit c51f0d3b6e
33 changed files with 430 additions and 328 deletions

View File

@ -1,61 +0,0 @@
Generic cpufreq driver
It is a generic DT based cpufreq driver for frequency management. It supports
both uniprocessor (UP) and symmetric multiprocessor (SMP) systems which share
clock and voltage across all CPUs.
Both required and optional properties listed below must be defined
under node /cpus/cpu@0.
Required properties:
- None
Optional properties:
- operating-points: Refer to Documentation/devicetree/bindings/opp/opp-v1.yaml for
details. OPPs *must* be supplied either via DT, i.e. this property, or
populated at runtime.
- clock-latency: Specify the possible maximum transition latency for clock,
in unit of nanoseconds.
- voltage-tolerance: Specify the CPU voltage tolerance in percentage.
- #cooling-cells:
Please refer to
Documentation/devicetree/bindings/thermal/thermal-cooling-devices.yaml.
Examples:
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
compatible = "arm,cortex-a9";
reg = <0>;
next-level-cache = <&L2>;
operating-points = <
/* kHz uV */
792000 1100000
396000 950000
198000 850000
>;
clock-latency = <61036>; /* two CLK32 periods */
#cooling-cells = <2>;
};
cpu@1 {
compatible = "arm,cortex-a9";
reg = <1>;
next-level-cache = <&L2>;
};
cpu@2 {
compatible = "arm,cortex-a9";
reg = <2>;
next-level-cache = <&L2>;
};
cpu@3 {
compatible = "arm,cortex-a9";
reg = <3>;
next-level-cache = <&L2>;
};
};

View File

@ -22,6 +22,7 @@ properties:
items:
- enum:
- qcom,qcm2290-cpufreq-hw
- qcom,qcs615-cpufreq-hw
- qcom,sc7180-cpufreq-hw
- qcom,sc8180x-cpufreq-hw
- qcom,sdm670-cpufreq-hw
@ -132,6 +133,7 @@ allOf:
compatible:
contains:
enum:
- qcom,qcs615-cpufreq-hw
- qcom,qdu1000-cpufreq-epss
- qcom,sa8255p-cpufreq-epss
- qcom,sa8775p-cpufreq-epss

View File

@ -0,0 +1,82 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/cpufreq/mediatek,mt8196-cpufreq-hw.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek Hybrid CPUFreq for MT8196/MT6991 series SoCs
maintainers:
- Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
description:
MT8196 uses CPUFreq management hardware that supports dynamic voltage
and frequency scaling (DVFS) and can support several performance domains.
properties:
compatible:
const: mediatek,mt8196-cpufreq-hw
reg:
items:
- description: FDVFS control register region
- description: OPP tables and control for performance domain 0
- description: OPP tables and control for performance domain 1
- description: OPP tables and control for performance domain 2
"#performance-domain-cells":
const: 1
required:
- compatible
- reg
- "#performance-domain-cells"
additionalProperties: false
examples:
- |
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a720";
enable-method = "psci";
performance-domains = <&performance 0>;
reg = <0x000>;
};
/* ... */
cpu6: cpu@600 {
device_type = "cpu";
compatible = "arm,cortex-x4";
enable-method = "psci";
performance-domains = <&performance 1>;
reg = <0x600>;
};
cpu7: cpu@700 {
device_type = "cpu";
compatible = "arm,cortex-x925";
enable-method = "psci";
performance-domains = <&performance 2>;
reg = <0x700>;
};
};
/* ... */
soc {
#address-cells = <2>;
#size-cells = <2>;
performance: performance-controller@c2c2034 {
compatible = "mediatek,mt8196-cpufreq-hw";
reg = <0 0xc220400 0 0x20>, <0 0xc2c0f20 0 0x120>,
<0 0xc2c1040 0 0x120>, <0 0xc2c1160 0 0x120>;
#performance-domain-cells = <1>;
};
};

View File

@ -6349,6 +6349,12 @@ F: kernel/sched/cpufreq*.c
F: rust/kernel/cpufreq.rs
F: tools/testing/selftests/cpufreq/
CPU FREQUENCY DRIVERS - VIRTUAL MACHINE CPUFREQ
M: Saravana Kannan <saravanak@google.com>
L: linux-pm@vger.kernel.org
S: Maintained
F: drivers/cpufreq/virtual-cpufreq.c
CPU HOTPLUG
M: Thomas Gleixner <tglx@linutronix.de>
M: Peter Zijlstra <peterz@infradead.org>

View File

@ -135,7 +135,7 @@
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-supported-hw = <0x01 0x0006>;
opp-supported-hw = <0x01 0x0007>;
clock-latency-ns = <6000000>;
};

View File

@ -318,7 +318,6 @@ static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
return cmd.val;
}
/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
struct drv_cmd *cmd = _cmd;
@ -335,14 +334,8 @@ static void drv_write(struct acpi_cpufreq_data *data,
.val = val,
.func.write = data->cpu_freq_write,
};
int this_cpu;
this_cpu = get_cpu();
if (cpumask_test_cpu(this_cpu, mask))
do_drv_write(&cmd);
smp_call_function_many(mask, do_drv_write, &cmd, 1);
put_cpu();
on_each_cpu_mask(mask, do_drv_write, &cmd, true);
}
static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)

View File

@ -107,6 +107,7 @@ static struct platform_driver airoha_cpufreq_driver = {
};
static const struct of_device_id airoha_cpufreq_match_list[] __initconst = {
{ .compatible = "airoha,an7583" },
{ .compatible = "airoha,en7581" },
{},
};

View File

@ -265,7 +265,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
*/
target_vm = avs_map[l0_vdd_min] - 100;
target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
target_vm = max(target_vm, MIN_VOLT_MV);
dvfs->avs[1] = armada_37xx_avs_val_match(target_vm);
/*
@ -273,7 +273,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
* be larger than 1000mv
*/
target_vm = avs_map[l0_vdd_min] - 150;
target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
target_vm = max(target_vm, MIN_VOLT_MV);
dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
/*

View File

@ -480,7 +480,7 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct private_data *priv;
if (!policy)
@ -488,8 +488,6 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
priv = policy->driver_data;
cpufreq_cpu_put(policy);
return brcm_avs_get_frequency(priv->base);
}
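
Several files in this merge (brcmstb-avs above; cppc, cpufreq.c, the MediaTek and Samsung drivers, and tegra186 below) convert open-coded cpufreq_cpu_get()/cpufreq_cpu_put() pairs to the scope-based __free(put_cpufreq_policy) annotation, so the policy reference is dropped automatically on every return path. A minimal, self-contained userspace sketch of the underlying idiom (not part of the commit), assuming only the compiler's cleanup attribute that the kernel's DEFINE_FREE()/__free() helpers in linux/cleanup.h build on; fake_policy and put_policy are illustrative names, not kernel APIs:

#include <stdio.h>
#include <stdlib.h>

struct fake_policy { unsigned int cur_khz; };

/* Analogue of DEFINE_FREE(put_cpufreq_policy, ...): runs when the
 * annotated variable goes out of scope, on every return path. */
static void put_policy(struct fake_policy **p)
{
	if (*p) {
		printf("reference dropped\n");
		free(*p);
	}
}

static unsigned int quick_get(int have_policy)
{
	struct fake_policy *policy __attribute__((cleanup(put_policy))) =
		have_policy ? malloc(sizeof(*policy)) : NULL;

	if (!policy)
		return 0;		/* nothing to put */

	policy->cur_khz = 1800000;
	return policy->cur_khz;		/* no explicit "put" needed */
}

int main(void)
{
	printf("%u kHz\n", quick_get(1));
	printf("%u kHz\n", quick_get(0));
	return 0;
}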

View File

@ -50,8 +50,7 @@ struct cppc_freq_invariance {
static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
struct cppc_perf_fb_ctrs *fb_ctrs_t0,
static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1);
/**
@ -87,8 +86,7 @@ static void cppc_scale_freq_workfn(struct kthread_work *work)
return;
}
perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
&fb_ctrs);
perf = cppc_perf_from_fbctrs(&cppc_fi->prev_perf_fb_ctrs, &fb_ctrs);
if (!perf)
return;
@ -684,8 +682,7 @@ static inline u64 get_delta(u64 t1, u64 t0)
return (u32)t1 - (u32)t0;
}
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
struct cppc_perf_fb_ctrs *fb_ctrs_t0,
static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
u64 delta_reference, delta_delivered;
@ -725,8 +722,8 @@ static int cppc_get_perf_ctrs_sample(int cpu,
static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct cppc_cpudata *cpu_data;
u64 delivered_perf;
int ret;
@ -736,8 +733,6 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
cpu_data = policy->driver_data;
cpufreq_cpu_put(policy);
ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1);
if (ret) {
if (ret == -EFAULT)
@ -747,8 +742,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
return 0;
}
delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
&fb_ctrs_t1);
delivered_perf = cppc_perf_from_fbctrs(&fb_ctrs_t0, &fb_ctrs_t1);
if (!delivered_perf)
goto out_invalid_counters;

View File

@ -103,6 +103,7 @@ static const struct of_device_id allowlist[] __initconst = {
* platforms using "operating-points-v2" property.
*/
static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "airoha,an7583", },
{ .compatible = "airoha,en7581", },
{ .compatible = "allwinner,sun50i-a100" },
@ -188,9 +189,11 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "ti,omap3", },
{ .compatible = "ti,am625", },
{ .compatible = "ti,am62a7", },
{ .compatible = "ti,am62d2", },
{ .compatible = "ti,am62p5", },
{ .compatible = "qcom,ipq5332", },
{ .compatible = "qcom,ipq5424", },
{ .compatible = "qcom,ipq6018", },
{ .compatible = "qcom,ipq8064", },
{ .compatible = "qcom,ipq8074", },

View File

@ -664,10 +664,10 @@ unlock:
static unsigned int cpufreq_parse_policy(char *str_governor)
{
if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
if (!strncasecmp(str_governor, "performance", strlen("performance")))
return CPUFREQ_POLICY_PERFORMANCE;
if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
if (!strncasecmp(str_governor, "powersave", strlen("powersave")))
return CPUFREQ_POLICY_POWERSAVE;
return CPUFREQ_POLICY_UNKNOWN;
@ -914,7 +914,7 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
unsigned int freq = 0;
unsigned int ret;
int ret;
if (!policy->governor || !policy->governor->store_setspeed)
return -EINVAL;
@ -1121,7 +1121,8 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
if (has_target()) {
/* Update policy governor to the one used before hotplug. */
gov = get_governor(policy->last_governor);
if (policy->last_governor[0] != '\0')
gov = get_governor(policy->last_governor);
if (gov) {
pr_debug("Restoring governor %s for cpu %d\n",
gov->name, policy->cpu);
@ -1844,7 +1845,6 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
*/
unsigned int cpufreq_quick_get(unsigned int cpu)
{
struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
unsigned long flags;
read_lock_irqsave(&cpufreq_driver_lock, flags);
@ -1859,7 +1859,7 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
policy = cpufreq_cpu_get(cpu);
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (policy)
return policy->cur;
@ -1875,9 +1875,7 @@ EXPORT_SYMBOL(cpufreq_quick_get);
*/
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
struct cpufreq_policy *policy __free(put_cpufreq_policy);
policy = cpufreq_cpu_get(cpu);
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (policy)
return policy->max;
@ -1893,9 +1891,7 @@ EXPORT_SYMBOL(cpufreq_quick_get_max);
*/
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
struct cpufreq_policy *policy __free(put_cpufreq_policy);
policy = cpufreq_cpu_get(cpu);
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (policy)
return policy->cpuinfo.max_freq;
@ -1919,9 +1915,7 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
*/
unsigned int cpufreq_get(unsigned int cpu)
{
struct cpufreq_policy *policy __free(put_cpufreq_policy);
policy = cpufreq_cpu_get(cpu);
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
@ -2750,9 +2744,7 @@ static void cpufreq_policy_refresh(struct cpufreq_policy *policy)
*/
void cpufreq_update_policy(unsigned int cpu)
{
struct cpufreq_policy *policy __free(put_cpufreq_policy);
policy = cpufreq_cpu_get(cpu);
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return;
@ -2769,9 +2761,7 @@ EXPORT_SYMBOL(cpufreq_update_policy);
*/
void cpufreq_update_limits(unsigned int cpu)
{
struct cpufreq_policy *policy __free(put_cpufreq_policy);
policy = cpufreq_cpu_get(cpu);
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return;
@ -2792,7 +2782,7 @@ int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
if (!policy->freq_table)
return -ENXIO;
ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
ret = cpufreq_frequency_table_cpuinfo(policy);
if (ret) {
pr_err("%s: Policy frequency update failed\n", __func__);
return ret;
@ -2921,10 +2911,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
return -EPROBE_DEFER;
if (!driver_data || !driver_data->verify || !driver_data->init ||
!(driver_data->setpolicy || driver_data->target_index ||
driver_data->target) ||
(driver_data->setpolicy && (driver_data->target_index ||
driver_data->target)) ||
(driver_data->target_index && driver_data->target) ||
(!!driver_data->setpolicy == (driver_data->target_index || driver_data->target)) ||
(!driver_data->get_intermediate != !driver_data->target_intermediate) ||
(!driver_data->online != !driver_data->offline) ||
(driver_data->adjust_perf && !driver_data->fast_switch))
@ -3058,9 +3046,7 @@ static int __init cpufreq_core_init(void)
static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
{
struct cpufreq_policy *policy __free(put_cpufreq_policy);
policy = cpufreq_cpu_get(cpu);
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy) {
pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
return false;
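
The reworked validity check in cpufreq_register_driver() relies on !!setpolicy == (target_index || target) to reject, in a single comparison, both a driver that provides no control callback at all and one that mixes ->setpolicy with the ->target/->target_index style. A small self-contained sketch of that truth table (plain C; the struct and function names are illustrative only, not kernel definitions):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver callbacks; any non-NULL value
 * means "callback provided". */
struct fake_driver {
	void *setpolicy;
	void *target_index;
	void *target;
};

static bool callbacks_invalid(const struct fake_driver *d)
{
	/* Invalid when both target callbacks are set, or when the driver
	 * provides either both kinds of control (setpolicy and a target
	 * callback) or neither of them. */
	return (d->target_index && d->target) ||
	       (!!d->setpolicy == !!(d->target_index || d->target));
}

int main(void)
{
	struct fake_driver none = { 0 };
	struct fake_driver sp_only = { .setpolicy = (void *)1 };
	struct fake_driver ti_only = { .target_index = (void *)1 };
	struct fake_driver both = { .setpolicy = (void *)1, .target = (void *)1 };

	printf("none: %d, setpolicy: %d, target_index: %d, both: %d\n",
	       callbacks_invalid(&none), callbacks_invalid(&sp_only),
	       callbacks_invalid(&ti_only), callbacks_invalid(&both));
	return 0;
}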

View File

@ -152,9 +152,9 @@ static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
ret = kstrtouint(buf, 0, &input);
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
if (ret || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
dbs_data->sampling_down_factor = input;
@ -168,9 +168,9 @@ static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
ret = kstrtouint(buf, 0, &input);
if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
if (ret || input > 100 || input <= cs_tuners->down_threshold)
return -EINVAL;
dbs_data->up_threshold = input;
@ -184,10 +184,10 @@ static ssize_t down_threshold_store(struct gov_attr_set *attr_set,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
ret = kstrtouint(buf, 0, &input);
/* cannot be lower than 1 otherwise freq will not fall */
if (ret != 1 || input < 1 || input >= dbs_data->up_threshold)
if (ret || input < 1 || input >= dbs_data->up_threshold)
return -EINVAL;
cs_tuners->down_threshold = input;
@ -201,9 +201,9 @@ static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
ret = kstrtouint(buf, 0, &input);
if (ret)
return ret;
if (input > 1)
input = 1;
@ -226,10 +226,10 @@ static ssize_t freq_step_store(struct gov_attr_set *attr_set, const char *buf,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
ret = kstrtouint(buf, 0, &input);
if (ret != 1)
return -EINVAL;
if (ret)
return ret;
if (input > 100)
input = 100;
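
The governor sysfs store callbacks above move from sscanf(buf, "%u", ...) to kstrtouint(buf, 0, ...), which parses the whole string, rejects trailing garbage, and returns 0 or a negative errno instead of a conversion count. A rough userspace analogue of that stricter behaviour (parse_uint_strict below is illustrative, not a kernel API, and only approximates kstrtouint()):

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Roughly what kstrtouint() enforces: a full-string unsigned integer
 * (an optional trailing newline is tolerated), range-checked. */
static int parse_uint_strict(const char *s, unsigned int *out)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(s, &end, 0);
	if (end == s || (*end != '\0' && strcmp(end, "\n") != 0))
		return -EINVAL;		/* no digits or trailing garbage */
	if (errno == ERANGE || val > UINT_MAX)
		return -ERANGE;

	*out = val;
	return 0;
}

int main(void)
{
	unsigned int v;

	/* sscanf("%u") would accept "10 junk"; the strict parse does not. */
	printf("\"10\"      -> %d\n", parse_uint_strict("10\n", &v));
	printf("\"10 junk\" -> %d\n", parse_uint_strict("10 junk", &v));
	return 0;
}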

View File

@ -29,29 +29,6 @@ static struct od_ops od_ops;
static unsigned int default_powersave_bias;
/*
* Not all CPUs want IO time to be accounted as busy; this depends on how
* efficient idling at a higher frequency/voltage is.
* Pavel Machek says this is not so for various generations of AMD and old
* Intel systems.
* Mike Chan (android.com) claims this is also not true for ARM.
* Because of this, whitelist specific known (series) of CPUs by default, and
* leave all others up to the user.
*/
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
/*
* For Intel, Core 2 (model 15) and later have an efficient idle.
*/
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
boot_cpu_data.x86 == 6 &&
boot_cpu_data.x86_model >= 15)
return 1;
#endif
return 0;
}
/*
* Find right freq to be set now with powersave_bias on.
* Returns the freq_hi to be used right now and will set freq_hi_delay_us,
@ -377,7 +354,7 @@ static int od_init(struct dbs_data *dbs_data)
dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
dbs_data->ignore_nice_load = 0;
tuners->powersave_bias = default_powersave_bias;
dbs_data->io_is_busy = should_io_be_busy();
dbs_data->io_is_busy = od_should_io_be_busy();
dbs_data->tuners = tuners;
return 0;

View File

@ -24,3 +24,26 @@ static inline struct od_policy_dbs_info *to_dbs_info(struct policy_dbs_info *pol
struct od_dbs_tuners {
unsigned int powersave_bias;
};
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
/*
* Not all CPUs want IO time to be accounted as busy; this depends on
* how efficient idling at a higher frequency/voltage is.
*
* Pavel Machek says this is not so for various generations of AMD and
* old Intel systems. Mike Chan (android.com) claims this is also not
* true for ARM.
*
* Because of this, select a known series of Intel CPUs (Family 6 and
* later) by default, and leave all others up to the user.
*/
static inline bool od_should_io_be_busy(void)
{
return (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
boot_cpu_data.x86_vfm >= INTEL_PENTIUM_PRO);
}
#else
static inline bool od_should_io_be_busy(void) { return false; }
#endif
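
od_should_io_be_busy() now reduces the old family/model test to a single comparison of boot_cpu_data.x86_vfm against INTEL_PENTIUM_PRO. A small sketch of why one integer comparison covers "Intel, Pentium Pro (family 6) or later", assuming the model/family/vendor byte packing used by the kernel's VFM_MAKE() helper (model in bits 0-7, family in bits 8-15, vendor in bits 16-23) and X86_VENDOR_INTEL == 0; the explicit vendor check in the code above keeps other vendors from matching numerically:

#include <stdio.h>

/* Assumed packing, mirroring VFM_MAKE() in asm/cpu_device_id.h:
 * bits 0-7 model, bits 8-15 family, bits 16-23 vendor. */
#define VFM_MAKE(vendor, family, model) \
	(((vendor) << 16) | ((family) << 8) | (model))

#define X86_VENDOR_INTEL	0			/* assumed value */
#define INTEL_PENTIUM_PRO	VFM_MAKE(X86_VENDOR_INTEL, 6, 1)

static int io_should_be_busy(unsigned int vendor, unsigned int vfm)
{
	return vendor == X86_VENDOR_INTEL && vfm >= INTEL_PENTIUM_PRO;
}

int main(void)
{
	/* Family 5 (pre-P6) is excluded; family 6 model 15 (Core 2) and
	 * a hypothetical family 7 part are included. */
	printf("fam5:  %d\n", io_should_be_busy(0, VFM_MAKE(0, 5, 4)));
	printf("core2: %d\n", io_should_be_busy(0, VFM_MAKE(0, 6, 15)));
	printf("fam7:  %d\n", io_should_be_busy(0, VFM_MAKE(0, 7, 0)));
	return 0;
}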

View File

@ -28,22 +28,21 @@ static bool policy_has_boost_freq(struct cpufreq_policy *policy)
return false;
}
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *pos;
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
unsigned int min_freq = ~0;
unsigned int max_freq = 0;
unsigned int freq;
unsigned int freq, i;
cpufreq_for_each_valid_entry(pos, table) {
cpufreq_for_each_valid_entry_idx(pos, table, i) {
freq = pos->frequency;
if ((!cpufreq_boost_enabled() || !policy->boost_enabled)
&& (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq);
pr_debug("table entry %u: %u kHz\n", i, freq);
if (freq < min_freq)
min_freq = freq;
if (freq > max_freq)
@ -65,10 +64,9 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
return 0;
}
int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
struct cpufreq_frequency_table *table)
int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy)
{
struct cpufreq_frequency_table *pos;
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
unsigned int freq, prev_smaller = 0;
bool found = false;
@ -110,7 +108,7 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
if (!policy->freq_table)
return -ENODEV;
return cpufreq_frequency_table_verify(policy, policy->freq_table);
return cpufreq_frequency_table_verify(policy);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
@ -128,7 +126,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
};
struct cpufreq_frequency_table *pos;
struct cpufreq_frequency_table *table = policy->freq_table;
unsigned int freq, diff, i = 0;
unsigned int freq, diff, i;
int index;
pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
@ -354,7 +352,7 @@ int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
return 0;
}
ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
ret = cpufreq_frequency_table_cpuinfo(policy);
if (ret)
return ret;

View File

@ -620,24 +620,9 @@ static int min_perf_pct_min(void)
(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
}
static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
{
u64 epb;
int ret;
if (!boot_cpu_has(X86_FEATURE_EPB))
return -ENXIO;
ret = rdmsrq_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
if (ret)
return (s16)ret;
return (s16)(epb & 0x0f);
}
static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
s16 epp;
s16 epp = -EOPNOTSUPP;
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
/*
@ -651,34 +636,13 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
return epp;
}
epp = (hwp_req_data >> 24) & 0xff;
} else {
/* When there is no EPP present, HWP uses EPB settings */
epp = intel_pstate_get_epb(cpu_data);
}
return epp;
}
static int intel_pstate_set_epb(int cpu, s16 pref)
{
u64 epb;
int ret;
if (!boot_cpu_has(X86_FEATURE_EPB))
return -ENXIO;
ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
if (ret)
return ret;
epb = (epb & ~0x0f) | pref;
wrmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
return 0;
}
/*
* EPP/EPB display strings corresponding to EPP index in the
* EPP display strings corresponding to EPP index in the
* energy_perf_strings[]
* index String
*-------------------------------------
@ -782,7 +746,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
u32 raw_epp)
{
int epp = -EINVAL;
int ret;
int ret = -EOPNOTSUPP;
if (!pref_index)
epp = cpu_data->epp_default;
@ -802,10 +766,6 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
return -EBUSY;
ret = intel_pstate_set_epp(cpu_data, epp);
} else {
if (epp == -EINVAL)
epp = (pref_index - 1) << 2;
ret = intel_pstate_set_epb(cpu_data->cpu, epp);
}
return ret;
@ -937,11 +897,19 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
cpufreq_freq_attr_ro(base_frequency);
enum hwp_cpufreq_attr_index {
HWP_BASE_FREQUENCY_INDEX = 0,
HWP_PERFORMANCE_PREFERENCE_INDEX,
HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX,
HWP_CPUFREQ_ATTR_COUNT,
};
static struct freq_attr *hwp_cpufreq_attrs[] = {
&energy_performance_preference,
&energy_performance_available_preferences,
&base_frequency,
NULL,
[HWP_BASE_FREQUENCY_INDEX] = &base_frequency,
[HWP_PERFORMANCE_PREFERENCE_INDEX] = &energy_performance_preference,
[HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX] =
&energy_performance_available_preferences,
[HWP_CPUFREQ_ATTR_COUNT] = NULL,
};
static bool no_cas __ro_after_init;
@ -1337,9 +1305,8 @@ static void intel_pstate_hwp_set(unsigned int cpu)
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
} else {
intel_pstate_set_epb(cpu, epp);
}
skip_epp:
WRITE_ONCE(cpu_data->hwp_req_cached, value);
wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value);
@ -1411,6 +1378,9 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
#define POWER_CTL_EE_ENABLE 1
#define POWER_CTL_EE_DISABLE 2
/* Enable bit for Dynamic Efficiency Control (DEC) */
#define POWER_CTL_DEC_ENABLE 27
static int power_ctl_ee_state;
static void set_power_ctl_ee_state(bool input)
@ -1502,9 +1472,7 @@ static void __intel_pstate_update_max_freq(struct cpufreq_policy *policy,
static bool intel_pstate_update_max_freq(struct cpudata *cpudata)
{
struct cpufreq_policy *policy __free(put_cpufreq_policy);
policy = cpufreq_cpu_get(cpudata->cpu);
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu);
if (!policy)
return false;
@ -1695,41 +1663,40 @@ unlock_driver:
return count;
}
static void update_qos_request(enum freq_qos_req_type type)
static void update_cpu_qos_request(int cpu, enum freq_qos_req_type type)
{
struct cpudata *cpudata = all_cpu_data[cpu];
unsigned int freq = cpudata->pstate.turbo_freq;
struct freq_qos_request *req;
struct cpufreq_policy *policy;
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return;
req = policy->driver_data;
if (!req)
return;
if (hwp_active)
intel_pstate_get_hwp_cap(cpudata);
if (type == FREQ_QOS_MIN) {
freq = DIV_ROUND_UP(freq * global.min_perf_pct, 100);
} else {
req++;
freq = (freq * global.max_perf_pct) / 100;
}
if (freq_qos_update_request(req, freq) < 0)
pr_warn("Failed to update freq constraint: CPU%d\n", cpu);
}
static void update_qos_requests(enum freq_qos_req_type type)
{
int i;
for_each_possible_cpu(i) {
struct cpudata *cpu = all_cpu_data[i];
unsigned int freq, perf_pct;
policy = cpufreq_cpu_get(i);
if (!policy)
continue;
req = policy->driver_data;
cpufreq_cpu_put(policy);
if (!req)
continue;
if (hwp_active)
intel_pstate_get_hwp_cap(cpu);
if (type == FREQ_QOS_MIN) {
perf_pct = global.min_perf_pct;
} else {
req++;
perf_pct = global.max_perf_pct;
}
freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);
if (freq_qos_update_request(req, freq) < 0)
pr_warn("Failed to update freq constraint: CPU%d\n", i);
}
for_each_possible_cpu(i)
update_cpu_qos_request(i, type);
}
static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
@ -1758,7 +1725,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
update_qos_request(FREQ_QOS_MAX);
update_qos_requests(FREQ_QOS_MAX);
mutex_unlock(&intel_pstate_driver_lock);
@ -1792,7 +1759,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
update_qos_request(FREQ_QOS_MIN);
update_qos_requests(FREQ_QOS_MIN);
mutex_unlock(&intel_pstate_driver_lock);
@ -2575,7 +2542,7 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
* that sample.time will always be reset before setting the utilization
* update hook and make the caller skip the sample then.
*/
if (cpu->last_sample_time) {
if (likely(cpu->last_sample_time)) {
intel_pstate_calc_avg_perf(cpu);
return true;
}
@ -3802,6 +3769,26 @@ static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
{}
};
static bool hwp_check_epp(void)
{
if (boot_cpu_has(X86_FEATURE_HWP_EPP))
return true;
/* Without EPP support, don't expose EPP-related sysfs attributes. */
hwp_cpufreq_attrs[HWP_PERFORMANCE_PREFERENCE_INDEX] = NULL;
hwp_cpufreq_attrs[HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX] = NULL;
return false;
}
static bool hwp_check_dec(void)
{
u64 power_ctl;
rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
return !!(power_ctl & BIT(POWER_CTL_DEC_ENABLE));
}
static int __init intel_pstate_init(void)
{
static struct cpudata **_all_cpu_data;
@ -3822,23 +3809,32 @@ static int __init intel_pstate_init(void)
id = x86_match_cpu(hwp_support_ids);
if (id) {
hwp_forced = intel_pstate_hwp_is_enabled();
bool epp_present = hwp_check_epp();
if (hwp_forced)
/*
* If HWP is enabled already, there is no choice but to deal
* with it.
*/
hwp_forced = intel_pstate_hwp_is_enabled();
if (hwp_forced) {
pr_info("HWP enabled by BIOS\n");
else if (no_load)
no_hwp = 0;
} else if (no_load) {
return -ENODEV;
} else if (!epp_present && !hwp_check_dec()) {
/*
* Avoid enabling HWP for processors without EPP support
* unless the Dynamic Efficiency Control (DEC) enable
* bit (MSR_IA32_POWER_CTL, bit 27) is set because that
* means incomplete HWP implementation which is a corner
* case and supporting it is generally problematic.
*/
no_hwp = 1;
}
copy_cpu_funcs(&core_funcs);
/*
* Avoid enabling HWP for processors without EPP support,
* because that means incomplete HWP implementation which is a
* corner case and supporting it is generally problematic.
*
* If HWP is enabled already, though, there is no choice but to
* deal with it.
*/
if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
if (!no_hwp) {
hwp_active = true;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
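
In the new update_cpu_qos_request() earlier in this file, the MIN constraint is computed with DIV_ROUND_UP() while the MAX constraint uses truncating division, so the resulting floor is never below min_perf_pct of the turbo frequency and the ceiling never above max_perf_pct of it. A tiny self-contained illustration of that rounding asymmetry (the frequency value is deliberately contrived so the rounding is visible):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int turbo_khz = 1234567;	/* contrived, not a real P-state */
	unsigned int min_pct = 20, max_pct = 80;

	/* MIN is rounded up and MAX truncated, so the floor stays at or
	 * above min_pct% of turbo and the ceiling at or below max_pct%. */
	unsigned int min_khz = DIV_ROUND_UP(turbo_khz * min_pct, 100);
	unsigned int max_khz = (turbo_khz * max_pct) / 100;

	printf("min=%u kHz max=%u kHz\n", min_khz, max_khz);
	return 0;
}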

View File

@ -953,6 +953,9 @@ static void __exit longhaul_exit(void)
struct cpufreq_policy *policy = cpufreq_cpu_get(0);
int i;
if (unlikely(!policy))
return;
for (i = 0; i < numscales; i++) {
if (mults[i] == maxmult) {
struct cpufreq_freqs freqs;

View File

@ -24,6 +24,8 @@
#define POLL_USEC 1000
#define TIMEOUT_USEC 300000
#define FDVFS_FDIV_HZ (26 * 1000)
enum {
REG_FREQ_LUT_TABLE,
REG_FREQ_ENABLE,
@ -35,7 +37,14 @@ enum {
REG_ARRAY_SIZE,
};
struct mtk_cpufreq_data {
struct mtk_cpufreq_priv {
struct device *dev;
const struct mtk_cpufreq_variant *variant;
void __iomem *fdvfs;
};
struct mtk_cpufreq_domain {
struct mtk_cpufreq_priv *parent;
struct cpufreq_frequency_table *table;
void __iomem *reg_bases[REG_ARRAY_SIZE];
struct resource *res;
@ -43,20 +52,51 @@ struct mtk_cpufreq_data {
int nr_opp;
};
static const u16 cpufreq_mtk_offsets[REG_ARRAY_SIZE] = {
[REG_FREQ_LUT_TABLE] = 0x0,
[REG_FREQ_ENABLE] = 0x84,
[REG_FREQ_PERF_STATE] = 0x88,
[REG_FREQ_HW_STATE] = 0x8c,
[REG_EM_POWER_TBL] = 0x90,
[REG_FREQ_LATENCY] = 0x110,
struct mtk_cpufreq_variant {
int (*init)(struct mtk_cpufreq_priv *priv);
const u16 reg_offsets[REG_ARRAY_SIZE];
const bool is_hybrid_dvfs;
};
static const struct mtk_cpufreq_variant cpufreq_mtk_base_variant = {
.reg_offsets = {
[REG_FREQ_LUT_TABLE] = 0x0,
[REG_FREQ_ENABLE] = 0x84,
[REG_FREQ_PERF_STATE] = 0x88,
[REG_FREQ_HW_STATE] = 0x8c,
[REG_EM_POWER_TBL] = 0x90,
[REG_FREQ_LATENCY] = 0x110,
},
};
static int mtk_cpufreq_hw_mt8196_init(struct mtk_cpufreq_priv *priv)
{
priv->fdvfs = devm_of_iomap(priv->dev, priv->dev->of_node, 0, NULL);
if (IS_ERR(priv->fdvfs))
return dev_err_probe(priv->dev, PTR_ERR(priv->fdvfs),
"failed to get fdvfs iomem\n");
return 0;
}
static const struct mtk_cpufreq_variant cpufreq_mtk_mt8196_variant = {
.init = mtk_cpufreq_hw_mt8196_init,
.reg_offsets = {
[REG_FREQ_LUT_TABLE] = 0x0,
[REG_FREQ_ENABLE] = 0x84,
[REG_FREQ_PERF_STATE] = 0x88,
[REG_FREQ_HW_STATE] = 0x8c,
[REG_EM_POWER_TBL] = 0x90,
[REG_FREQ_LATENCY] = 0x114,
},
.is_hybrid_dvfs = true,
};
static int __maybe_unused
mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
unsigned long *KHz)
{
struct mtk_cpufreq_data *data;
struct mtk_cpufreq_domain *data;
struct cpufreq_policy *policy;
int i;
@ -80,19 +120,38 @@ mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
return 0;
}
static void mtk_cpufreq_hw_fdvfs_switch(unsigned int target_freq,
struct cpufreq_policy *policy)
{
struct mtk_cpufreq_domain *data = policy->driver_data;
struct mtk_cpufreq_priv *priv = data->parent;
unsigned int cpu;
target_freq = DIV_ROUND_UP(target_freq, FDVFS_FDIV_HZ);
for_each_cpu(cpu, policy->real_cpus) {
writel_relaxed(target_freq, priv->fdvfs + cpu * 4);
}
}
static int mtk_cpufreq_hw_target_index(struct cpufreq_policy *policy,
unsigned int index)
{
struct mtk_cpufreq_data *data = policy->driver_data;
struct mtk_cpufreq_domain *data = policy->driver_data;
unsigned int target_freq;
writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
if (data->parent->fdvfs) {
target_freq = policy->freq_table[index].frequency;
mtk_cpufreq_hw_fdvfs_switch(target_freq, policy);
} else {
writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
}
return 0;
}
static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
{
struct mtk_cpufreq_data *data;
struct mtk_cpufreq_domain *data;
struct cpufreq_policy *policy;
unsigned int index;
@ -111,18 +170,21 @@ static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct mtk_cpufreq_data *data = policy->driver_data;
struct mtk_cpufreq_domain *data = policy->driver_data;
unsigned int index;
index = cpufreq_table_find_index_dl(policy, target_freq, false);
writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
if (data->parent->fdvfs)
mtk_cpufreq_hw_fdvfs_switch(target_freq, policy);
else
writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
return policy->freq_table[index].frequency;
}
static int mtk_cpu_create_freq_table(struct platform_device *pdev,
struct mtk_cpufreq_data *data)
struct mtk_cpufreq_domain *data)
{
struct device *dev = &pdev->dev;
u32 temp, i, freq, prev_freq = 0;
@ -157,9 +219,9 @@ static int mtk_cpu_create_freq_table(struct platform_device *pdev,
static int mtk_cpu_resources_init(struct platform_device *pdev,
struct cpufreq_policy *policy,
const u16 *offsets)
struct mtk_cpufreq_priv *priv)
{
struct mtk_cpufreq_data *data;
struct mtk_cpufreq_domain *data;
struct device *dev = &pdev->dev;
struct resource *res;
struct of_phandle_args args;
@ -180,6 +242,15 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
index = args.args[0];
of_node_put(args.np);
/*
* In a cpufreq with hybrid DVFS, such as the MT8196, the first declared
* register range is for FDVFS, followed by the frequency domain MMIOs.
*/
if (priv->variant->is_hybrid_dvfs)
index++;
data->parent = priv;
res = platform_get_resource(pdev, IORESOURCE_MEM, index);
if (!res) {
dev_err(dev, "failed to get mem resource %d\n", index);
@ -202,7 +273,7 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
data->res = res;
for (i = REG_FREQ_LUT_TABLE; i < REG_ARRAY_SIZE; i++)
data->reg_bases[i] = base + offsets[i];
data->reg_bases[i] = base + priv->variant->reg_offsets[i];
ret = mtk_cpu_create_freq_table(pdev, data);
if (ret) {
@ -223,7 +294,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
struct platform_device *pdev = cpufreq_get_driver_data();
int sig, pwr_hw = CPUFREQ_HW_STATUS | SVS_HW_STATUS;
struct mtk_cpufreq_data *data;
struct mtk_cpufreq_domain *data;
unsigned int latency;
int ret;
@ -262,7 +333,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
struct mtk_cpufreq_data *data = policy->driver_data;
struct mtk_cpufreq_domain *data = policy->driver_data;
struct resource *res = data->res;
void __iomem *base = data->base;
@ -275,7 +346,7 @@ static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
static void mtk_cpufreq_register_em(struct cpufreq_policy *policy)
{
struct em_data_callback em_cb = EM_DATA_CB(mtk_cpufreq_get_cpu_power);
struct mtk_cpufreq_data *data = policy->driver_data;
struct mtk_cpufreq_domain *data = policy->driver_data;
em_dev_register_perf_domain(get_cpu_device(policy->cpu), data->nr_opp,
&em_cb, policy->cpus, true);
@ -297,6 +368,7 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = {
static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
struct mtk_cpufreq_priv *priv;
const void *data;
int ret, cpu;
struct device *cpu_dev;
@ -320,7 +392,20 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
if (!data)
return -EINVAL;
platform_set_drvdata(pdev, (void *) data);
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->variant = data;
priv->dev = &pdev->dev;
if (priv->variant->init) {
ret = priv->variant->init(priv);
if (ret)
return ret;
}
platform_set_drvdata(pdev, priv);
cpufreq_mtk_hw_driver.driver_data = pdev;
ret = cpufreq_register_driver(&cpufreq_mtk_hw_driver);
@ -336,7 +421,8 @@ static void mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
}
static const struct of_device_id mtk_cpufreq_hw_match[] = {
{ .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets },
{ .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_base_variant },
{ .compatible = "mediatek,mt8196-cpufreq-hw", .data = &cpufreq_mtk_mt8196_variant },
{}
};
MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match);
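
In mtk_cpufreq_hw_fdvfs_switch() above, the requested frequency (cpufreq works in kHz) is divided by FDVFS_FDIV_HZ with DIV_ROUND_UP() and the result is written to one 32-bit FDVFS slot per CPU at offset cpu * 4. A small self-contained sketch of that conversion; reading FDVFS_FDIV_HZ (26 * 1000) as "26 MHz expressed in kHz" is an inference from the code, not something the diff states:

#include <stdio.h>

#define FDVFS_FDIV_HZ	(26 * 1000)	/* 26 MHz in kHz (inferred) */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Value written to a CPU's FDVFS slot for a target frequency in kHz. */
static unsigned int fdvfs_val(unsigned int target_khz)
{
	return DIV_ROUND_UP(target_khz, FDVFS_FDIV_HZ);
}

int main(void)
{
	/* 2.6 GHz is exactly 100 * 26 MHz; 3.0 GHz is not a multiple of
	 * 26 MHz, so it rounds up to the next step. */
	printf("2600000 kHz -> %u\n", fdvfs_val(2600000));	/* 100 */
	printf("3000000 kHz -> %u\n", fdvfs_val(3000000));	/* 116 */
	return 0;
}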

View File

@ -123,7 +123,7 @@ static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
soc_data->sram_max_volt);
return ret;
}
} else if (pre_vproc > new_vproc) {
} else {
vproc = max(new_vproc,
pre_vsram - soc_data->max_volt_shift);
ret = regulator_set_voltage(proc_reg, vproc,
@ -320,7 +320,6 @@ static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
struct dev_pm_opp *new_opp;
struct mtk_cpu_dvfs_info *info;
unsigned long freq, volt;
struct cpufreq_policy *policy;
int ret = 0;
info = container_of(nb, struct mtk_cpu_dvfs_info, opp_nb);
@ -353,12 +352,12 @@ static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
}
dev_pm_opp_put(new_opp);
policy = cpufreq_cpu_get(info->opp_cpu);
if (policy) {
struct cpufreq_policy *policy __free(put_cpufreq_policy)
= cpufreq_cpu_get(info->opp_cpu);
if (policy)
cpufreq_driver_target(policy, freq / 1000,
CPUFREQ_RELATION_L);
cpufreq_cpu_put(policy);
}
}
}

View File

@ -200,6 +200,10 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
case QCOM_ID_IPQ9574:
drv->versions = 1 << (unsigned int)(*speedbin);
break;
case QCOM_ID_IPQ5424:
case QCOM_ID_IPQ5404:
drv->versions = (*speedbin == 0x3b) ? BIT(1) : BIT(0);
break;
case QCOM_ID_MSM8996SG:
case QCOM_ID_APQ8096SG:
drv->versions = 1 << ((unsigned int)(*speedbin) + 4);
@ -591,6 +595,7 @@ static const struct of_device_id qcom_cpufreq_match_list[] __initconst __maybe_u
{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
{ .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
{ .compatible = "qcom,ipq5332", .data = &match_data_kryo },
{ .compatible = "qcom,ipq5424", .data = &match_data_kryo },
{ .compatible = "qcom,ipq6018", .data = &match_data_ipq6018 },
{ .compatible = "qcom,ipq8064", .data = &match_data_ipq8064 },
{ .compatible = "qcom,ipq8074", .data = &match_data_ipq8074 },

View File

@ -554,17 +554,15 @@ out_dmc0:
static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(0);
int ret;
struct cpufreq_policy *policy;
policy = cpufreq_cpu_get(0);
if (!policy) {
pr_debug("cpufreq: get no policy for cpu0\n");
return NOTIFY_BAD;
}
ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0);
cpufreq_cpu_put(policy);
if (ret < 0)
return NOTIFY_BAD;

View File

@ -15,6 +15,7 @@
#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
@ -424,6 +425,15 @@ static bool scmi_dev_used_by_cpus(struct device *scmi_dev)
return true;
}
/*
* Older Broadcom STB chips had a "clocks" property for CPU node(s)
* that did not match the SCMI performance protocol node. If we get
* here, it means we have such an older Device Tree, so return
* true to preserve backwards compatibility.
*/
if (of_machine_is_compatible("brcm,brcmstb"))
return true;
return false;
}

View File

@ -89,11 +89,9 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
static int sh_cpufreq_verify(struct cpufreq_policy_data *policy)
{
struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
struct cpufreq_frequency_table *freq_table;
freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
if (freq_table)
return cpufreq_frequency_table_verify(policy, freq_table);
if (policy->freq_table)
return cpufreq_frequency_table_verify(policy);
cpufreq_verify_within_cpu_limits(policy);

View File

@ -378,16 +378,16 @@ EXPORT_SYMBOL_GPL(speedstep_detect_processor);
* DETECT SPEEDSTEP SPEEDS *
*********************************************************************/
unsigned int speedstep_get_freqs(enum speedstep_processor processor,
unsigned int *low_speed,
unsigned int *high_speed,
unsigned int *transition_latency,
void (*set_state) (unsigned int state))
int speedstep_get_freqs(enum speedstep_processor processor,
unsigned int *low_speed,
unsigned int *high_speed,
unsigned int *transition_latency,
void (*set_state)(unsigned int state))
{
unsigned int prev_speed;
unsigned int ret = 0;
unsigned long flags;
ktime_t tv1, tv2;
int ret = 0;
if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
return -EINVAL;

View File

@ -41,8 +41,8 @@ extern unsigned int speedstep_get_frequency(enum speedstep_processor processor);
* SPEEDSTEP_LOW; the second argument is zero so that no
* cpufreq_notify_transition calls are initiated.
*/
extern unsigned int speedstep_get_freqs(enum speedstep_processor processor,
unsigned int *low_speed,
unsigned int *high_speed,
unsigned int *transition_latency,
void (*set_state) (unsigned int state));
extern int speedstep_get_freqs(enum speedstep_processor processor,
unsigned int *low_speed,
unsigned int *high_speed,
unsigned int *transition_latency,
void (*set_state)(unsigned int state));

View File

@ -103,13 +103,12 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
static unsigned int tegra186_cpufreq_get(unsigned int cpu)
{
struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
struct tegra186_cpufreq_cluster *cluster;
struct cpufreq_policy *policy;
unsigned int edvd_offset, cluster_id;
u32 ndiv;
policy = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
@ -117,7 +116,6 @@ static unsigned int tegra186_cpufreq_get(unsigned int cpu)
ndiv = readl(data->regs + edvd_offset) & EDVD_CORE_VOLT_FREQ_F_MASK;
cluster_id = data->cpus[policy->cpu].bpmp_cluster_id;
cluster = &data->clusters[cluster_id];
cpufreq_cpu_put(policy);
return (cluster->ref_clk_khz * ndiv) / cluster->div;
}

View File

@ -72,7 +72,9 @@ enum {
#define AM62P5_EFUSE_O_MPU_OPP 15
#define AM62P5_EFUSE_S_MPU_OPP 19
#define AM62P5_EFUSE_T_MPU_OPP 20
#define AM62P5_EFUSE_U_MPU_OPP 21
#define AM62P5_EFUSE_V_MPU_OPP 22
#define AM62P5_SUPPORT_O_MPU_OPP BIT(0)
#define AM62P5_SUPPORT_U_MPU_OPP BIT(2)
@ -153,7 +155,9 @@ static unsigned long am62p5_efuse_xlate(struct ti_cpufreq_data *opp_data,
unsigned long calculated_efuse = AM62P5_SUPPORT_O_MPU_OPP;
switch (efuse) {
case AM62P5_EFUSE_V_MPU_OPP:
case AM62P5_EFUSE_U_MPU_OPP:
case AM62P5_EFUSE_T_MPU_OPP:
case AM62P5_EFUSE_S_MPU_OPP:
calculated_efuse |= AM62P5_SUPPORT_U_MPU_OPP;
fallthrough;
@ -307,9 +311,10 @@ static struct ti_cpufreq_soc_data am3517_soc_data = {
};
static const struct soc_device_attribute k3_cpufreq_soc[] = {
{ .family = "AM62X", .revision = "SR1.0" },
{ .family = "AM62AX", .revision = "SR1.0" },
{ .family = "AM62PX", .revision = "SR1.0" },
{ .family = "AM62X", },
{ .family = "AM62AX", },
{ .family = "AM62PX", },
{ .family = "AM62DX", },
{ /* sentinel */ }
};
@ -457,6 +462,7 @@ static const struct of_device_id ti_cpufreq_of_match[] __maybe_unused = {
{ .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
{ .compatible = "ti,am625", .data = &am625_soc_data, },
{ .compatible = "ti,am62a7", .data = &am62a7_soc_data, },
{ .compatible = "ti,am62d2", .data = &am62a7_soc_data, },
{ .compatible = "ti,am62p5", .data = &am62p5_soc_data, },
/* legacy */
{ .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },

View File

@ -250,7 +250,7 @@ static int virt_cpufreq_offline(struct cpufreq_policy *policy)
static int virt_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{
if (policy->freq_table)
return cpufreq_frequency_table_verify(policy, policy->freq_table);
return cpufreq_frequency_table_verify(policy);
cpufreq_verify_within_cpu_limits(policy);
return 0;

View File

@ -780,11 +780,10 @@ struct cpufreq_frequency_table {
else
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy);
int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy);
int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
struct cpufreq_frequency_table *table);
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,

View File

@ -543,7 +543,7 @@ impl Policy {
pub fn cpus(&mut self) -> &mut cpumask::Cpumask {
// SAFETY: The pointer to `cpus` is valid for writing and remains valid for the lifetime of
// the returned reference.
unsafe { cpumask::CpumaskVar::as_mut_ref(&mut self.as_mut_ref().cpus) }
unsafe { cpumask::CpumaskVar::from_raw_mut(&mut self.as_mut_ref().cpus) }
}
/// Sets clock for the [`Policy`].

View File

@ -212,6 +212,7 @@ impl Cpumask {
/// }
/// assert_eq!(mask2.weight(), count);
/// ```
#[repr(transparent)]
pub struct CpumaskVar {
#[cfg(CONFIG_CPUMASK_OFFSTACK)]
ptr: NonNull<Cpumask>,
@ -270,7 +271,7 @@ impl CpumaskVar {
///
/// The caller must ensure that `ptr` is valid for writing and remains valid for the lifetime
/// of the returned reference.
pub unsafe fn as_mut_ref<'a>(ptr: *mut bindings::cpumask_var_t) -> &'a mut Self {
pub unsafe fn from_raw_mut<'a>(ptr: *mut bindings::cpumask_var_t) -> &'a mut Self {
// SAFETY: Guaranteed by the safety requirements of the function.
//
// INVARIANT: The caller ensures that `ptr` is valid for writing and remains valid for the
@ -284,7 +285,7 @@ impl CpumaskVar {
///
/// The caller must ensure that `ptr` is valid for reading and remains valid for the lifetime
/// of the returned reference.
pub unsafe fn as_ref<'a>(ptr: *const bindings::cpumask_var_t) -> &'a Self {
pub unsafe fn from_raw<'a>(ptr: *const bindings::cpumask_var_t) -> &'a Self {
// SAFETY: Guaranteed by the safety requirements of the function.
//
// INVARIANT: The caller ensures that `ptr` is valid for reading and remains valid for the

View File

@ -16,7 +16,8 @@ use crate::{
ffi::c_ulong,
prelude::*,
str::CString,
types::{ARef, AlwaysRefCounted, Opaque},
sync::aref::{ARef, AlwaysRefCounted},
types::Opaque,
};
#[cfg(CONFIG_CPU_FREQ)]
@ -162,7 +163,7 @@ impl From<MicroWatt> for c_ulong {
/// use kernel::device::Device;
/// use kernel::error::Result;
/// use kernel::opp::{Data, MicroVolt, Token};
/// use kernel::types::ARef;
/// use kernel::sync::aref::ARef;
///
/// fn create_opp(dev: &ARef<Device>, freq: Hertz, volt: MicroVolt, level: u32) -> Result<Token> {
/// let data = Data::new(freq, volt, level, false);
@ -211,7 +212,7 @@ impl Drop for Token {
/// use kernel::device::Device;
/// use kernel::error::Result;
/// use kernel::opp::{Data, MicroVolt, Token};
/// use kernel::types::ARef;
/// use kernel::sync::aref::ARef;
///
/// fn create_opp(dev: &ARef<Device>, freq: Hertz, volt: MicroVolt, level: u32) -> Result<Token> {
/// let data = Data::new(freq, volt, level, false);
@ -262,7 +263,7 @@ impl Data {
/// use kernel::clk::Hertz;
/// use kernel::error::Result;
/// use kernel::opp::{OPP, SearchType, Table};
/// use kernel::types::ARef;
/// use kernel::sync::aref::ARef;
///
/// fn find_opp(table: &Table, freq: Hertz) -> Result<ARef<OPP>> {
/// let opp = table.opp_from_freq(freq, Some(true), None, SearchType::Exact)?;
@ -335,7 +336,7 @@ impl Drop for ConfigToken {
/// use kernel::error::Result;
/// use kernel::opp::{Config, ConfigOps, ConfigToken};
/// use kernel::str::CString;
/// use kernel::types::ARef;
/// use kernel::sync::aref::ARef;
/// use kernel::macros::vtable;
///
/// #[derive(Default)]
@ -581,7 +582,7 @@ impl<T: ConfigOps + Default> Config<T> {
/// use kernel::device::Device;
/// use kernel::error::Result;
/// use kernel::opp::Table;
/// use kernel::types::ARef;
/// use kernel::sync::aref::ARef;
///
/// fn get_table(dev: &ARef<Device>, mask: &mut Cpumask, freq: Hertz) -> Result<Table> {
/// let mut opp_table = Table::from_of_cpumask(dev, mask)?;