From 8d11fc9d0d25246fc69f45904138f6eddbe09074 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Tue, 6 Aug 2013 16:02:17 -0400 Subject: [PATCH 01/35] CPUFREQ: Add 7 Governors --- drivers/cpufreq/Kconfig | 390 ++++++++++++++++++++++++++++------------ 1 file changed, 274 insertions(+), 116 deletions(-) diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 5bd1018c..9b3eda6c 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -50,41 +50,44 @@ choice This option sets which CPUFreq governor shall be loaded at startup. If in doubt, select 'performance'. -config CPU_FREQ_DEFAULT_GOV_PERFORMANCE - bool "performance" +config CPU_FREQ_DEFAULT_GOV_ABYSSPLUG + bool "abyssplug" + select CPU_FREQ_GOV_ABYSSPLUG select CPU_FREQ_GOV_PERFORMANCE + ---help--- + Use the CPUFreq governor 'abyssplug' as default. This allows you + to get a full dynamic frequency capable system with CPU + hotplug support by simply loading your cpufreq low-level + hardware driver. Be aware that not all cpufreq drivers + support the hotplug governor. If unsure have a look at + the help section of the driver. Fallback governor will be the + performance governor. + +config CPU_FREQ_DEFAULT_GOV_ADAPTIVE + bool "adaptive" + select CPU_FREQ_GOV_ADAPTIVE help - Use the CPUFreq governor 'performance' as default. This sets - the frequency statically to the highest frequency supported by - the CPU. - -config CPU_FREQ_DEFAULT_GOV_POWERSAVE - bool "powersave" - depends on EXPERT - select CPU_FREQ_GOV_POWERSAVE - help - Use the CPUFreq governor 'powersave' as default. This sets - the frequency statically to the lowest frequency supported by - the CPU. + Use the CPUFreq governor 'adaptive' as default. This allows + you to get a full dynamic cpu frequency capable system by simply + loading your cpufreq low-level hardware driver, using the + 'adaptive' governor for latency-sensitive workloads and demanding + performance. -config CPU_FREQ_DEFAULT_GOV_USERSPACE - bool "userspace" - select CPU_FREQ_GOV_USERSPACE - help - Use the CPUFreq governor 'userspace' as default. This allows - you to set the CPU frequency manually or when a userspace - program shall be able to set the CPU dynamically without having - to enable the userspace governor manually. +config CPU_FREQ_DEFAULT_GOV_ASSWAX + bool "asswax" + select CPU_FREQ_GOV_ASSWAX + help + Use as default governor -config CPU_FREQ_DEFAULT_GOV_ONDEMAND - bool "ondemand" - select CPU_FREQ_GOV_ONDEMAND +config CPU_FREQ_DEFAULT_GOV_BADASS + bool "badass" + select CPU_FREQ_GOV_BADASS select CPU_FREQ_GOV_PERFORMANCE help - Use the CPUFreq governor 'ondemand' as default. This allows + Use the CPUFreq governor 'badass' as default. This allows you to get a full dynamic frequency capable system by simply loading your cpufreq low-level hardware driver. - Be aware that not all cpufreq drivers support the ondemand + Be aware that not all cpufreq drivers support the badass governor. If unsure have a look at the help section of the driver. Fallback governor will be the performance governor. @@ -119,6 +122,11 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE governor. If unsure have a look at the help section of the driver. Fallback governor will be the performance governor. 
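Several of the default-governor entries above note that the performance governor acts as the fallback, which is why they also select CPU_FREQ_GOV_PERFORMANCE. The other half of making a governor the boot default is registering it early enough that it already exists when the low-level cpufreq driver probes; this series handles that (see the tail of cpufreq_abyssplug.c further down) by promoting the governor's init call to an fs_initcall() whenever the matching default symbol is set. A minimal sketch of that pattern, reusing the abyssplug names from this patch:

    #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ABYSSPLUG
    fs_initcall(cpufreq_gov_dbs_init);   /* default governor: register early in boot */
    #else
    module_init(cpufreq_gov_dbs_init);   /* otherwise ordinary module init is fine */
    #endif
    module_exit(cpufreq_gov_dbs_exit);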
+config CPU_FREQ_DEFAULT_GOV_DANCEDANCE + bool "dancedance" + select CPU_FREQ_GOV_DANCEDANCE + help + config CPU_FREQ_DEFAULT_GOV_INTERACTIVE bool "interactive" select CPU_FREQ_GOV_INTERACTIVE @@ -140,92 +148,91 @@ config CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND bool "intellidemand" select CPU_FREQ_GOV_INTELLIDEMAND help - Use the CPUFreq governor 'intellidemand' as default. - - -endchoice + Use the CPUFreq governor 'intellidemand' as default. This is + based on Ondemand with browsing detection based on GPU loading -config CPU_FREQ_GOV_PERFORMANCE - tristate "'performance' governor" +config CPU_FREQ_DEFAULT_GOV_KTOONSERVATIVEQ + bool "ktoonservativeq" + select CPU_FREQ_GOV_KTOONSERVATIVEQ + select CPU_FREQ_GOV_PERFORMANCE help - This cpufreq governor sets the frequency statically to the - highest available CPU frequency. - - To compile this driver as a module, choose M here: the - module will be called cpufreq_performance. + Use the CPUFreq governor 'ktoonservativeq' as default. This allows + you to get a full dynamic frequency capable system by simply + loading your cpufreq low-level hardware driver. + Be aware that not all cpufreq drivers support the ktoonservativeq + governor. If unsure have a look at the help section of the + driver. Fallback governor will be the performance governor. This + governor adds the capability of hotpluging. - If in doubt, say Y. +config CPU_FREQ_DEFAULT_GOV_NIGHTMARE + bool "nightmare" + select CPU_FREQ_GOV_NIGHTMARE + help -config CPU_FREQ_GOV_BADASS - tristate "'badass' cpufreq governor" - depends on CPU_FREQ +config CPU_FREQ_DEFAULT_GOV_ONDEMAND + bool "ondemand" + select CPU_FREQ_GOV_ONDEMAND + select CPU_FREQ_GOV_PERFORMANCE help - 'badass' - This driver adds a dynamic cpufreq policy governor. - The governor does a periodic polling and - changes frequency based on the CPU utilization. - The support for this governor depends on CPU capability to - do fast frequency switching (i.e, very low latency frequency - transitions). - - If in doubt, say N. + Use the CPUFreq governor 'ondemand' as default. This allows + you to get a full dynamic frequency capable system by simply + loading your cpufreq low-level hardware driver. + Be aware that not all cpufreq drivers support the ondemand + governor. If unsure have a look at the help section of the + driver. Fallback governor will be the performance governor. -config CPU_FREQ_GOV_BADASS_2_PHASE - tristate "'2-phase' power-efficiency badass algorithm" - depends on CPU_FREQ_GOV_BADASS +config CPU_FREQ_DEFAULT_GOV_PEGASUSQ + bool "pegasusq" + select CPU_FREQ_GOV_PEGASUSQ help - '2-phase' - This driver adds a new algo to save power - -config CPU_FREQ_GOV_BADASS_2_PHASE_FREQ - int "'2-phase' badass frequency" - default 918000 - depends on CPU_FREQ_GOV_BADASS - depends on CPU_FREQ_GOV_BADASS_2_PHASE + Use the CPUFreq governor 'pegasusq' as default. -config CPU_FREQ_GOV_BADASS_3_PHASE - tristate "'3-phase' power-efficiency badass algorithm" - depends on CPU_FREQ_GOV_BADASS - depends on CPU_FREQ_GOV_BADASS_2_PHASE +config CPU_FREQ_DEFAULT_GOV_SLP + bool "slp" + select CPU_FREQ_GOV_SLP help - '3-phase' - This driver adds a new algo to save power + Use the CPUFreq governor 'slp' as default. 
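Whichever entry is chosen here only sets the governor loaded at startup; at runtime the active governor is still selected per policy through the standard cpufreq sysfs files (not something added by this patch). A small userspace sketch that reads the currently bound governor for cpu0, assuming the usual sysfs layout:

    #include <stdio.h>

    int main(void)
    {
            char gov[64];
            FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", "r");

            if (!f) {
                    perror("scaling_governor");
                    return 1;
            }
            if (fgets(gov, sizeof(gov), f))
                    printf("active governor: %s", gov);   /* e.g. "pegasusq" */
            fclose(f);
            return 0;
    }

Writing a governor name into the same file (as root) switches policies, provided the corresponding CPU_FREQ_GOV_* option above was built in.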
-config CPU_FREQ_GOV_BADASS_3_PHASE_FREQ - int "'3-phase' badass frequency" - default 1188000 - depends on CPU_FREQ_GOV_BADASS - depends on CPU_FREQ_GOV_BADASS_2_PHASE - depends on CPU_FREQ_GOV_BADASS_3_PHASE - -config CPU_FREQ_GOV_BADASS_GPU_CONTROL - tristate "'gpu_control' power-efficiency badass algorithm" - depends on CPU_FREQ_GOV_BADASS - depends on CPU_FREQ_GOV_BADASS_2_PHASE +config CPU_FREQ_DEFAULT_GOV_PERFORMANCE + bool "performance" + select CPU_FREQ_GOV_PERFORMANCE help - 'gpu_control' - This driver adds a new algo to save power + Use the CPUFreq governor 'performance' as default. This sets + the frequency statically to the highest frequency supported by + the CPU. -config CPU_FREQ_GOV_BADASS_LOWBAT_POWERSAVE - tristate "'lowbat_powersave' power-efficiency badass algorithm" - depends on CPU_FREQ_GOV_BADASS +config CPU_FREQ_DEFAULT_GOV_POWERSAVE + bool "powersave" + depends on EXPERT + select CPU_FREQ_GOV_POWERSAVE help - 'lowbat_powersave' - This driver adds a new algo to save power + Use the CPUFreq governor 'powersave' as default. This sets + the frequency statically to the lowest frequency supported by + the CPU. -config CPU_FREQ_GOV_BADASS_ALLOW_BYPASS - tristate "Allows bypassing phases" - depends on CPU_FREQ_GOV_BADASS - depends on CPU_FREQ_GOV_BADASS_2_PHASE +config CPU_FREQ_DEFAULT_GOV_SMARTASSH3 + bool "smartassH3" + select CPU_FREQ_GOV_SMARTASSH3 help - 'allow_bypass' - This driver adds a bypass to the phases - + Use the CPUFreq governor 'slp' as default. -config CPU_FREQ_GOV_POWERSAVE - tristate "'powersave' governor" +config CPU_FREQ_DEFAULT_GOV_USERSPACE + bool "userspace" + select CPU_FREQ_GOV_USERSPACE help - This cpufreq governor sets the frequency statically to the - lowest available CPU frequency. + Use the CPUFreq governor 'userspace' as default. This allows + you to set the CPU frequency manually or when a userspace + program shall be able to set the CPU dynamically without having + to enable the userspace governor manually. - To compile this driver as a module, choose M here: the - module will be called cpufreq_powersave. +config CPU_FREQ_DEFAULT_GOV_WHEATLEY + bool "wheatley" + select CPU_FREQ_GOV_WHEATLEY + select CPU_FREQ_GOV_PERFORMANCE + ---help--- + Use the CPUFreq governor 'wheatley' as default. - If in doubt, say Y. +endchoice config CPU_FREQ_GOV_LIONHEART tristate "lionheart" @@ -233,12 +240,42 @@ config CPU_FREQ_GOV_LIONHEART help Use the CPUFreq governor 'lionheart' as default. +config CPU_FREQ_GOV_ABYSSPLUG + tristate "'abyssplug' cpufreq governor" + depends on CPU_FREQ && NO_HZ && HOTPLUG_CPU + ---help--- + 'abyssplug' - this driver mimics the frequency scaling behavior + in 'ondemand', but with several key differences. First is + that frequency transitions use the CPUFreq table directly, + instead of incrementing in a percentage of the maximum + available frequency. Second 'abyssplug' will offline auxillary + CPUs when the system is idle, and online those CPUs once the + system becomes busy again. This last feature is needed for + architectures which transition to low power states when only + the "master" CPU is online, or for thermally constrained + devices. + If you don't have one of these architectures or devices, use + 'ondemand' instead. + If in doubt, say N. config CPU_FREQ_GOV_SMARTASSV2 tristate "smartassv2" depends on CPU_FREQ help Use the CPUFreq governor 'smartassv2' as default. 
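The abyssplug help above describes offlining the auxiliary CPU when idle and onlining it again under load. Condensed from the dbs_check_cpu() body in cpufreq_abyssplug.c later in this series (names and tunables are the ones defined there), the hotplug decisions key off load averaged over several sampling periods, while the frequency decisions use the instantaneous load:

    if (avg_load > dbs_tuners_ins.up_threshold &&
        num_online_cpus() < 2 &&
        hotplug_in_avg_load > dbs_tuners_ins.up_threshold)
            queue_work_on(this_dbs_info->cpu, khotplug_wq,
                          &this_dbs_info->cpu_up_work);      /* online CPU1 */

    if (avg_load < dbs_tuners_ins.down_threshold &&
        policy->cur <= policy->min &&
        num_online_cpus() > 1 &&
        hotplug_out_avg_load < dbs_tuners_ins.down_threshold)
            queue_work_on(this_dbs_info->cpu, khotplug_wq,
                          &this_dbs_info->cpu_down_work);    /* offline CPU1 */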
+config CPU_FREQ_GOV_ADAPTIVE + tristate "'adaptive' cpufreq policy governor" + help + 'adaptive' - This driver adds a dynamic cpufreq policy governor + designed for latency-sensitive workloads and also for demanding + performance. + + This governor attempts to reduce the latency of clock + increases so that the system is more responsive to + interactive workloads in loweset steady-state but to + to reduce power consumption in middle operation level level up + will be done in step by step to prohibit system from going to + max operation level. config CPU_FREQ_GOV_USERSPACE tristate "'userspace' governor for userspace frequency scaling" @@ -249,35 +286,57 @@ config CPU_FREQ_GOV_USERSPACE . To compile this driver as a module, choose M here: the - module will be called cpufreq_userspace. + module will be called cpufreq_adaptive. - For details, take a look at . + For details, take a look at linux/Documentation/cpu-freq. - If in doubt, say Y. + If in doubt, say N. -config CPU_FREQ_GOV_ONDEMAND - tristate "'ondemand' cpufreq policy governor" +config CPU_FREQ_GOV_ASSWAX + tristate "'asswax' cpufreq governor" + depends on CPU_FREQ + help + Use as default governors + +config CPU_FREQ_GOV_BADASS + tristate "'badass' cpufreq policy governor" select CPU_FREQ_TABLE help - 'ondemand' - This driver adds a dynamic cpufreq policy governor. - The governor does a periodic polling and + 'badass' - This driver adds a dynamic cpufreq policy governor. + The governor does a periodic polling and changes frequency based on the CPU utilization. The support for this governor depends on CPU capability to - do fast frequency switching (i.e, very low latency frequency - transitions). + do fast frequency switching (i.e, very low latency frequency transitions). + To compile this driver as a module, choose M here: the + module will be called cpufreq_badass. + If in doubt, say N. + +config CPU_FREQ_GOV_CONSERVATIVE + tristate "'conservative' cpufreq governor" + depends on CPU_FREQ + help + 'conservative' - this driver is rather similar to the 'ondemand' + governor both in its source code and its purpose, the difference is + its optimisation for better suitability in a battery powered + environment. The frequency is gracefully increased and decreased + rather than jumping to 100% when speed is required. + + If you have a desktop machine then you should really be considering + the 'ondemand' governor instead, however if you are using a laptop, + PDA or even an AMD64 based computer (due to the unacceptable + step-by-step latency issues between the minimum and maximum frequency + transitions in the CPU) you will probably want to use this governor. To compile this driver as a module, choose M here: the - module will be called cpufreq_ondemand. + module will be called cpufreq_conservative. For details, take a look at linux/Documentation/cpu-freq. If in doubt, say N. -config CPU_FREQ_GOV_ONDEMAND_2_PHASE - tristate "'2-phase' power-efficiency ondemand algorithm" - depends on CPU_FREQ_GOV_ONDEMAND - help - '2-phase' - This driver adds a new algo to save power +config CPU_FREQ_GOV_DANCEDANCE + tristate "'dancedance' cpufreq governor" + depends on CPU_FREQ config CPU_FREQ_GOV_INTERACTIVE tristate "'interactive' cpufreq policy governor" @@ -296,11 +355,29 @@ config CPU_FREQ_GOV_INTERACTIVE If in doubt, say N. 
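The 'adaptive' behaviour described above (fast ramp for latency-sensitive load, gradual stepping otherwise) shows up most clearly in the governor's short-sample timer. Condensed from cpufreq_adaptive_timer() in cpufreq_adaptive.c later in this patch, with the defaults defined there (go_maxspeed_load = 60, keep_minspeed_load = 30):

    if (max_load >= go_maxspeed_load)
            new_freq = policy->max;                    /* latency path: jump straight to max */
    else
            new_freq = policy->max * max_load / 100;   /* otherwise scale with load */

    if (max_load <= keep_minspeed_load && policy->cur == policy->min)
            new_freq = policy->cur;                    /* light load: stay at the minimum */

The long-term sampling path instead steps up by step_up_load through the frequency table, which is what keeps mid-range workloads from being thrown to the maximum OPP.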
-config CPU_FREQ_GOV_CONSERVATIVE - tristate "'conservative' cpufreq governor" +config CPU_FREQ_GOV_INTELLIDEMAND + tristate "'intellidemand' cpufreq policy governor" + select CPU_FREQ_TABLE + help + 'intellidemand' - This driver adds a dynamic cpufreq policy governor. + The governor does a periodic polling and + changes frequency based on the CPU utilization. + The support for this governor depends on CPU capability to + do fast frequency switching (i.e, very low latency frequency + transitions). with browsing detection based on GPU loading + + To compile this driver as a module, choose M here: the + module will be called cpufreq_ondemand. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +config CPU_FREQ_GOV_KTOONSERVATIVEQ + tristate "'ktoonservativeq' cpufreq governor" depends on CPU_FREQ help - 'conservative' - this driver is rather similar to the 'ondemand' + 'ktoonservativeq' - this driver is rather similar to the 'ondemand' governor both in its source code and its purpose, the difference is its optimisation for better suitability in a battery powered environment. The frequency is gracefully increased and decreased @@ -311,18 +388,99 @@ config CPU_FREQ_GOV_CONSERVATIVE PDA or even an AMD64 based computer (due to the unacceptable step-by-step latency issues between the minimum and maximum frequency transitions in the CPU) you will probably want to use this governor. + This governor adds the capability of hotpluging. To compile this driver as a module, choose M here: the - module will be called cpufreq_conservative. + module will be called cpufreq_ktoonservativeq. For details, take a look at linux/Documentation/cpu-freq. If in doubt, say N. -config CPU_FREQ_GOV_INTELLIDEMAND - tristate "'intellidemand' cpufreq governor" - depends on CPU_FREQ +config CPU_FREQ_GOV_NIGHTMARE + tristate "'nightmare' cpufreq governor" + depends on CPU_FREQ + +config CPU_FREQ_GOV_ONDEMAND + tristate "'ondemand' cpufreq policy governor" + select CPU_FREQ_TABLE + help + 'ondemand' - This driver adds a dynamic cpufreq policy governor. + The governor does a periodic polling and + changes frequency based on the CPU utilization. + The support for this governor depends on CPU capability to + do fast frequency switching (i.e, very low latency frequency + transitions). + + To compile this driver as a module, choose M here: the + module will be called cpufreq_ondemand. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +config CPU_FREQ_GOV_PERFORMANCE + tristate "'performance' governor" + help + This cpufreq governor sets the frequency statically to the + highest available CPU frequency. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_performance. + + If in doubt, say Y. + +config CPU_FREQ_GOV_PEGASUSQ + tristate "'pegasusq' cpufreq policy governor" + +config CPU_FREQ_GOV_POWERSAVE + tristate "'powersave' governor" + help + This cpufreq governor sets the frequency statically to the + lowest available CPU frequency. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_powersave. + + If in doubt, say Y. 
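The "periodic polling, change frequency based on CPU utilization" wording repeated in the ondemand-style entries above boils down to the same per-CPU load figure in each of these governors. A condensed excerpt of the calculation as it appears in the dbs_check_cpu() implementations later in this series:

    wall_time = (unsigned int) cputime64_sub(cur_wall_time, j_dbs_info->prev_cpu_wall);
    idle_time = (unsigned int) cputime64_sub(cur_idle_time, j_dbs_info->prev_cpu_idle);

    load = 100 * (wall_time - idle_time) / wall_time;   /* % of the window not spent idle */

    /* on the way down, pick the lowest frequency that still keeps the
     * projected load safely under up_threshold: */
    freq_next = max_load_freq /
            (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential);

The governors differ mainly in what they do with that figure: jump to max, step through the table, or (abyssplug, ktoonservativeq) also drive CPU hotplug.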
+ +config CPU_FREQ_GOV_SLP + tristate "'slp' cpufreq policy governor" + +config CPU_FREQ_GOV_SMARTASSH3 + tristate "'smartassH3' cpufreq governor" + depends on CPU_FREQ + help + 'smartassH3' - a "smart" governor + +config CPU_FREQ_GOV_USERSPACE + tristate "'userspace' governor for userspace frequency scaling" + help + Enable this cpufreq governor when you either want to set the + CPU frequency manually or when a userspace program shall + be able to set the CPU dynamically, like on LART + . + + To compile this driver as a module, choose M here: the + module will be called cpufreq_userspace. + + For details, take a look at . + + If in doubt, say Y. + +config CPU_FREQ_GOV_WHEATLEY + tristate "'wheatley' cpufreq governor" + depends on CPU_FREQ + +config SEC_DVFS + bool "DVFS job" + default n + depends on CPU_FREQ +config SEC_DVFS_BOOSTER + bool "DVFS input booster" + default y + depends on SEC_DVFS menu "x86 CPU frequency scaling drivers" depends on X86 From a949a4efd2754c964de9ca2487021365d6aacf55 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Tue, 6 Aug 2013 16:09:49 -0400 Subject: [PATCH 02/35] CPUFREQ: Add Actual Governors (D3RP) --- drivers/cpufreq/cpufreq_abyssplug.c | 817 ++++++++++ drivers/cpufreq/cpufreq_adaptive.c | 952 ++++++++++++ drivers/cpufreq/cpufreq_asswax.c | 945 ++++++++++++ drivers/cpufreq/cpufreq_dancedance.c | 650 ++++++++ drivers/cpufreq/cpufreq_interactive.c | 1259 ++++++++++++++++ drivers/cpufreq/cpufreq_ktoonservativeq.c | 1608 ++++++++++++++++++++ drivers/cpufreq/cpufreq_nightmare.c | 1656 +++++++++++++++++++++ drivers/cpufreq/cpufreq_pegasusq.c | 1636 ++++++++++++++++++++ drivers/cpufreq/cpufreq_smartassH3.c | 904 +++++++++++ drivers/cpufreq/cpufreq_wheatley.c | 839 +++++++++++ 10 files changed, 11266 insertions(+) create mode 100644 drivers/cpufreq/cpufreq_abyssplug.c create mode 100644 drivers/cpufreq/cpufreq_adaptive.c create mode 100644 drivers/cpufreq/cpufreq_asswax.c create mode 100644 drivers/cpufreq/cpufreq_dancedance.c create mode 100644 drivers/cpufreq/cpufreq_interactive.c create mode 100644 drivers/cpufreq/cpufreq_ktoonservativeq.c create mode 100644 drivers/cpufreq/cpufreq_nightmare.c create mode 100644 drivers/cpufreq/cpufreq_pegasusq.c create mode 100644 drivers/cpufreq/cpufreq_smartassH3.c create mode 100644 drivers/cpufreq/cpufreq_wheatley.c diff --git a/drivers/cpufreq/cpufreq_abyssplug.c b/drivers/cpufreq/cpufreq_abyssplug.c new file mode 100644 index 00000000..37df4463 --- /dev/null +++ b/drivers/cpufreq/cpufreq_abyssplug.c @@ -0,0 +1,817 @@ +/* + * CPUFreq AbyssPlug governor + * + * + * Based on hotplug governor + * Copyright (C) 2010 Texas Instruments, Inc. + * Mike Turquette + * Santosh Shilimkar + * + * Based on ondemand governor + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi , + * Jun Nakajima + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* greater than 95% avg load across online CPUs increases frequency */ +#define DEFAULT_UP_FREQ_MIN_LOAD (95) + +/* Keep 10% of idle under the up threshold when decreasing the frequency */ +#define DEFAULT_FREQ_DOWN_DIFFERENTIAL (1) + +/* less than 40% avg load across online CPUs decreases frequency */ +#define DEFAULT_DOWN_FREQ_MAX_LOAD (40) + +/* default sampling period (uSec) is bogus; 10x ondemand's default for x86 */ +#define DEFAULT_SAMPLING_PERIOD (50000) + +/* default number of sampling periods to average before hotplug-in decision */ +#define DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS (5) + +/* default number of sampling periods to average before hotplug-out decision */ +#define DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS (20) + +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event); +//static int hotplug_boost(struct cpufreq_policy *policy); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ABYSSPLUG +static +#endif +struct cpufreq_governor cpufreq_gov_abyssplug = { + .name = "abyssplug", + .governor = cpufreq_governor_dbs, + .owner = THIS_MODULE, +}; + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct work_struct cpu_up_work; + struct work_struct cpu_down_work; + struct cpufreq_frequency_table *freq_table; + int cpu; + unsigned int boost_applied; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, hp_cpu_dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on + * different CPUs. It protects dbs_enable in governor start/stop. + */ +static DEFINE_MUTEX(dbs_mutex); + +static struct workqueue_struct *khotplug_wq; + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int down_threshold; + unsigned int hotplug_in_sampling_periods; + unsigned int hotplug_out_sampling_periods; + unsigned int hotplug_load_index; + unsigned int *hotplug_load_history; + unsigned int ignore_nice; + unsigned int io_is_busy; + unsigned int boost_timeout; +} dbs_tuners_ins = { + .sampling_rate = DEFAULT_SAMPLING_PERIOD, + .up_threshold = DEFAULT_UP_FREQ_MIN_LOAD, + .down_differential = DEFAULT_FREQ_DOWN_DIFFERENTIAL, + .down_threshold = DEFAULT_DOWN_FREQ_MAX_LOAD, + .hotplug_in_sampling_periods = DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS, + .hotplug_out_sampling_periods = DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS, + .hotplug_load_index = 0, + .ignore_nice = 0, + .io_is_busy = 0, + .boost_timeout = 0, +}; + +/* + * A corner case exists when switching io_is_busy at run-time: comparing idle + * times from a non-io_is_busy period to an io_is_busy period (or vice-versa) + * will misrepresent the actual change in system idleness. We ignore this + * corner case: enabling io_is_busy might cause freq increase and disabling + * might cause freq decrease, which probably matches the original intent. 
+ */ +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time; + u64 iowait_time; + + /* cpufreq-abyssplug always assumes CONFIG_NO_HZ */ + idle_time = get_cpu_idle_time_us(cpu, wall); + + /* add time spent doing I/O to idle time */ + if (dbs_tuners_ins.io_is_busy) { + iowait_time = get_cpu_iowait_time_us(cpu, wall); + /* cpufreq-abyssplug always assumes CONFIG_NO_HZ */ + if (iowait_time != -1ULL && idle_time >= iowait_time) + idle_time -= iowait_time; + } + + return idle_time; +} + +/************************** sysfs interface ************************/ + +/* XXX look at global sysfs macros in cpufreq.h, can those be used here? */ + +/* cpufreq_abyssplug Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(up_threshold, up_threshold); +show_one(down_differential, down_differential); +show_one(down_threshold, down_threshold); +show_one(hotplug_in_sampling_periods, hotplug_in_sampling_periods); +show_one(hotplug_out_sampling_periods, hotplug_out_sampling_periods); +show_one(ignore_nice_load, ignore_nice); +show_one(io_is_busy, io_is_busy); +show_one(boost_timeout, boost_timeout); + +static ssize_t store_boost_timeout(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.boost_timeout = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.sampling_rate = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input <= dbs_tuners_ins.down_threshold) { + return -EINVAL; + } + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.up_threshold = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_down_differential(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input >= dbs_tuners_ins.up_threshold) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.down_differential = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input >= dbs_tuners_ins.up_threshold) { + return -EINVAL; + } + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.down_threshold = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_hotplug_in_sampling_periods(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input; + unsigned int *temp; + unsigned int max_windows; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + /* already using this value, bail out */ + if (input == dbs_tuners_ins.hotplug_in_sampling_periods) 
+ return count; + + mutex_lock(&dbs_mutex); + ret = count; + max_windows = max(dbs_tuners_ins.hotplug_in_sampling_periods, + dbs_tuners_ins.hotplug_out_sampling_periods); + + /* no need to resize array */ + if (input <= max_windows) { + dbs_tuners_ins.hotplug_in_sampling_periods = input; + goto out; + } + + /* resize array */ + temp = kmalloc((sizeof(unsigned int) * input), GFP_KERNEL); + + if (!temp || IS_ERR(temp)) { + ret = -ENOMEM; + goto out; + } + + memcpy(temp, dbs_tuners_ins.hotplug_load_history, + (max_windows * sizeof(unsigned int))); + kfree(dbs_tuners_ins.hotplug_load_history); + + /* replace old buffer, old number of sampling periods & old index */ + dbs_tuners_ins.hotplug_load_history = temp; + dbs_tuners_ins.hotplug_in_sampling_periods = input; + dbs_tuners_ins.hotplug_load_index = max_windows; +out: + mutex_unlock(&dbs_mutex); + + return ret; +} + +static ssize_t store_hotplug_out_sampling_periods(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input; + unsigned int *temp; + unsigned int max_windows; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + /* already using this value, bail out */ + if (input == dbs_tuners_ins.hotplug_out_sampling_periods) + return count; + + mutex_lock(&dbs_mutex); + ret = count; + max_windows = max(dbs_tuners_ins.hotplug_in_sampling_periods, + dbs_tuners_ins.hotplug_out_sampling_periods); + + /* no need to resize array */ + if (input <= max_windows) { + dbs_tuners_ins.hotplug_out_sampling_periods = input; + goto out; + } + + /* resize array */ + temp = kmalloc((sizeof(unsigned int) * input), GFP_KERNEL); + + if (!temp || IS_ERR(temp)) { + ret = -ENOMEM; + goto out; + } + + memcpy(temp, dbs_tuners_ins.hotplug_load_history, + (max_windows * sizeof(unsigned int))); + kfree(dbs_tuners_ins.hotplug_load_history); + + /* replace old buffer, old number of sampling periods & old index */ + dbs_tuners_ins.hotplug_load_history = temp; + dbs_tuners_ins.hotplug_out_sampling_periods = input; + dbs_tuners_ins.hotplug_load_index = max_windows; +out: + mutex_unlock(&dbs_mutex); + + return ret; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + mutex_lock(&dbs_mutex); + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + mutex_unlock(&dbs_mutex); + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(hp_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + + } + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.io_is_busy = !!input; + mutex_unlock(&dbs_mutex); + + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(up_threshold); +define_one_global_rw(down_differential); +define_one_global_rw(down_threshold); +define_one_global_rw(hotplug_in_sampling_periods); 
+define_one_global_rw(hotplug_out_sampling_periods); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(io_is_busy); +define_one_global_rw(boost_timeout); + +static struct attribute *dbs_attributes[] = { + &sampling_rate.attr, + &up_threshold.attr, + &down_differential.attr, + &down_threshold.attr, + &hotplug_in_sampling_periods.attr, + &hotplug_out_sampling_periods.attr, + &ignore_nice_load.attr, + &io_is_busy.attr, + &boost_timeout.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "abyssplug", +}; + +/************************** sysfs end ************************/ + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + /* combined load of all enabled CPUs */ + unsigned int total_load = 0; + /* single largest CPU load percentage*/ + unsigned int max_load = 0; + /* largest CPU load in terms of frequency */ + unsigned int max_load_freq = 0; + /* average load across all enabled CPUs */ + unsigned int avg_load = 0; + /* average load across multiple sampling periods for hotplug events */ + unsigned int hotplug_in_avg_load = 0; + unsigned int hotplug_out_avg_load = 0; + /* number of sampling periods averaged for hotplug decisions */ + unsigned int periods; + + struct cpufreq_policy *policy; + unsigned int i, j; + + policy = this_dbs_info->cur_policy; + + /* + * cpu load accounting + * get highest load, total load and average load across all CPUs + */ + for_each_cpu(j, policy->cpus) { + unsigned int load; + unsigned int idle_time, wall_time; + cputime64_t cur_wall_time, cur_idle_time; + struct cpu_dbs_info_s *j_dbs_info; + + j_dbs_info = &per_cpu(hp_cpu_dbs_info, j); + + /* update both cur_idle_time and cur_wall_time */ + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + + /* how much wall time has passed since last iteration? */ + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + /* how much idle time has passed since last iteration? */ + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + /* load is the percentage of time not spent in idle */ + load = 100 * (wall_time - idle_time) / wall_time; + + /* keep track of combined load across all CPUs */ + total_load += load; + + /* keep track of highest single load across all CPUs */ + if (load > max_load) + max_load = load; + } + + /* use the max load in the OPP freq change policy */ + max_load_freq = max_load * policy->cur; + + /* calculate the average load across all related CPUs */ + avg_load = total_load / num_online_cpus(); + + mutex_lock(&dbs_mutex); + + /* + * hotplug load accounting + * average load over multiple sampling periods + */ + + /* how many sampling periods do we use for hotplug decisions? 
*/ + periods = max(dbs_tuners_ins.hotplug_in_sampling_periods, + dbs_tuners_ins.hotplug_out_sampling_periods); + + /* store avg_load in the circular buffer */ + dbs_tuners_ins.hotplug_load_history[dbs_tuners_ins.hotplug_load_index] + = avg_load; + + /* compute average load across in & out sampling periods */ + for (i = 0, j = dbs_tuners_ins.hotplug_load_index; + i < periods; i++, j--) { + if (i < dbs_tuners_ins.hotplug_in_sampling_periods) + hotplug_in_avg_load += + dbs_tuners_ins.hotplug_load_history[j]; + if (i < dbs_tuners_ins.hotplug_out_sampling_periods) + hotplug_out_avg_load += + dbs_tuners_ins.hotplug_load_history[j]; + + if (j == 0) + j = periods; + } + + hotplug_in_avg_load = hotplug_in_avg_load / + dbs_tuners_ins.hotplug_in_sampling_periods; + + hotplug_out_avg_load = hotplug_out_avg_load / + dbs_tuners_ins.hotplug_out_sampling_periods; + + /* return to first element if we're at the circular buffer's end */ + if (++dbs_tuners_ins.hotplug_load_index == periods) + dbs_tuners_ins.hotplug_load_index = 0; + + /* check if auxiliary CPU is needed based on avg_load */ + if (avg_load > dbs_tuners_ins.up_threshold) { + /* should we enable auxillary CPUs? */ + if (num_online_cpus() < 2 && hotplug_in_avg_load > + dbs_tuners_ins.up_threshold) { + queue_work_on(this_dbs_info->cpu, khotplug_wq, + &this_dbs_info->cpu_up_work); + goto out; + } + } + + /* check for frequency increase based on max_load */ + if (max_load > dbs_tuners_ins.up_threshold) { + /* increase to highest frequency supported */ + if (policy->cur < policy->max) + __cpufreq_driver_target(policy, policy->max, + CPUFREQ_RELATION_H); + + goto out; + } + + /* check for frequency decrease */ + if (avg_load < dbs_tuners_ins.down_threshold) { + /* are we at the minimum frequency already? */ + if (policy->cur <= policy->min) { + /* should we disable auxillary CPUs? 
*/ + if (num_online_cpus() > 1 && hotplug_out_avg_load < + dbs_tuners_ins.down_threshold) { + queue_work_on(this_dbs_info->cpu, khotplug_wq, + &this_dbs_info->cpu_down_work); + } + goto out; + } + } + + /* + * go down to the lowest frequency which can sustain the load by + * keeping 30% of idle in order to not cross the up_threshold + */ + if ((max_load_freq < + (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * + policy->cur) && (policy->cur > policy->min)) { + unsigned int freq_next; + freq_next = max_load_freq / + (dbs_tuners_ins.up_threshold - + dbs_tuners_ins.down_differential); + + if (freq_next < policy->min) + freq_next = policy->min; + + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } +out: + mutex_unlock(&dbs_mutex); + return; +} + +static void __cpuinit do_cpu_up(struct work_struct *work) +{ + cpu_up(1); +} + +static void __cpuinit do_cpu_down(struct work_struct *work) +{ + cpu_down(1); +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + int delay = 0; + + mutex_lock(&dbs_info->timer_mutex); + if (!dbs_info->boost_applied) { + dbs_check_cpu(dbs_info); + /* We want all related CPUs to do sampling nearly on same jiffy */ + delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + } else { + delay = usecs_to_jiffies(dbs_tuners_ins.boost_timeout); + dbs_info->boost_applied = 0; + if (num_online_cpus() < 2) + queue_work_on(cpu, khotplug_wq, + &dbs_info->cpu_up_work); + } + queue_delayed_work_on(cpu, khotplug_wq, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all related CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + delay -= jiffies % delay; + + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + if (!dbs_info->boost_applied) + delay = usecs_to_jiffies(dbs_tuners_ins.boost_timeout); + queue_delayed_work_on(dbs_info->cpu, khotplug_wq, &dbs_info->work, + delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int i, j, max_periods; + int rc; + + this_dbs_info = &per_cpu(hp_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(hp_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + + max_periods = max(DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS, + DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS); + dbs_tuners_ins.hotplug_load_history = kmalloc( + (sizeof(unsigned int) * max_periods), + GFP_KERNEL); + if (!dbs_tuners_ins.hotplug_load_history) { + WARN_ON(1); + return -ENOMEM; + } + for (i = 0; i < max_periods; i++) + dbs_tuners_ins.hotplug_load_history[i] = 50; + } + this_dbs_info->cpu = cpu; + this_dbs_info->freq_table = cpufreq_frequency_get_table(cpu); + /* + * Start the timerschedule work, when this 
governor + * is used for first time + */ + if (dbs_enable == 1) { + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + } + if (!dbs_tuners_ins.boost_timeout) + dbs_tuners_ins.boost_timeout = dbs_tuners_ins.sampling_rate * 30; + mutex_unlock(&dbs_mutex); + + mutex_init(&this_dbs_info->timer_mutex); + dbs_timer_init(this_dbs_info); + break; + + case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + mutex_destroy(&this_dbs_info->timer_mutex); + dbs_enable--; + mutex_unlock(&dbs_mutex); + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + kfree(dbs_tuners_ins.hotplug_load_history); + /* + * XXX BIG CAVEAT: Stopping the governor with CPU1 offline + * will result in it remaining offline until the user onlines + * it again. It is up to the user to do this (for now). + */ + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +#if 0 +static int hotplug_boost(struct cpufreq_policy *policy) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + + this_dbs_info = &per_cpu(hp_cpu_dbs_info, cpu); + +#if 0 + /* Already at max? */ + if (policy->cur == policy->max) + return; +#endif + + mutex_lock(&this_dbs_info->timer_mutex); + this_dbs_info->boost_applied = 1; + __cpufreq_driver_target(policy, policy->max, + CPUFREQ_RELATION_H); + mutex_unlock(&this_dbs_info->timer_mutex); + + return 0; +} +#endif + +static int __init cpufreq_gov_dbs_init(void) +{ + int err; + cputime64_t wall; + u64 idle_time; + int cpu = get_cpu(); + struct cpu_dbs_info_s *dbs_info = &per_cpu(hp_cpu_dbs_info, 0); + + INIT_WORK(&dbs_info->cpu_up_work, do_cpu_up); + INIT_WORK(&dbs_info->cpu_down_work, do_cpu_down); + + idle_time = get_cpu_idle_time_us(cpu, &wall); + put_cpu(); + if (idle_time != -1ULL) { + dbs_tuners_ins.up_threshold = DEFAULT_UP_FREQ_MIN_LOAD; + } else { + pr_err("cpufreq-abyssplug: %s: assumes CONFIG_NO_HZ\n", + __func__); + return -EINVAL; + } + + khotplug_wq = create_workqueue("khotplug"); + if (!khotplug_wq) { + pr_err("Creation of khotplug failed\n"); + return -EFAULT; + } + err = cpufreq_register_governor(&cpufreq_gov_abyssplug); + if (err) + destroy_workqueue(khotplug_wq); + + return err; +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_abyssplug); + destroy_workqueue(khotplug_wq); +} + +MODULE_DESCRIPTION("'cpufreq_abyssplug' - cpufreq governor for dynamic frequency scaling and CPU hotplug"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ABYSSPLUG +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); + diff --git a/drivers/cpufreq/cpufreq_adaptive.c b/drivers/cpufreq/cpufreq_adaptive.c new file mode 100644 index 00000000..2eff3e28 --- /dev/null +++ b/drivers/cpufreq/cpufreq_adaptive.c @@ -0,0 +1,952 @@ +/* + * drivers/cpufreq/cpufreq_adaptive.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . 
+ * Jun Nakajima + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) +#define DEF_FREQUENCY_UP_THRESHOLD (80) +#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) +#define MICRO_FREQUENCY_UP_THRESHOLD (95) +#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) +#define MIN_FREQUENCY_UP_THRESHOLD (11) +#define MAX_FREQUENCY_UP_THRESHOLD (100) +#define MIN_ONDEMAND_THRESHOLD (4) +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. + * All times here are in uS. + */ +#define MIN_SAMPLING_RATE_RATIO (2) + +static unsigned int min_sampling_rate; + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +static void (*pm_idle_old)(void); +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ADAPTIVE +static +#endif +struct cpufreq_governor cpufreq_gov_adaptive = { + .name = "adaptive", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_hi_jiffies; + int cpu; + unsigned int sample_type:1; + bool ondemand; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on + * different CPUs. It protects dbs_enable in governor start/stop. + */ +static DEFINE_MUTEX(dbs_mutex); +static struct task_struct *up_task; +static struct workqueue_struct *down_wq; +static struct work_struct freq_scale_down_work; +static cpumask_t up_cpumask; +static spinlock_t up_cpumask_lock; +static cpumask_t down_cpumask; +static spinlock_t down_cpumask_lock; + +static DEFINE_PER_CPU(cputime64_t, idle_in_idle); +static DEFINE_PER_CPU(cputime64_t, idle_exit_wall); + +static struct timer_list cpu_timer; +static unsigned int target_freq; +static DEFINE_MUTEX(short_timer_mutex); + +/* Go to max speed when CPU load at or above this value. 
*/ +#define DEFAULT_GO_MAXSPEED_LOAD 60 +static unsigned long go_maxspeed_load; + +#define DEFAULT_KEEP_MINSPEED_LOAD 30 +static unsigned long keep_minspeed_load; + +#define DEFAULT_STEPUP_LOAD 10 +static unsigned long step_up_load; + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int ignore_nice; + unsigned int io_is_busy; +} dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, + .ignore_nice = 0, +}; + +static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +static void adaptive_init_cpu(int cpu) +{ + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + dbs_info->freq_table = cpufreq_frequency_get_table(cpu); +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_max(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + printk_once(KERN_INFO "CPUFREQ: adaptive sampling_rate_max " + "sysfs file is deprecated - used by: %s\n", current->comm); + return sprintf(buf, "%u\n", -1U); +} + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +define_one_global_ro(sampling_rate_max); +define_one_global_ro(sampling_rate_min); + +/* cpufreq_adaptive Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(up_threshold, up_threshold); +show_one(ignore_nice_load, ignore_nice); + +/*** delete after deprecation time ***/ + +#define DEPRECATION_MSG(file_name) \ + printk_once(KERN_INFO "CPUFREQ: Per core adaptive sysfs " \ + "interface is deprecated - " #file_name "\n"); + +#define show_one_old(file_name) \ +static ssize_t show_##file_name##_old \ +(struct cpufreq_policy *unused, char *buf) \ +{ \ + printk_once(KERN_INFO "CPUFREQ: Per core adaptive sysfs " \ + "interface is deprecated - " #file_name "\n"); \ + return show_##file_name(NULL, NULL, buf); \ +} + +/*** delete after deprecation time ***/ + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.io_is_busy = !!input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.up_threshold = input; + mutex_unlock(&dbs_mutex); + + return 
count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + mutex_lock(&dbs_mutex); + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + mutex_unlock(&dbs_mutex); + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time_us(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + + } + mutex_unlock(&dbs_mutex); + + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(up_threshold); +define_one_global_rw(ignore_nice_load); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_max.attr, + &sampling_rate_min.attr, + &sampling_rate.attr, + &up_threshold.attr, + &ignore_nice_load.attr, + &io_is_busy.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "adaptive", +}; + +/*** delete after deprecation time ***/ + +#define write_one_old(file_name) \ +static ssize_t store_##file_name##_old \ +(struct cpufreq_policy *unused, const char *buf, size_t count) \ +{ \ + printk_once(KERN_INFO "CPUFREQ: Per core adaptive sysfs " \ + "interface is deprecated - " #file_name "\n"); \ + return store_##file_name(NULL, NULL, buf, count); \ +} + +static void cpufreq_adaptive_timer(unsigned long data) +{ + cputime64_t cur_idle; + cputime64_t cur_wall; + unsigned int delta_idle; + unsigned int delta_time; + int short_load; + unsigned int new_freq; + unsigned long flags; + struct cpu_dbs_info_s *this_dbs_info; + struct cpufreq_policy *policy; + unsigned int j; + unsigned int index; + unsigned int max_load = 0; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); + + policy = this_dbs_info->cur_policy; + + for_each_online_cpu(j) { + cur_idle = get_cpu_idle_time_us(j, &cur_wall); + + delta_idle = (unsigned int) cputime64_sub(cur_idle, + per_cpu(idle_in_idle, j)); + delta_time = (unsigned int) cputime64_sub(cur_wall, + per_cpu(idle_exit_wall, j)); + + /* + * If timer ran less than 1ms after short-term sample started, retry. 
+ */ + if (delta_time < 1000) + goto do_nothing; + + if (delta_idle > delta_time) + short_load = 0; + else + short_load = 100 * (delta_time - delta_idle) / delta_time; + + if (short_load > max_load) + max_load = short_load; + } + + if (this_dbs_info->ondemand) + goto do_nothing; + + if (max_load >= go_maxspeed_load) + new_freq = policy->max; + else + new_freq = policy->max * max_load / 100; + + if ((max_load <= keep_minspeed_load) && + (policy->cur == policy->min)) + new_freq = policy->cur; + + if (cpufreq_frequency_table_target(policy, this_dbs_info->freq_table, + new_freq, CPUFREQ_RELATION_L, + &index)) { + goto do_nothing; + } + + new_freq = this_dbs_info->freq_table[index].frequency; + + target_freq = new_freq; + + if (new_freq < this_dbs_info->cur_policy->cur) { + spin_lock_irqsave(&down_cpumask_lock, flags); + cpumask_set_cpu(0, &down_cpumask); + spin_unlock_irqrestore(&down_cpumask_lock, flags); + queue_work(down_wq, &freq_scale_down_work); + } else { + spin_lock_irqsave(&up_cpumask_lock, flags); + cpumask_set_cpu(0, &up_cpumask); + spin_unlock_irqrestore(&up_cpumask_lock, flags); + wake_up_process(up_task); + } + + return; + +do_nothing: + for_each_online_cpu(j) { + per_cpu(idle_in_idle, j) = + get_cpu_idle_time_us(j, + &per_cpu(idle_exit_wall, j)); + } + mod_timer(&cpu_timer, jiffies + 2); + schedule_delayed_work_on(0, &this_dbs_info->work, 10); + + if (mutex_is_locked(&short_timer_mutex)) + mutex_unlock(&short_timer_mutex); + return; +} + +/*** delete after deprecation time ***/ + +/************************** sysfs end ************************/ + +static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) +{ +#ifndef CONFIG_ARCH_EXYNOS4 + if (p->cur == p->max) + return; +#endif + __cpufreq_driver_target(p, freq, CPUFREQ_RELATION_H); +} + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + unsigned int max_load_freq; + + struct cpufreq_policy *policy; + unsigned int j; + + unsigned int index, new_freq; + unsigned int longterm_load = 0; + + policy = this_dbs_info->cur_policy; + + /* + * Every sampling_rate, we check, if current idle time is less + * than 20% (default), then we try to increase frequency + * Every sampling_rate, we look for a the lowest + * frequency which can sustain the load while keeping idle time over + * 30%. If such a frequency exist, we try to decrease to this frequency. + * + * Any frequency increase takes it to the maximum frequency. 
+ * Frequency reduction happens at minimum steps of + * 5% (default) of current frequency + */ + + /* Get Absolute Load - in terms of freq */ + max_load_freq = 0; + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + unsigned int load, load_freq; + int freq_avg; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time_us(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, + j_dbs_info->prev_cpu_iowait); + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) { + cputime64_t cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_dbs_info->prev_cpu_nice; + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + /* + * For the purpose of adaptive, waiting for disk IO is an + * indication that you're performance critical, and not that + * the system is actually idle. So subtract the iowait time + * from the cpu idle time. + */ + + if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + + if (load > longterm_load) + longterm_load = load; + + freq_avg = __cpufreq_driver_getavg(policy, j); + if (freq_avg <= 0) + freq_avg = policy->cur; + + load_freq = load * freq_avg; + + if (load_freq > max_load_freq) + max_load_freq = load_freq; + } + + if (longterm_load >= MIN_ONDEMAND_THRESHOLD) + this_dbs_info->ondemand = true; + else + this_dbs_info->ondemand = false; + + /* Check for frequency increase */ + if (max_load_freq > (dbs_tuners_ins.up_threshold * policy->cur)) { + cpufreq_frequency_table_target(policy, + this_dbs_info->freq_table, + (policy->cur + step_up_load), + CPUFREQ_RELATION_L, &index); + + new_freq = this_dbs_info->freq_table[index].frequency; + dbs_freq_increase(policy, new_freq); + return; + } + + /* Check for frequency decrease */ + /* if we cannot reduce the frequency anymore, break out early */ +#ifndef CONFIG_ARCH_EXYNOS4 + if (policy->cur == policy->min) + return; +#endif + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus 10 points under the threshold. 
+ */ + if (max_load_freq < + (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * + policy->cur) { + unsigned int freq_next; + freq_next = max_load_freq / + (dbs_tuners_ins.up_threshold - + dbs_tuners_ins.down_differential); + + if (freq_next < policy->min) + freq_next = policy->min; + + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + + int delay; + + mutex_lock(&dbs_info->timer_mutex); + + /* Common NORMAL_SAMPLE setup */ + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + dbs_check_cpu(dbs_info); + + /* We want all CPUs to do sampling nearly on + * same jiffy + */ + delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + schedule_delayed_work_on(cpu, &dbs_info->work, delay); + + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); +} + +/* + * Not all CPUs want IO time to be accounted as busy; this dependson how + * efficient idling at a higher frequency/voltage is. + * Pavel Machek says this is not so for various generations of AMD and old + * Intel systems. + * Mike Chan (androidlcom) calis this is also not true for ARM. + * Because of this, whitelist specific known (series) of CPUs by default, and + * leave all others up to the user. + */ +static int should_io_be_busy(void) +{ +#if defined(CONFIG_X86) + /* + * For Intel, Core 2 (model 15) andl later have an efficient idle. 
+ */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + boot_cpu_data.x86 == 6 && + boot_cpu_data.x86_model >= 15) + return 1; +#endif + return 0; +} + +static void cpufreq_adaptive_idle(void) +{ + int i; + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, 0); + struct cpufreq_policy *policy; + + policy = dbs_info->cur_policy; + + pm_idle_old(); + + if ((policy->cur == policy->min) || + (policy->cur == policy->max)) { + + if (timer_pending(&cpu_timer)) + return; + + if (mutex_trylock(&short_timer_mutex)) { + for_each_online_cpu(i) { + per_cpu(idle_in_idle, i) = + get_cpu_idle_time_us(i, + &per_cpu(idle_exit_wall, i)); + } + + mod_timer(&cpu_timer, jiffies + 2); + cancel_delayed_work(&dbs_info->work); + } + } else { + if (timer_pending(&cpu_timer)) + del_timer(&cpu_timer); + + } +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_mutex); + + rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time_us(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + } + this_dbs_info->cpu = cpu; + adaptive_init_cpu(cpu); + + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + /* policy latency is in nS. 
Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = + max(min_sampling_rate, + latency * LATENCY_MULTIPLIER); + dbs_tuners_ins.io_is_busy = should_io_be_busy(); + } + mutex_unlock(&dbs_mutex); + + mutex_init(&this_dbs_info->timer_mutex); + dbs_timer_init(this_dbs_info); + + pm_idle_old = pm_idle; + pm_idle = cpufreq_adaptive_idle; + break; + + case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + sysfs_remove_group(&policy->kobj, &dbs_attr_group); + mutex_destroy(&this_dbs_info->timer_mutex); + dbs_enable--; + mutex_unlock(&dbs_mutex); + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + + pm_idle = pm_idle_old; + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +static inline void cpufreq_adaptive_update_time(void) +{ + struct cpu_dbs_info_s *this_dbs_info; + struct cpufreq_policy *policy; + int j; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); + policy = this_dbs_info->cur_policy; + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time_us(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + j_dbs_info->prev_cpu_wall = cur_wall_time; + + j_dbs_info->prev_cpu_idle = cur_idle_time; + + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + + } + +} + +static int cpufreq_adaptive_up_task(void *data) +{ + unsigned long flags; + struct cpu_dbs_info_s *this_dbs_info; + struct cpufreq_policy *policy; + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); + policy = this_dbs_info->cur_policy; + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + spin_lock_irqsave(&up_cpumask_lock, flags); + + if (cpumask_empty(&up_cpumask)) { + spin_unlock_irqrestore(&up_cpumask_lock, flags); + schedule(); + + if (kthread_should_stop()) + break; + + spin_lock_irqsave(&up_cpumask_lock, flags); + } + + set_current_state(TASK_RUNNING); + + cpumask_clear(&up_cpumask); + spin_unlock_irqrestore(&up_cpumask_lock, flags); + + __cpufreq_driver_target(this_dbs_info->cur_policy, + target_freq, + CPUFREQ_RELATION_H); + if (policy->cur != policy->max) { + mutex_lock(&this_dbs_info->timer_mutex); + + schedule_delayed_work_on(0, &this_dbs_info->work, delay); + mutex_unlock(&this_dbs_info->timer_mutex); + cpufreq_adaptive_update_time(); + } + if (mutex_is_locked(&short_timer_mutex)) + mutex_unlock(&short_timer_mutex); + } + + return 0; +} + +static void cpufreq_adaptive_freq_down(struct work_struct *work) +{ + unsigned long flags; + struct cpu_dbs_info_s *this_dbs_info; + struct cpufreq_policy *policy; + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + spin_lock_irqsave(&down_cpumask_lock, 
flags); + cpumask_clear(&down_cpumask); + spin_unlock_irqrestore(&down_cpumask_lock, flags); + + this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); + policy = this_dbs_info->cur_policy; + + __cpufreq_driver_target(this_dbs_info->cur_policy, + target_freq, + CPUFREQ_RELATION_H); + + if (policy->cur != policy->min) { + mutex_lock(&this_dbs_info->timer_mutex); + + schedule_delayed_work_on(0, &this_dbs_info->work, delay); + mutex_unlock(&this_dbs_info->timer_mutex); + cpufreq_adaptive_update_time(); + } + + if (mutex_is_locked(&short_timer_mutex)) + mutex_unlock(&short_timer_mutex); +} + +static int __init cpufreq_gov_dbs_init(void) +{ + cputime64_t wall; + u64 idle_time; + int cpu = get_cpu(); + + struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; + go_maxspeed_load = DEFAULT_GO_MAXSPEED_LOAD; + keep_minspeed_load = DEFAULT_KEEP_MINSPEED_LOAD; + step_up_load = DEFAULT_STEPUP_LOAD; + + idle_time = get_cpu_idle_time_us(cpu, &wall); + put_cpu(); + if (idle_time != -1ULL) { + /* Idle micro accounting is supported. Use finer thresholds */ + dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; + dbs_tuners_ins.down_differential = + MICRO_FREQUENCY_DOWN_DIFFERENTIAL; + /* + * In no_hz/micro accounting case we set the minimum frequency + * not depending on HZ, but fixed (very low). The deferred + * timer might skip some samples if idle/sleeping as needed. + */ + min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; + } else { + /* For correct statistics, we need 10 ticks for each measure */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + } + + init_timer(&cpu_timer); + cpu_timer.function = cpufreq_adaptive_timer; + + up_task = kthread_create(cpufreq_adaptive_up_task, NULL, + "kadaptiveup"); + + if (IS_ERR(up_task)) + return PTR_ERR(up_task); + + sched_setscheduler_nocheck(up_task, SCHED_FIFO, ¶m); + get_task_struct(up_task); + + /* No rescuer thread, bind to CPU queuing the work for possibly + warm cache (probably doesn't matter much). */ + down_wq = alloc_workqueue("kadaptive_down", 0, 1); + + if (!down_wq) + goto err_freeuptask; + + INIT_WORK(&freq_scale_down_work, cpufreq_adaptive_freq_down); + + + return cpufreq_register_governor(&cpufreq_gov_adaptive); +err_freeuptask: + put_task_struct(up_task); + return -ENOMEM; +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_adaptive); +} + + +MODULE_AUTHOR("Venkatesh Pallipadi "); +MODULE_AUTHOR("Alexey Starikovskiy "); +MODULE_DESCRIPTION("'cpufreq_adaptive' - A dynamic cpufreq governor for " + "Low Latency Frequency Transition capable processors"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ADAPTIVE +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_asswax.c b/drivers/cpufreq/cpufreq_asswax.c new file mode 100644 index 00000000..cd2d9333 --- /dev/null +++ b/drivers/cpufreq/cpufreq_asswax.c @@ -0,0 +1,945 @@ +/* + * drivers/cpufreq/cpufreq_asswax.c + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+ * GNU General Public License for more details.
+ *
+ * Rewritten by: Godmachine81
+ * Worked on by: Zarboz
+ * Original Author: Erasmux
+ *
+ * A work in progress of merging BrazilianWax and Smartass into AssWAX!
+ *
+ * Originally based on the interactive governor by Mike Chan (mike@android.com),
+ * which was adapted to the 2.6.29 kernel by Nadlabak (pavel@doshaska.net)
+ *
+ * SMP support based on mod by faux123
+ *
+ * For a general overview of asswax see the relevant part in
+ * Documentation/cpu-freq/governors.txt
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/******************** Tunable parameters: ********************/
+
+/*
+ * The "ideal" frequency to use when awake. The governor will ramp up faster
+ * towards the ideal frequency and slower after it has passed it. Similarly,
+ * lowering the frequency towards the ideal frequency is faster than below it.
+ */
+static unsigned int awake_ideal_freq = 594000;
+
+static unsigned int interactive_ideal_freq = 810000;
+
+static unsigned int interactive_timeout = 2;
+
+/*
+ * The "ideal" frequency to use when suspended.
+ * When set to 0, the governor will not track the suspended state (meaning
+ * that practically when sleep_ideal_freq==0 the awake_ideal_freq is used
+ * also when suspended).
+ */
+static unsigned int sleep_ideal_freq = 384000;
+
+/*
+ * Frequency delta when ramping up above the ideal frequency.
+ * Zero disables this and causes the governor to always jump straight to max frequency.
+ * When below the ideal frequency we always ramp up to the ideal freq.
+ */
+static unsigned int ramp_up_step = 192000;
+
+/*
+ * Frequency delta when ramping down below the ideal frequency.
+ * Zero disables this and ramp down is calculated according to the load heuristic.
+ * When above the ideal frequency we always ramp down to the ideal freq.
+ */
+static unsigned int ramp_down_step = 0;
+
+/*
+ * CPU freq will be increased if measured load > max_cpu_load;
+ */
+static unsigned long max_cpu_load = 85;
+
+/*
+ * CPU freq will be decreased if measured load < min_cpu_load;
+ */
+static unsigned long min_cpu_load = 45;
+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp up.
+ * Notice we ignore this when we are below the ideal frequency.
+ */
+static unsigned long up_rate_us = 10000;
+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp down.
+ * Notice we ignore this when we are above the ideal frequency.
+ */
+static unsigned long down_rate_us = 20000;
+
+/*
+ * The frequency to set when waking up from sleep.
+ * When sleep_ideal_freq=0 this will have no effect.
+ */
+static unsigned int sleep_wakeup_freq = 151200; // typo? -dm
+
+/*
+ * Sampling rate; I highly recommend leaving it at 2.
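+ * (Two jiffies is HZ-dependent: roughly 20 ms at HZ=100, 8 ms at HZ=250,
+ * and about 6.7 ms at HZ=300. The timer re-arms itself with
+ * jiffies + sample_rate_jiffies, so this value directly sets how often
+ * the per-CPU load is sampled.)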
+ */ +static unsigned int sample_rate_jiffies = 2; + +/*************** End of tunables ***************/ + +static atomic_t active_count = ATOMIC_INIT(0); + +struct asswax_info_s { + struct cpufreq_policy *cur_policy; + struct cpufreq_frequency_table *freq_table; + struct timer_list timer; + u64 time_in_idle; + u64 idle_exit_time; + u64 freq_change_time; + u64 freq_change_time_in_idle; + int cur_cpu_load; + int old_freq; + int ramp_dir; + unsigned int enable; + int ideal_speed; +}; +static DEFINE_PER_CPU(struct asswax_info_s, asswax_info); + +/* Workqueues handle frequency scaling */ +static struct workqueue_struct *up_wq; +static struct workqueue_struct *down_wq; +static struct work_struct freq_scale_work; + +static cpumask_t work_cpumask; +static spinlock_t cpumask_lock; + +static unsigned int asswax_state = 1; // 0 = suspend, 1 = awake, 2 = interactive, 3 = touched + +//#define DEBUG +#ifndef DEBUG +#define dprintk(x...) do { } while (0) +#else +#define dprintk(flag,msg...) do { \ + if (debug_mask & flag) printk(KERN_DEBUG msg); \ + } while (0) + +enum { + ASSWAX_DEBUG_JUMPS=1, + ASSWAX_DEBUG_LOAD=2, + ASSWAX_DEBUG_ALG=4 +}; + +/* + * Combination of the above debug flags. + */ +static unsigned long debug_mask = 7; +#endif + +static int cpufreq_governor_asswax(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ASSWAX +static +#endif +struct cpufreq_governor cpufreq_gov_asswax = { + .name = "asswax", + .governor = cpufreq_governor_asswax, + .max_transition_latency = 9000000, + .owner = THIS_MODULE, +}; + +static void asswax_update_min_max(struct asswax_info_s *this_asswax, struct cpufreq_policy *policy, int state) { + int tmp = 0; + dprintk(ASSWAX_DEBUG_ALG, "asswax entering state %i on cpu %u\n", state, policy->cpu); + switch (state) { + case 0: + tmp = sleep_ideal_freq; + break; + case 1: + tmp = awake_ideal_freq; + break; + case 2: + case 3: + tmp = interactive_ideal_freq; + break; + } + this_asswax->ideal_speed = + policy->max > tmp ? (tmp > policy->min ? 
tmp : policy->min) : policy->max; +} + +static void asswax_update_min_max_allcpus(void) { + unsigned int i; + for_each_online_cpu(i) { + struct asswax_info_s *this_asswax = &per_cpu(asswax_info, i); + if (this_asswax->enable) + asswax_update_min_max(this_asswax,this_asswax->cur_policy,asswax_state); + } +} + +inline static unsigned int validate_freq(struct cpufreq_policy *policy, int freq) { + if (freq > (int)policy->max) + return policy->max; + if (freq < (int)policy->min) + return policy->min; + return freq; +} + +inline static void reset_timer(unsigned long cpu, struct asswax_info_s *this_asswax) { + this_asswax->time_in_idle = get_cpu_idle_time_us(cpu, &this_asswax->idle_exit_time); + mod_timer(&this_asswax->timer, jiffies + sample_rate_jiffies); +} + +inline static void work_cpumask_set(unsigned long cpu) { + unsigned long flags; + spin_lock_irqsave(&cpumask_lock, flags); + cpumask_set_cpu(cpu, &work_cpumask); + spin_unlock_irqrestore(&cpumask_lock, flags); +} + +inline static int work_cpumask_test_and_clear(unsigned long cpu) { + unsigned long flags; + int res = 0; + spin_lock_irqsave(&cpumask_lock, flags); + res = cpumask_test_and_clear_cpu(cpu, &work_cpumask); + spin_unlock_irqrestore(&cpumask_lock, flags); + return res; +} + +static void do_disable_interaction(unsigned long data) { + asswax_state = 1; + asswax_update_min_max_allcpus(); +} +static DEFINE_TIMER(interaction_timer, do_disable_interaction, 0, 0); +static inline void begin_interaction_timeout(void) { + mod_timer(&interaction_timer, jiffies + interactive_timeout); +} +static inline void end_interaction_timeout(void) { + if (timer_pending(&interaction_timer)) + del_timer(&interaction_timer); +} + + +inline static int target_freq(struct cpufreq_policy *policy, struct asswax_info_s *this_asswax, + int new_freq, int old_freq, int prefered_relation) { + int index, target; + struct cpufreq_frequency_table *table = this_asswax->freq_table; + + if (new_freq == old_freq) + return 0; + new_freq = validate_freq(policy,new_freq); + if (new_freq == old_freq) + return 0; + + if (table && + !cpufreq_frequency_table_target(policy,table,new_freq,prefered_relation,&index)) + { + target = table[index].frequency; + if (target == old_freq) { + // if for example we are ramping up to *at most* current + ramp_up_step + // but there is no such frequency higher than the current, try also + // to ramp up to *at least* current + ramp_up_step. + if (new_freq > old_freq && prefered_relation==CPUFREQ_RELATION_H + && !cpufreq_frequency_table_target(policy,table,new_freq, + CPUFREQ_RELATION_L,&index)) + target = table[index].frequency; + // simlarly for ramping down: + else if (new_freq < old_freq && prefered_relation==CPUFREQ_RELATION_L + && !cpufreq_frequency_table_target(policy,table,new_freq, + CPUFREQ_RELATION_H,&index)) + target = table[index].frequency; + } + + if (target == old_freq) { + // We should not get here: + // If we got here we tried to change to a validated new_freq which is different + // from old_freq, so there is no reason for us to remain at same frequency. 
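+ // (For reference, the two-pass lookup above normally resolves this: with
+ // a hypothetical table of {384000, 594000, 810000, 1188000} kHz,
+ // old_freq = 810000 and ramp_up_step = 192000 give new_freq = 1002000;
+ // CPUFREQ_RELATION_H alone would pick 810000 again, and the retry with
+ // CPUFREQ_RELATION_L selects 1188000. Reaching this branch means even
+ // that retry still produced old_freq.)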
+ printk(KERN_WARNING "Smartass: frequency change failed: %d to %d => %d\n", + old_freq,new_freq,target); + return 0; + } + } + else target = new_freq; + + __cpufreq_driver_target(policy, target, prefered_relation); + + dprintk(ASSWAX_DEBUG_JUMPS,"SmartassQ: jumping from %d to %d => %d (%d)\n", + old_freq,new_freq,target,policy->cur); + + return target; +} + +static void cpufreq_asswax_timer(unsigned long cpu) +{ + u64 delta_idle; + u64 delta_time; + int cpu_load; + int old_freq; + u64 update_time; + u64 now_idle; + int queued_work = 0; + struct asswax_info_s *this_asswax = &per_cpu(asswax_info, cpu); + struct cpufreq_policy *policy = this_asswax->cur_policy; + + now_idle = get_cpu_idle_time_us(cpu, &update_time); + old_freq = policy->cur; + + if (this_asswax->idle_exit_time == 0 || update_time == this_asswax->idle_exit_time) + return; + + delta_idle = (now_idle - this_asswax->time_in_idle); + delta_time = (update_time - this_asswax->idle_exit_time); + + // If timer ran less than 1ms after short-term sample started, retry. + if (delta_time < 1000) { + if (!timer_pending(&this_asswax->timer)) + reset_timer(cpu,this_asswax); + return; + } + + if (delta_idle > delta_time) + cpu_load = 0; + else + cpu_load = 100 * (unsigned int)(delta_time - delta_idle) / (unsigned int)delta_time; + + dprintk(ASSWAX_DEBUG_LOAD,"asswaxT @ %d: load %d (delta_time %llu)\n", + old_freq,cpu_load,delta_time); + + this_asswax->cur_cpu_load = cpu_load; + this_asswax->old_freq = old_freq; + + // Scale up if load is above max or if there where no idle cycles since coming out of idle, + // additionally, if we are at or above the ideal_speed, verify we have been at this frequency + // for at least up_rate_us: + if (cpu_load > max_cpu_load || delta_idle == 0) + { + if (old_freq < policy->max && + (old_freq < this_asswax->ideal_speed || delta_idle == 0 || + (update_time - this_asswax->freq_change_time) >= up_rate_us)) + { + dprintk(ASSWAX_DEBUG_ALG,"asswaxT @ %d ramp up: load %d (delta_idle %llu)\n", + old_freq,cpu_load,delta_idle); + this_asswax->ramp_dir = 1; + work_cpumask_set(cpu); + queue_work(up_wq, &freq_scale_work); + queued_work = 1; + if (asswax_state == 2 && old_freq == this_asswax->ideal_speed) + end_interaction_timeout(); + } + else this_asswax->ramp_dir = 0; + } + // Similarly for scale down: load should be below min and if we are at or below ideal + // frequency we require that we have been at this frequency for at least down_rate_us: + else if (cpu_load < min_cpu_load && old_freq > policy->min && + (old_freq > this_asswax->ideal_speed || + (update_time - this_asswax->freq_change_time) >= down_rate_us)) + { + dprintk(ASSWAX_DEBUG_ALG,"asswaxT @ %d ramp down: load %d (delta_idle %llu)\n", + old_freq,cpu_load,delta_idle); + this_asswax->ramp_dir = -1; + work_cpumask_set(cpu); + queue_work(down_wq, &freq_scale_work); + queued_work = 1; + } + else this_asswax->ramp_dir = 0; + + // To avoid unnecessary load when the CPU is already at high load, we don't + // reset ourselves if we are at max speed. If and when there are idle cycles, + // the idle loop will activate the timer. + // Additionally, if we queued some work, the work task will reset the timer + // after it has done its adjustments. 
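+ // (Summarizing the decision above with the default tunables: with
+ // max_cpu_load = 85, min_cpu_load = 45 and an ideal speed of 594000 kHz,
+ // a CPU at 810000 kHz and 30% load ramps down immediately because it is
+ // above the ideal speed; at or below the ideal speed it must also have
+ // spent down_rate_us at the current frequency. The symmetric rule,
+ // using up_rate_us, governs ramping up.)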
+ if (!queued_work && old_freq < policy->max) + reset_timer(cpu,this_asswax); +} + +static int cpufreq_idle_notifier(struct notifier_block *nb, + unsigned long val, void *data) { + struct asswax_info_s *this_asswax = &per_cpu(asswax_info, smp_processor_id()); + struct cpufreq_policy *policy = this_asswax->cur_policy; + + if (!this_asswax->enable) + return NOTIFY_DONE; + + if (val == IDLE_START) { + if (policy->cur == policy->max && !timer_pending(&this_asswax->timer)) { + reset_timer(smp_processor_id(), this_asswax); + } else if (policy->cur == policy->min) { + if (timer_pending(&this_asswax->timer)) + del_timer(&this_asswax->timer); + else if (asswax_state == 2) + begin_interaction_timeout(); + } + } else if (val == IDLE_END) { + if (policy->cur == policy->min && !timer_pending(&this_asswax->timer)) + reset_timer(smp_processor_id(), this_asswax); + } + + return NOTIFY_OK; +} +static struct notifier_block cpufreq_idle_nb = { + .notifier_call = cpufreq_idle_notifier, +}; + +/* We use the same work function to sale up and down */ +static void cpufreq_asswax_freq_change_time_work(struct work_struct *work) +{ + unsigned int cpu; + int new_freq; + int old_freq; + int ramp_dir; + struct asswax_info_s *this_asswax; + struct cpufreq_policy *policy; + unsigned int relation = CPUFREQ_RELATION_L; + for_each_possible_cpu(cpu) { + if (!work_cpumask_test_and_clear(cpu)) + continue; + this_asswax = &per_cpu(asswax_info, cpu); + + ramp_dir = this_asswax->ramp_dir; + this_asswax->ramp_dir = 0; + + old_freq = this_asswax->old_freq; + policy = this_asswax->cur_policy; + + if (old_freq != policy->cur) { + // frequency was changed by someone else? + // Removing printk to prevent dmesg flooding while using CPU Master or other 3rd Party Cpu freq profilers + //printk(KERN_WARNING "Smartass: frequency changed by 3rd party: %d to %d\n", + // old_freq,policy->cur); + new_freq = old_freq; + } + else if (ramp_dir > 0 && nr_running() > 1) { + // ramp up logic: + if (old_freq < this_asswax->ideal_speed) + new_freq = this_asswax->ideal_speed; + else if (ramp_up_step) { + new_freq = old_freq + ramp_up_step; + relation = CPUFREQ_RELATION_H; + } + else { + new_freq = policy->max; + relation = CPUFREQ_RELATION_H; + } + dprintk(ASSWAX_DEBUG_ALG,"asswaxQ @ %d ramp up: ramp_dir=%d ideal=%d\n", + old_freq,ramp_dir,this_asswax->ideal_speed); + } + else if (ramp_dir < 0) { + // ramp down logic: + if (old_freq > this_asswax->ideal_speed) { + new_freq = this_asswax->ideal_speed; + relation = CPUFREQ_RELATION_H; + } + else if (ramp_down_step) + new_freq = old_freq - ramp_down_step; + else { + // Load heuristics: Adjust new_freq such that, assuming a linear + // scaling of load vs. frequency, the load in the new frequency + // will be max_cpu_load: + new_freq = old_freq * this_asswax->cur_cpu_load / max_cpu_load; + if (new_freq > old_freq) // min_cpu_load > max_cpu_load ?! + new_freq = old_freq -1; + } + dprintk(ASSWAX_DEBUG_ALG,"asswaxQ @ %d ramp down: ramp_dir=%d ideal=%d\n", + old_freq,ramp_dir,this_asswax->ideal_speed); + } + else { // ramp_dir==0 ?! Could the timer change its mind about a queued ramp up/down + // before the work task gets to run? 
+ // This may also happen if we refused to ramp up because the nr_running()==1 + new_freq = old_freq; + dprintk(ASSWAX_DEBUG_ALG,"asswaxQ @ %d nothing: ramp_dir=%d nr_running=%lu\n", + old_freq,ramp_dir,nr_running()); + } + + // do actual ramp up (returns 0, if frequency change failed): + new_freq = target_freq(policy,this_asswax,new_freq,old_freq,relation); + if (new_freq) + this_asswax->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu,&this_asswax->freq_change_time); + + // reset timer: + if (new_freq < policy->max) + reset_timer(cpu,this_asswax); + // if we are maxed out, it is pointless to use the timer + // (idle cycles wake up the timer when the timer comes) + else if (timer_pending(&this_asswax->timer)) + del_timer(&this_asswax->timer); + + cpufreq_notify_utilization(policy, + (this_asswax->cur_cpu_load * policy->cur) / policy->max); + } +} + +#ifdef DEBUG +static ssize_t show_debug_mask(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", debug_mask); +} + +static ssize_t store_debug_mask(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0) + debug_mask = input; + return count; +} +#endif + +static ssize_t show_up_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", up_rate_us); +} + +static ssize_t store_up_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0 && input <= 100000000) + up_rate_us = input; + return count; +} + +static ssize_t show_down_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", down_rate_us); +} + +static ssize_t store_down_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0 && input <= 100000000) + down_rate_us = input; + return count; +} + +static ssize_t show_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", sleep_ideal_freq); +} + +static ssize_t store_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) { + sleep_ideal_freq = input; + if (asswax_state == 0) + asswax_update_min_max_allcpus(); + } + return count; +} + +static ssize_t show_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", sleep_wakeup_freq); +} + +static ssize_t store_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + sleep_wakeup_freq = input; + return count; +} + +static ssize_t show_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", awake_ideal_freq); +} + +static ssize_t store_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) { + awake_ideal_freq = input; + if (asswax_state == 1) + asswax_update_min_max_allcpus(); + } + return 
count; +} + +static ssize_t show_interactive_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", interactive_ideal_freq); +} + +static ssize_t store_interactive_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) { + interactive_ideal_freq = input; + if (asswax_state == 1) + asswax_update_min_max_allcpus(); + } + return count; +} + +static ssize_t show_interactive_timeout_jiffies(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", interactive_timeout); +} + +static ssize_t store_interactive_timeout_jiffies(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) { + interactive_timeout = input; + if (asswax_state == 1) + asswax_update_min_max_allcpus(); + } + return count; +} + +static ssize_t show_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", sample_rate_jiffies); +} + +static ssize_t store_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input <= 1000) + sample_rate_jiffies = input; + return count; +} + +static ssize_t show_ramp_up_step(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", ramp_up_step); +} + +static ssize_t store_ramp_up_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + ramp_up_step = input; + return count; +} + +static ssize_t show_ramp_down_step(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", ramp_down_step); +} + +static ssize_t store_ramp_down_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + ramp_down_step = input; + return count; +} + +static ssize_t show_max_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", max_cpu_load); +} + +static ssize_t store_max_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input <= 100) + max_cpu_load = input; + return count; +} + +static ssize_t show_min_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", min_cpu_load); +} + +static ssize_t store_min_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input < 100) + min_cpu_load = input; + return count; +} + +#define define_global_rw_attr(_name) \ +static struct global_attr _name##_attr = \ + __ATTR(_name, 0644, show_##_name, store_##_name) + +#ifdef DEBUG +define_global_rw_attr(debug_mask); +#endif +define_global_rw_attr(up_rate_us); +define_global_rw_attr(down_rate_us); +define_global_rw_attr(sleep_ideal_freq); +define_global_rw_attr(sleep_wakeup_freq); 
+define_global_rw_attr(awake_ideal_freq); +define_global_rw_attr(interactive_ideal_freq); +define_global_rw_attr(interactive_timeout_jiffies); +define_global_rw_attr(sample_rate_jiffies); +define_global_rw_attr(ramp_up_step); +define_global_rw_attr(ramp_down_step); +define_global_rw_attr(max_cpu_load); +define_global_rw_attr(min_cpu_load); + +static struct attribute * asswax_attributes[] = { +#ifdef DEBUG + &debug_mask_attr.attr, +#endif + &up_rate_us_attr.attr, + &down_rate_us_attr.attr, + &sleep_ideal_freq_attr.attr, + &sleep_wakeup_freq_attr.attr, + &awake_ideal_freq_attr.attr, + &interactive_ideal_freq_attr.attr, + &interactive_timeout_jiffies_attr.attr, + &sample_rate_jiffies_attr.attr, + &ramp_up_step_attr.attr, + &ramp_down_step_attr.attr, + &max_cpu_load_attr.attr, + &min_cpu_load_attr.attr, + NULL, +}; + +static struct attribute_group asswax_attr_group = { + .attrs = asswax_attributes, + .name = "asswax", +}; + +static int cpufreq_governor_asswax(struct cpufreq_policy *new_policy, + unsigned int event) +{ + unsigned int cpu = new_policy->cpu; + int rc; + struct asswax_info_s *this_asswax = &per_cpu(asswax_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!new_policy->cur)) + return -EINVAL; + + this_asswax->cur_policy = new_policy; + + this_asswax->enable = 1; + + asswax_update_min_max(this_asswax,new_policy,asswax_state); + + this_asswax->freq_table = cpufreq_frequency_get_table(cpu); + if (!this_asswax->freq_table) + printk(KERN_WARNING "Smartass: no frequency table for cpu %d?!\n",cpu); + + // Do not register the idle hook and create sysfs + // entries if we have already done so. + if (atomic_inc_return(&active_count) <= 1) { + rc = sysfs_create_group(cpufreq_global_kobject, + &asswax_attr_group); + if (rc) + return rc; + + idle_notifier_register(&cpufreq_idle_nb); + } + + //if (this_asswax->cur_policy->cur < new_policy->max && !timer_pending(&this_asswax->timer)) + if (!timer_pending(&this_asswax->timer)) + reset_timer(cpu,this_asswax); + + break; + + case CPUFREQ_GOV_LIMITS: + asswax_update_min_max(this_asswax,new_policy,asswax_state); + + if (this_asswax->cur_policy->cur > new_policy->max) { + dprintk(ASSWAX_DEBUG_JUMPS,"SmartassI: jumping to new max freq: %d\n",new_policy->max); + __cpufreq_driver_target(this_asswax->cur_policy, + new_policy->max, CPUFREQ_RELATION_H); + } + else if (this_asswax->cur_policy->cur < new_policy->min) { + dprintk(ASSWAX_DEBUG_JUMPS,"SmartassI: jumping to new min freq: %d\n",new_policy->min); + __cpufreq_driver_target(this_asswax->cur_policy, + new_policy->min, CPUFREQ_RELATION_L); + } + + if (this_asswax->cur_policy->cur < new_policy->max && !timer_pending(&this_asswax->timer)) + reset_timer(cpu,this_asswax); + + break; + + case CPUFREQ_GOV_STOP: + this_asswax->enable = 0; + del_timer(&this_asswax->timer); + flush_work(&freq_scale_work); + this_asswax->idle_exit_time = 0; + + if (atomic_dec_return(&active_count) < 1) { + sysfs_remove_group(cpufreq_global_kobject, + &asswax_attr_group); + idle_notifier_unregister(&cpufreq_idle_nb); + } + break; + } + + return 0; +} + +static void asswax_suspend(int cpu, int suspend) +{ + struct asswax_info_s *this_asswax = &per_cpu(asswax_info, smp_processor_id()); + struct cpufreq_policy *policy = this_asswax->cur_policy; + unsigned int new_freq; + + if (!this_asswax->enable) + return; + + asswax_update_min_max(this_asswax,policy,suspend); + if (!suspend) { // resume at max speed: + new_freq = validate_freq(policy,sleep_wakeup_freq); + + 
dprintk(ASSWAX_DEBUG_JUMPS,"SmartassS: awaking at %d\n",new_freq); + + __cpufreq_driver_target(policy, new_freq, + CPUFREQ_RELATION_L); + } else { + // to avoid wakeup issues with quick sleep/wakeup don't change actual frequency when entering sleep + // to allow some time to settle down. Instead we just reset our statistics (and reset the timer). + // Eventually, the timer will adjust the frequency if necessary. + + this_asswax->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu,&this_asswax->freq_change_time); + + dprintk(ASSWAX_DEBUG_JUMPS,"SmartassS: suspending at %d\n",policy->cur); + } + + reset_timer(smp_processor_id(),this_asswax); +} + +static void asswax_early_suspend(struct early_suspend *handler) { + int i; + if (asswax_state == 0 || sleep_ideal_freq==0) // disable behavior for sleep_ideal_freq==0 + return; + asswax_state = 0; + for_each_online_cpu(i) + asswax_suspend(i,0); +} + +static void asswax_late_resume(struct early_suspend *handler) { + int i; + if (asswax_state > 0) // already not suspended so nothing to do + return; + asswax_state = 1; + for_each_online_cpu(i) + asswax_suspend(i,1); +} + +static struct early_suspend asswax_power_suspend = { + .suspend = asswax_early_suspend, + .resume = asswax_late_resume, +#ifdef CONFIG_MACH_HERO + .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, +#endif +}; + +static int __init cpufreq_asswax_init(void) +{ + unsigned int i; + struct asswax_info_s *this_asswax; + + spin_lock_init(&cpumask_lock); + + /* Initalize per-cpu data: */ + for_each_possible_cpu(i) { + this_asswax = &per_cpu(asswax_info, i); + this_asswax->enable = 0; + this_asswax->cur_policy = 0; + this_asswax->ramp_dir = 0; + this_asswax->time_in_idle = 0; + this_asswax->idle_exit_time = 0; + this_asswax->freq_change_time = 0; + this_asswax->freq_change_time_in_idle = 0; + this_asswax->cur_cpu_load = 0; + // intialize timer: + init_timer_deferrable(&this_asswax->timer); + this_asswax->timer.function = cpufreq_asswax_timer; + this_asswax->timer.data = i; + work_cpumask_test_and_clear(i); + } + + // Scale up is high priority + up_wq = create_workqueue("kasswax_up"); + down_wq = create_workqueue("kasswax_down"); + if (!up_wq || !down_wq) + return -ENOMEM; + + INIT_WORK(&freq_scale_work, cpufreq_asswax_freq_change_time_work); + + register_early_suspend(&asswax_power_suspend); + + return cpufreq_register_governor(&cpufreq_gov_asswax); +} + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ASSWAX +fs_initcall(cpufreq_asswax_init); +#else +module_init(cpufreq_asswax_init); +#endif + +static void __exit cpufreq_asswax_exit(void) +{ + end_interaction_timeout(); + cpufreq_unregister_governor(&cpufreq_gov_asswax); + destroy_workqueue(up_wq); + destroy_workqueue(down_wq); +} + +module_exit(cpufreq_asswax_exit); + +MODULE_AUTHOR ("godmachine81 rewrite- original author of Smartass and Brazilian Wax - Erasmux"); +MODULE_DESCRIPTION ("'cpufreq_asswax' - A combination of Brazilian Wax and Smartass"); +MODULE_LICENSE ("GPL"); \ No newline at end of file diff --git a/drivers/cpufreq/cpufreq_dancedance.c b/drivers/cpufreq/cpufreq_dancedance.c new file mode 100644 index 00000000..0811bd95 --- /dev/null +++ b/drivers/cpufreq/cpufreq_dancedance.c @@ -0,0 +1,650 @@ +/* + * drivers/cpufreq/cpufreq_dancedance.c + * + * Copyright (C) 2012 Shaun Nuzzo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_UP_THRESHOLD (90) +#define DEF_FREQUENCY_DOWN_THRESHOLD (30) +#define MIN_SAMPLING_RATE_RATIO (2) + +static unsigned int min_sampling_rate; + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define DEF_SAMPLING_DOWN_FACTOR (1) +#define MAX_SAMPLING_DOWN_FACTOR (10) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_DANCEDANCE +static +#endif +struct cpufreq_governor cpufreq_gov_dancedance = { + .name = "dancedance", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct cpufreq_frequency_table *freq_table; + unsigned int down_skip; + unsigned int requested_freq; + unsigned int freq_lo; + unsigned int freq_lo_jiffies; + unsigned int freq_hi_jiffies; + unsigned int rate_mult; + int cpu; + unsigned int sample_type:1; + unsigned long long prev_idletime; + unsigned long long prev_idleusage; + unsigned int enable:1; + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects dbs_enable in governor start/stop. 
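+ * Each CPU's timer_mutex (in struct cpu_dbs_info_s) separately serializes
+ * the sampling work in do_dbs_timer() against limit updates coming in
+ * through CPUFREQ_GOV_LIMITS.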
+ */ +static DEFINE_MUTEX(dbs_mutex); + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int ignore_nice; + unsigned int sampling_down_factor; + unsigned int powersave_bias; + unsigned int io_is_busy; + unsigned int target_residency; + unsigned int allowed_misses; + unsigned int freq_step; + unsigned int down_threshold; +} dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .ignore_nice = 0, + .freq_step = 5, +}; + +static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, + u64 *wall) +{ + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + + idle_time = cur_wall_time - busy_time; + if (wall) + *wall = jiffies_to_usecs(cur_wall_time); + + return jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, wall); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + + return idle_time; +} + +/* keep track of frequency transitions */ +static int +dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_freqs *freq = data; + struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info, + freq->cpu); + + struct cpufreq_policy *policy; + + if (!this_dbs_info->enable) + return 0; + + policy = this_dbs_info->cur_policy; + + /* + * we only care if our internally tracked freq moves outside + * the 'valid' ranges of freqency available to us otherwise + * we do not change it + */ + if (this_dbs_info->requested_freq > policy->max + || this_dbs_info->requested_freq < policy->min) + this_dbs_info->requested_freq = freq->new; + + return 0; +} + +static struct notifier_block dbs_cpufreq_notifier_block = { + .notifier_call = dbs_cpufreq_notifier +}; + +/************************** sysfs interface ************************/ +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +define_one_global_ro(sampling_rate_min); + +/* cpufreq_dancedance Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(sampling_down_factor, sampling_down_factor); +show_one(up_threshold, up_threshold); +show_one(down_threshold, down_threshold); +show_one(ignore_nice_load, ignore_nice); +show_one(freq_step, freq_step); + +static ssize_t store_sampling_down_factor(struct kobject *a, + struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + + dbs_tuners_ins.sampling_down_factor = input; + return count; +} + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, 
size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > 100 || + input <= dbs_tuners_ins.down_threshold) + return -EINVAL; + + dbs_tuners_ins.up_threshold = input; + return count; +} + +static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + /* cannot be lower than 11 otherwise freq will not fall */ + if (ret != 1 || input < 11 || input > 100 || + input >= dbs_tuners_ins.up_threshold) + return -EINVAL; + + dbs_tuners_ins.down_threshold = input; + return count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */ + return count; + + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(cs_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + return count; +} + +static ssize_t store_freq_step(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input > 100) + input = 100; + + /* no need to test here if freq_step is zero as the user might actually + * want this, they would be crazy though :) */ + dbs_tuners_ins.freq_step = input; + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(sampling_down_factor); +define_one_global_rw(up_threshold); +define_one_global_rw(down_threshold); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(freq_step); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_min.attr, + &sampling_rate.attr, + &sampling_down_factor.attr, + &up_threshold.attr, + &down_threshold.attr, + &ignore_nice_load.attr, + &freq_step.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "dancedance", +}; + +/************************** sysfs end ************************/ + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + unsigned int load = 0; + unsigned int max_load = 0; + unsigned int freq_target; + + struct cpufreq_policy *policy; + unsigned int j; + + policy = this_dbs_info->cur_policy; + + /* + * Every sampling_rate, we check, if current idle time is less + * than 20% (default), then we try to increase frequency + * Every sampling_rate*sampling_down_factor, we check, if current + * idle time is more than 80%, then we try to decrease frequency + * + * Any frequency increase takes it to the maximum frequency. 
+ * Frequency reduction happens at minimum steps of + * 5% (default) of maximum frequency + */ + + /* Get Absolute Load */ + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time; + unsigned int idle_time, wall_time; + + j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + + wall_time = (unsigned int) (cur_wall_time - j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) (cur_idle_time - j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + if (dbs_tuners_ins.ignore_nice) { + cputime64_t cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_dbs_info->prev_cpu_nice; + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + + if (load > max_load) + max_load = load; + } + + /* + * break out if we 'cannot' reduce the speed as the user might + * want freq_step to be zero + */ + if (dbs_tuners_ins.freq_step == 0) + return; + + /* Check for frequency increase */ + if (max_load > dbs_tuners_ins.up_threshold) { + this_dbs_info->down_skip = 0; + + /* if we are already at full speed then break out early */ + if (this_dbs_info->requested_freq == policy->max) + return; + + freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; + + /* max freq cannot be less than 100. But who knows.... */ + if (unlikely(freq_target == 0)) + freq_target = 5; + + this_dbs_info->requested_freq += freq_target; + if (this_dbs_info->requested_freq > policy->max) + this_dbs_info->requested_freq = policy->max; + + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, + CPUFREQ_RELATION_H); + return; + } + + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus 10 points under the threshold. 
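+ *
+ * A worked example with the defaults here (freq_step = 5,
+ * down_threshold = 30) and a hypothetical policy->max of 1,512,000 kHz:
+ * each step moves requested_freq by 5% of max, i.e. 75,600 kHz, and this
+ * down step only fires once the measured load drops below 30 - 10 = 20%.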
+ */ + if (max_load < (dbs_tuners_ins.down_threshold - 10)) { + freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; + + this_dbs_info->requested_freq -= freq_target; + if (this_dbs_info->requested_freq < policy->min) + this_dbs_info->requested_freq = policy->min; + + /* + * if we cannot reduce the frequency anymore, break out early + */ + if (policy->cur == policy->min) + return; + + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, + CPUFREQ_RELATION_H); + return; + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + int sample_type = dbs_info->sample_type; + + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + delay -= jiffies % delay; + + mutex_lock(&dbs_info->timer_mutex); + + /* Common NORMAL_SAMPLE setup */ + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + if (!dbs_tuners_ins.powersave_bias || + sample_type == DBS_NORMAL_SAMPLE) { + dbs_check_cpu(dbs_info); + if (dbs_info->freq_lo) { + /* Setup timer for SUB_SAMPLE */ + dbs_info->sample_type = DBS_SUB_SAMPLE; + delay = dbs_info->freq_hi_jiffies; + } else { + /* We want all CPUs to do sampling nearly on + * same jiffy + */ + delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate + * dbs_info->rate_mult); + if (num_online_cpus() > 1) + delay -= jiffies % delay; + } + } else { + __cpufreq_driver_target(dbs_info->cur_policy, + dbs_info->freq_lo, CPUFREQ_RELATION_H); + delay = dbs_info->freq_lo_jiffies; + } + + schedule_delayed_work_on(cpu, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + dbs_info->enable = 1; + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + dbs_info->enable = 0; + cancel_delayed_work_sync(&dbs_info->work); +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_mutex); + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + } + this_dbs_info->down_skip = 0; + this_dbs_info->requested_freq = policy->cur; + + mutex_init(&this_dbs_info->timer_mutex); + dbs_enable++; + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + /* policy latency is in nS. 
Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + /* + * conservative does not implement micro like ondemand + * governor, thus we are bound to jiffes/HZ + */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = + max(min_sampling_rate, + latency * LATENCY_MULTIPLIER); + + cpufreq_register_notifier( + &dbs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + mutex_unlock(&dbs_mutex); + + dbs_timer_init(this_dbs_info); + + break; + + case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + dbs_enable--; + mutex_destroy(&this_dbs_info->timer_mutex); + + /* + * Stop the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 0) + cpufreq_unregister_notifier( + &dbs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + mutex_unlock(&dbs_mutex); + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target( + this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target( + this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&this_dbs_info->timer_mutex); + + break; + } + return 0; +} + +static int __init cpufreq_gov_dbs_init(void) +{ + return cpufreq_register_governor(&cpufreq_gov_dancedance); +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_dancedance); +} + +MODULE_AUTHOR("Shaun Nuzzo "); +MODULE_DESCRIPTION("'cpufreq_dancedance' - A dynamic cpufreq governor for " + "Low Latency Frequency Transition capable processors " + "optimised for use in a battery environment" + "Modified code based off conservative with a faster" + "deep sleep rate"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_DANCEDANCE +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c new file mode 100644 index 00000000..a5983c1e --- /dev/null +++ b/drivers/cpufreq/cpufreq_interactive.c @@ -0,0 +1,1259 @@ +/* + * drivers/cpufreq/cpufreq_interactive.c + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Author: Mike Chan (mike@android.com) + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +static int active_count; + +struct cpufreq_interactive_cpuinfo { + struct timer_list cpu_timer; + struct timer_list cpu_slack_timer; + spinlock_t load_lock; /* protects the next 4 fields */ + u64 time_in_idle; + u64 time_in_idle_timestamp; + u64 cputime_speedadj; + u64 cputime_speedadj_timestamp; + struct cpufreq_policy *policy; + struct cpufreq_frequency_table *freq_table; + unsigned int target_freq; + unsigned int floor_freq; + u64 floor_validate_time; + u64 hispeed_validate_time; + struct rw_semaphore enable_sem; + int governor_enabled; + int cpu_load; +}; + +static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo); + +/* realtime thread handles frequency scaling */ +static struct task_struct *speedchange_task; +static cpumask_t speedchange_cpumask; +static spinlock_t speedchange_cpumask_lock; +static struct mutex gov_lock; + +/* Hi speed to bump to from lo speed when load burst (default max) */ +static unsigned int hispeed_freq; + +/* Go to hi speed when CPU load at or above this value. */ +#define DEFAULT_GO_HISPEED_LOAD 99 +static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD; + +/* Target load. Lower values result in higher CPU speeds. */ +#define DEFAULT_TARGET_LOAD 90 +static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD}; +static spinlock_t target_loads_lock; +static unsigned int *target_loads = default_target_loads; +static int ntarget_loads = ARRAY_SIZE(default_target_loads); + +/* + * The minimum amount of time to spend at a frequency before we can ramp down. + */ +#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC) +static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME; + +/* + * The sample rate of the timer used to increase frequency + */ +#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC) +static unsigned long timer_rate = DEFAULT_TIMER_RATE; + +/* + * Wait this long before raising speed above hispeed, by default a single + * timer interval. + */ +#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE +static unsigned int default_above_hispeed_delay[] = { + DEFAULT_ABOVE_HISPEED_DELAY }; +static spinlock_t above_hispeed_delay_lock; +static unsigned int *above_hispeed_delay = default_above_hispeed_delay; +static int nabove_hispeed_delay = ARRAY_SIZE(default_above_hispeed_delay); + +/* Non-zero means indefinite speed boost active */ +static int boost_val; +/* Duration of a boot pulse in usecs */ +static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME; +/* End time of boost pulse in ktime converted to usecs */ +static u64 boostpulse_endtime; + +/* + * Max additional time to wait in idle, beyond timer_rate, at speeds above + * minimum before wakeup to reduce speed, or -1 if unnecessary. 
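+ * With the defaults below (timer_rate = 20 ms, slack = 4 * timer_rate =
+ * 80 ms) the slack timer is armed 100 ms out whenever the CPU runs above
+ * policy->min, bounding how long an idle CPU can stay at an elevated
+ * speed; a negative timer_slack disables it, since the code only arms it
+ * when timer_slack_val >= 0.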
+ */ +#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE) +static int timer_slack_val = DEFAULT_TIMER_SLACK; + +static bool io_is_busy; + +static int cpufreq_governor_interactive(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE +static +#endif +struct cpufreq_governor cpufreq_gov_interactive = { + .name = "interactive", + .governor = cpufreq_governor_interactive, + .max_transition_latency = 10000000, + .owner = THIS_MODULE, +}; + +static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, + cputime64_t *wall) +{ + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + + idle_time = cur_wall_time - busy_time; + if (wall) + *wall = jiffies_to_usecs(cur_wall_time); + + return jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, + cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, wall); + + if (idle_time == -1ULL) + idle_time = get_cpu_idle_time_jiffy(cpu, wall); + else if (!io_is_busy) + idle_time += get_cpu_iowait_time_us(cpu, wall); + + return idle_time; +} + +static void cpufreq_interactive_timer_resched( + struct cpufreq_interactive_cpuinfo *pcpu) +{ + unsigned long expires; + unsigned long flags; + + spin_lock_irqsave(&pcpu->load_lock, flags); + pcpu->time_in_idle = + get_cpu_idle_time(smp_processor_id(), + &pcpu->time_in_idle_timestamp); + pcpu->cputime_speedadj = 0; + pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp; + expires = jiffies + usecs_to_jiffies(timer_rate); + mod_timer_pinned(&pcpu->cpu_timer, expires); + + if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) { + expires += usecs_to_jiffies(timer_slack_val); + mod_timer_pinned(&pcpu->cpu_slack_timer, expires); + } + + spin_unlock_irqrestore(&pcpu->load_lock, flags); +} + +/* The caller shall take enable_sem write semaphore to avoid any timer race. + * The cpu_timer and cpu_slack_timer must be deactivated when calling this + * function. 
+ */ +static void cpufreq_interactive_timer_start(int cpu) +{ + struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu); + unsigned long expires = jiffies + usecs_to_jiffies(timer_rate); + unsigned long flags; + + pcpu->cpu_timer.expires = expires; + add_timer_on(&pcpu->cpu_timer, cpu); + if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) { + expires += usecs_to_jiffies(timer_slack_val); + pcpu->cpu_slack_timer.expires = expires; + add_timer_on(&pcpu->cpu_slack_timer, cpu); + } + + spin_lock_irqsave(&pcpu->load_lock, flags); + pcpu->time_in_idle = + get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp); + pcpu->cputime_speedadj = 0; + pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp; + spin_unlock_irqrestore(&pcpu->load_lock, flags); +} + +static unsigned int freq_to_above_hispeed_delay(unsigned int freq) +{ + int i; + unsigned int ret; + unsigned long flags; + + spin_lock_irqsave(&above_hispeed_delay_lock, flags); + + for (i = 0; i < nabove_hispeed_delay - 1 && + freq >= above_hispeed_delay[i+1]; i += 2) + ; + + ret = above_hispeed_delay[i]; + spin_unlock_irqrestore(&above_hispeed_delay_lock, flags); + return ret; +} + +static unsigned int freq_to_targetload(unsigned int freq) +{ + int i; + unsigned int ret; + unsigned long flags; + + spin_lock_irqsave(&target_loads_lock, flags); + + for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2) + ; + + ret = target_loads[i]; + spin_unlock_irqrestore(&target_loads_lock, flags); + return ret; +} + +/* + * If increasing frequencies never map to a lower target load then + * choose_freq() will find the minimum frequency that does not exceed its + * target load given the current load. + */ + +static unsigned int choose_freq( + struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq) +{ + unsigned int freq = pcpu->policy->cur; + unsigned int prevfreq, freqmin, freqmax; + unsigned int tl; + int index; + + freqmin = 0; + freqmax = UINT_MAX; + + do { + prevfreq = freq; + tl = freq_to_targetload(freq); + + /* + * Find the lowest frequency where the computed load is less + * than or equal to the target load. + */ + + if (cpufreq_frequency_table_target( + pcpu->policy, pcpu->freq_table, loadadjfreq / tl, + CPUFREQ_RELATION_L, &index)) + break; + freq = pcpu->freq_table[index].frequency; + + if (freq > prevfreq) { + /* The previous frequency is too low. */ + freqmin = prevfreq; + + if (freq >= freqmax) { + /* + * Find the highest frequency that is less + * than freqmax. + */ + if (cpufreq_frequency_table_target( + pcpu->policy, pcpu->freq_table, + freqmax - 1, CPUFREQ_RELATION_H, + &index)) + break; + freq = pcpu->freq_table[index].frequency; + + if (freq == freqmin) { + /* + * The first frequency below freqmax + * has already been found to be too + * low. freqmax is the lowest speed + * we found that is fast enough. + */ + freq = freqmax; + break; + } + } + } else if (freq < prevfreq) { + /* The previous frequency is high enough. */ + freqmax = prevfreq; + + if (freq <= freqmin) { + /* + * Find the lowest frequency that is higher + * than freqmin. + */ + if (cpufreq_frequency_table_target( + pcpu->policy, pcpu->freq_table, + freqmin + 1, CPUFREQ_RELATION_L, + &index)) + break; + freq = pcpu->freq_table[index].frequency; + + /* + * If freqmax is the first frequency above + * freqmin then we have already found that + * this speed is fast enough. + */ + if (freq == freqmax) + break; + } + } + + /* If same frequency chosen as previous then done. 
*/ + } while (freq != prevfreq); + + return freq; +} + +static u64 update_load(int cpu) +{ + struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu); + u64 now; + u64 now_idle; + unsigned int delta_idle; + unsigned int delta_time; + u64 active_time; + + now_idle = get_cpu_idle_time(cpu, &now); + delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle); + delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp); + + if (delta_time <= delta_idle) + active_time = 0; + else + active_time = delta_time - delta_idle; + + pcpu->cputime_speedadj += active_time * pcpu->policy->cur; + + pcpu->time_in_idle = now_idle; + pcpu->time_in_idle_timestamp = now; + return now; +} + +static void cpufreq_interactive_timer(unsigned long data) +{ + u64 now; + unsigned int delta_time; + u64 cputime_speedadj; + int cpu_load; + struct cpufreq_interactive_cpuinfo *pcpu = + &per_cpu(cpuinfo, data); + unsigned int new_freq; + unsigned int loadadjfreq; + unsigned int index; + unsigned long flags; + bool boosted; + + if (!down_read_trylock(&pcpu->enable_sem)) + return; + if (!pcpu->governor_enabled) + goto exit; + + spin_lock_irqsave(&pcpu->load_lock, flags); + now = update_load(data); + delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp); + cputime_speedadj = pcpu->cputime_speedadj; + spin_unlock_irqrestore(&pcpu->load_lock, flags); + + if (WARN_ON_ONCE(!delta_time)) + goto rearm; + + do_div(cputime_speedadj, delta_time); + loadadjfreq = (unsigned int)cputime_speedadj * 100; + cpu_load = loadadjfreq / pcpu->target_freq; + boosted = boost_val || now < boostpulse_endtime; + + pcpu->cpu_load = cpu_load; + + if (cpu_load >= go_hispeed_load || boosted) { + if (pcpu->target_freq < hispeed_freq) { + new_freq = hispeed_freq; + } else { + new_freq = choose_freq(pcpu, loadadjfreq); + + if (new_freq < hispeed_freq) + new_freq = hispeed_freq; + } + } else { + new_freq = choose_freq(pcpu, loadadjfreq); + } + + if (kt_freq_control[1] == 0 && pcpu->target_freq >= hispeed_freq && + new_freq > pcpu->target_freq && + now - pcpu->hispeed_validate_time < + freq_to_above_hispeed_delay(pcpu->target_freq)) { + trace_cpufreq_interactive_notyet( + data, cpu_load, pcpu->target_freq, + pcpu->policy->cur, new_freq); + goto rearm; + } + + pcpu->hispeed_validate_time = now; + + if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table, + new_freq, CPUFREQ_RELATION_L, + &index)) + goto rearm; + + new_freq = pcpu->freq_table[index].frequency; + + /* + * Do not scale below floor_freq unless we have been at or above the + * floor frequency for the minimum sample time since last validated. + */ + if (kt_freq_control[1] == 0 && new_freq < pcpu->floor_freq) { + if (now - pcpu->floor_validate_time < min_sample_time) { + trace_cpufreq_interactive_notyet( + data, cpu_load, pcpu->target_freq, + pcpu->policy->cur, new_freq); + goto rearm; + } + } + + /* + * Update the timestamp for checking whether speed has been held at + * or above the selected frequency for a minimum of min_sample_time, + * if not boosted to hispeed_freq. If boosted to hispeed_freq then we + * allow the speed to drop as soon as the boostpulse duration expires + * (or the indefinite boost is turned off). 
+ */ + + if (!boosted || new_freq > hispeed_freq) { + pcpu->floor_freq = new_freq; + pcpu->floor_validate_time = now; + } + + if (pcpu->target_freq == new_freq && kt_freq_control[1] == 0) { + trace_cpufreq_interactive_already( + data, cpu_load, pcpu->target_freq, + pcpu->policy->cur, new_freq); + goto rearm_if_notmax; + } + + trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq, + pcpu->policy->cur, new_freq); + + pcpu->target_freq = new_freq; + spin_lock_irqsave(&speedchange_cpumask_lock, flags); + cpumask_set_cpu(data, &speedchange_cpumask); + spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); + wake_up_process(speedchange_task); + +rearm_if_notmax: + /* + * Already set max speed and don't see a need to change that, + * wait until next idle to re-evaluate, don't need timer. + */ + if (pcpu->target_freq == pcpu->policy->max) + goto exit; + +rearm: + if (!timer_pending(&pcpu->cpu_timer)) + cpufreq_interactive_timer_resched(pcpu); + +exit: + up_read(&pcpu->enable_sem); + return; +} + +static void cpufreq_interactive_idle_start(void) +{ + struct cpufreq_interactive_cpuinfo *pcpu = + &per_cpu(cpuinfo, smp_processor_id()); + int pending; + + if (!down_read_trylock(&pcpu->enable_sem)) + return; + if (!pcpu->governor_enabled) { + up_read(&pcpu->enable_sem); + return; + } + + pending = timer_pending(&pcpu->cpu_timer); + + if (pcpu->target_freq != pcpu->policy->min) { + /* + * Entering idle while not at lowest speed. On some + * platforms this can hold the other CPU(s) at that speed + * even though the CPU is idle. Set a timer to re-evaluate + * speed so this idle CPU doesn't hold the other CPUs above + * min indefinitely. This should probably be a quirk of + * the CPUFreq driver. + */ + if (!pending) + cpufreq_interactive_timer_resched(pcpu); + } + + up_read(&pcpu->enable_sem); +} + +static void cpufreq_interactive_idle_end(void) +{ + struct cpufreq_interactive_cpuinfo *pcpu = + &per_cpu(cpuinfo, smp_processor_id()); + + if (!down_read_trylock(&pcpu->enable_sem)) + return; + if (!pcpu->governor_enabled) { + up_read(&pcpu->enable_sem); + return; + } + + /* Arm the timer for 1-2 ticks later if not already. 
*/ + if (!timer_pending(&pcpu->cpu_timer)) { + cpufreq_interactive_timer_resched(pcpu); + } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) { + del_timer(&pcpu->cpu_timer); + del_timer(&pcpu->cpu_slack_timer); + cpufreq_interactive_timer(smp_processor_id()); + } + + up_read(&pcpu->enable_sem); +} + +static int cpufreq_interactive_speedchange_task(void *data) +{ + unsigned int cpu; + cpumask_t tmp_mask; + unsigned long flags; + struct cpufreq_interactive_cpuinfo *pcpu; + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + spin_lock_irqsave(&speedchange_cpumask_lock, flags); + + if (cpumask_empty(&speedchange_cpumask)) { + spin_unlock_irqrestore(&speedchange_cpumask_lock, + flags); + schedule(); + + if (kthread_should_stop()) + break; + + spin_lock_irqsave(&speedchange_cpumask_lock, flags); + } + + set_current_state(TASK_RUNNING); + tmp_mask = speedchange_cpumask; + cpumask_clear(&speedchange_cpumask); + spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); + + for_each_cpu(cpu, &tmp_mask) { + unsigned int j; + unsigned int max_freq = 0; + + pcpu = &per_cpu(cpuinfo, cpu); + if (!down_read_trylock(&pcpu->enable_sem)) + continue; + if (!pcpu->governor_enabled) { + up_read(&pcpu->enable_sem); + continue; + } + + //KT hook + if (kt_freq_control[cpu] > 0) + { + max_freq = kt_freq_control[cpu]; + goto skipcpu; + } + + for_each_cpu(j, pcpu->policy->cpus) { + struct cpufreq_interactive_cpuinfo *pjcpu = + &per_cpu(cpuinfo, j); + + if (pjcpu->target_freq > max_freq) + max_freq = pjcpu->target_freq; + + cpufreq_notify_utilization(pcpu->policy, (pcpu->cpu_load * pcpu->policy->cur) / pcpu->policy->cpuinfo.max_freq); + } + +skipcpu: + if (max_freq != pcpu->policy->cur) + __cpufreq_driver_target(pcpu->policy, + max_freq, + CPUFREQ_RELATION_H); + trace_cpufreq_interactive_setspeed(cpu, + pcpu->target_freq, + pcpu->policy->cur); + + up_read(&pcpu->enable_sem); + } + } + + return 0; +} + +static void cpufreq_interactive_boost(void) +{ + int i; + int anyboost = 0; + unsigned long flags; + struct cpufreq_interactive_cpuinfo *pcpu; + + spin_lock_irqsave(&speedchange_cpumask_lock, flags); + + for_each_online_cpu(i) { + pcpu = &per_cpu(cpuinfo, i); + + if (pcpu->target_freq < hispeed_freq) { + pcpu->target_freq = hispeed_freq; + cpumask_set_cpu(i, &speedchange_cpumask); + pcpu->hispeed_validate_time = + ktime_to_us(ktime_get()); + anyboost = 1; + } + + /* + * Set floor freq and (re)start timer for when last + * validated. 
+ */ + + pcpu->floor_freq = hispeed_freq; + pcpu->floor_validate_time = ktime_to_us(ktime_get()); + } + + spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); + + if (anyboost) + wake_up_process(speedchange_task); +} + +static int cpufreq_interactive_notifier( + struct notifier_block *nb, unsigned long val, void *data) +{ + struct cpufreq_freqs *freq = data; + struct cpufreq_interactive_cpuinfo *pcpu; + int cpu; + unsigned long flags; + + if (val == CPUFREQ_POSTCHANGE) { + pcpu = &per_cpu(cpuinfo, freq->cpu); + if (!down_read_trylock(&pcpu->enable_sem)) + return 0; + if (!pcpu->governor_enabled) { + up_read(&pcpu->enable_sem); + return 0; + } + + for_each_cpu(cpu, pcpu->policy->cpus) { + struct cpufreq_interactive_cpuinfo *pjcpu = + &per_cpu(cpuinfo, cpu); + if (cpu != freq->cpu) { + if (!down_read_trylock(&pjcpu->enable_sem)) + continue; + if (!pjcpu->governor_enabled) { + up_read(&pjcpu->enable_sem); + continue; + } + } + spin_lock_irqsave(&pjcpu->load_lock, flags); + update_load(cpu); + spin_unlock_irqrestore(&pjcpu->load_lock, flags); + if (cpu != freq->cpu) + up_read(&pjcpu->enable_sem); + } + + up_read(&pcpu->enable_sem); + } + return 0; +} + +static struct notifier_block cpufreq_notifier_block = { + .notifier_call = cpufreq_interactive_notifier, +}; + +static unsigned int *get_tokenized_data(const char *buf, int *num_tokens) +{ + const char *cp; + int i; + int ntokens = 1; + unsigned int *tokenized_data; + int err = -EINVAL; + + cp = buf; + while ((cp = strpbrk(cp + 1, " :"))) + ntokens++; + + if (!(ntokens & 0x1)) + goto err; + + tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL); + if (!tokenized_data) { + err = -ENOMEM; + goto err; + } + + cp = buf; + i = 0; + while (i < ntokens) { + if (sscanf(cp, "%u", &tokenized_data[i++]) != 1) + goto err_kfree; + + cp = strpbrk(cp, " :"); + if (!cp) + break; + cp++; + } + + if (i != ntokens) + goto err_kfree; + + *num_tokens = ntokens; + return tokenized_data; + +err_kfree: + kfree(tokenized_data); +err: + return ERR_PTR(err); +} + +static ssize_t show_target_loads( + struct kobject *kobj, struct attribute *attr, char *buf) +{ + int i; + ssize_t ret = 0; + unsigned long flags; + + spin_lock_irqsave(&target_loads_lock, flags); + + for (i = 0; i < ntarget_loads; i++) + ret += sprintf(buf + ret, "%u%s", target_loads[i], + i & 0x1 ? ":" : " "); + + ret += sprintf(buf + ret, "\n"); + spin_unlock_irqrestore(&target_loads_lock, flags); + return ret; +} + +static ssize_t store_target_loads( + struct kobject *kobj, struct attribute *attr, const char *buf, + size_t count) +{ + int ntokens; + unsigned int *new_target_loads = NULL; + unsigned long flags; + + new_target_loads = get_tokenized_data(buf, &ntokens); + if (IS_ERR(new_target_loads)) + return PTR_RET(new_target_loads); + + spin_lock_irqsave(&target_loads_lock, flags); + if (target_loads != default_target_loads) + kfree(target_loads); + target_loads = new_target_loads; + ntarget_loads = ntokens; + spin_unlock_irqrestore(&target_loads_lock, flags); + return count; +} + +static struct global_attr target_loads_attr = + __ATTR(target_loads, S_IRUGO | S_IWUSR, + show_target_loads, store_target_loads); + +static ssize_t show_above_hispeed_delay( + struct kobject *kobj, struct attribute *attr, char *buf) +{ + int i; + ssize_t ret = 0; + unsigned long flags; + + spin_lock_irqsave(&above_hispeed_delay_lock, flags); + + for (i = 0; i < nabove_hispeed_delay; i++) + ret += sprintf(buf + ret, "%u%s", above_hispeed_delay[i], + i & 0x1 ? 
":" : " "); + + ret += sprintf(buf + ret, "\n"); + spin_unlock_irqrestore(&above_hispeed_delay_lock, flags); + return ret; +} + +static ssize_t store_above_hispeed_delay( + struct kobject *kobj, struct attribute *attr, const char *buf, + size_t count) +{ + int ntokens; + unsigned int *new_above_hispeed_delay = NULL; + unsigned long flags; + + new_above_hispeed_delay = get_tokenized_data(buf, &ntokens); + if (IS_ERR(new_above_hispeed_delay)) + return PTR_RET(new_above_hispeed_delay); + + spin_lock_irqsave(&above_hispeed_delay_lock, flags); + if (above_hispeed_delay != default_above_hispeed_delay) + kfree(above_hispeed_delay); + above_hispeed_delay = new_above_hispeed_delay; + nabove_hispeed_delay = ntokens; + spin_unlock_irqrestore(&above_hispeed_delay_lock, flags); + return count; + +} + +static struct global_attr above_hispeed_delay_attr = + __ATTR(above_hispeed_delay, S_IRUGO | S_IWUSR, + show_above_hispeed_delay, store_above_hispeed_delay); + +static ssize_t show_hispeed_freq(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", hispeed_freq); +} + +static ssize_t store_hispeed_freq(struct kobject *kobj, + struct attribute *attr, const char *buf, + size_t count) +{ + int ret; + long unsigned int val; + + ret = strict_strtoul(buf, 0, &val); + if (ret < 0) + return ret; + hispeed_freq = val; + return count; +} + +static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644, + show_hispeed_freq, store_hispeed_freq); + + +static ssize_t show_go_hispeed_load(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", go_hispeed_load); +} + +static ssize_t store_go_hispeed_load(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = strict_strtoul(buf, 0, &val); + if (ret < 0) + return ret; + go_hispeed_load = val; + return count; +} + +static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644, + show_go_hispeed_load, store_go_hispeed_load); + +static ssize_t show_min_sample_time(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", min_sample_time); +} + +static ssize_t store_min_sample_time(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = strict_strtoul(buf, 0, &val); + if (ret < 0) + return ret; + min_sample_time = val; + return count; +} + +static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644, + show_min_sample_time, store_min_sample_time); + +static ssize_t show_timer_rate(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", timer_rate); +} + +static ssize_t store_timer_rate(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = strict_strtoul(buf, 0, &val); + if (ret < 0) + return ret; + timer_rate = val; + return count; +} + +static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644, + show_timer_rate, store_timer_rate); + +static ssize_t show_timer_slack( + struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", timer_slack_val); +} + +static ssize_t store_timer_slack( + struct kobject *kobj, struct attribute *attr, const char *buf, + size_t count) +{ + int ret; + unsigned long val; + + ret = kstrtol(buf, 10, &val); + if (ret < 0) + return ret; + + timer_slack_val = val; + return count; +} + +define_one_global_rw(timer_slack); + 
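The attribute definitions above are the sysfs plumbing for the interactive governor's tunables: each show_*/store_* pair is bound to a file either through an explicit __ATTR() initializer or through the define_one_global_rw() helper, and because the attribute group further down is created on cpufreq_global_kobject under the name "interactive", the files typically surface under /sys/devices/system/cpu/cpufreq/interactive/. The target_loads and above_hispeed_delay files take the paired format handled by get_tokenized_data() earlier: an odd number of space/colon separated tokens, i.e. a default value followed by frequency:value pairs, where each pair gives the value used from that frequency upwards (so "90 1000000:85" would mean a target load of 90 below 1 GHz and 85 at or above it). As a sketch only, assuming the usual definition of the helper macro in include/linux/cpufreq.h of this era, define_one_global_rw(timer_slack) is expected to expand to roughly:

	/* sketch of the presumed macro expansion -- not part of the patch */
	static struct global_attr timer_slack =
		__ATTR(timer_slack, 0644, show_timer_slack, store_timer_slack);

i.e. a read/write (0644) attribute wired to the show_timer_slack()/store_timer_slack() handlers defined just above, with define_one_global_ro() doing the same at mode 0444 and no store handler.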
+static ssize_t show_boost(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + return sprintf(buf, "%d\n", boost_val); +} + +static ssize_t store_boost(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = kstrtoul(buf, 0, &val); + if (ret < 0) + return ret; + + boost_val = val; + + if (boost_val) { + trace_cpufreq_interactive_boost("on"); + cpufreq_interactive_boost(); + } else { + trace_cpufreq_interactive_unboost("off"); + } + + return count; +} + +define_one_global_rw(boost); + +static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = kstrtoul(buf, 0, &val); + if (ret < 0) + return ret; + + boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val; + trace_cpufreq_interactive_boost("pulse"); + cpufreq_interactive_boost(); + return count; +} + +static struct global_attr boostpulse = + __ATTR(boostpulse, 0200, NULL, store_boostpulse); + +static ssize_t show_boostpulse_duration( + struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", boostpulse_duration_val); +} + +static ssize_t store_boostpulse_duration( + struct kobject *kobj, struct attribute *attr, const char *buf, + size_t count) +{ + int ret; + unsigned long val; + + ret = kstrtoul(buf, 0, &val); + if (ret < 0) + return ret; + + boostpulse_duration_val = val; + return count; +} + +define_one_global_rw(boostpulse_duration); + +static ssize_t show_io_is_busy(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", io_is_busy); +} + +static ssize_t store_io_is_busy(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = kstrtoul(buf, 0, &val); + if (ret < 0) + return ret; + io_is_busy = val; + return count; +} + +static struct global_attr io_is_busy_attr = __ATTR(io_is_busy, 0644, + show_io_is_busy, store_io_is_busy); + +static struct attribute *interactive_attributes[] = { + &target_loads_attr.attr, + &above_hispeed_delay_attr.attr, + &hispeed_freq_attr.attr, + &go_hispeed_load_attr.attr, + &min_sample_time_attr.attr, + &timer_rate_attr.attr, + &timer_slack.attr, + &boost.attr, + &boostpulse.attr, + &boostpulse_duration.attr, + &io_is_busy_attr.attr, + NULL, +}; + +static struct attribute_group interactive_attr_group = { + .attrs = interactive_attributes, + .name = "interactive", +}; + +static int cpufreq_interactive_idle_notifier(struct notifier_block *nb, + unsigned long val, + void *data) +{ + switch (val) { + case IDLE_START: + cpufreq_interactive_idle_start(); + break; + case IDLE_END: + cpufreq_interactive_idle_end(); + break; + } + + return 0; +} + +static struct notifier_block cpufreq_interactive_idle_nb = { + .notifier_call = cpufreq_interactive_idle_notifier, +}; + +static int cpufreq_governor_interactive(struct cpufreq_policy *policy, + unsigned int event) +{ + int rc; + unsigned int j; + struct cpufreq_interactive_cpuinfo *pcpu; + struct cpufreq_frequency_table *freq_table; + + switch (event) { + case CPUFREQ_GOV_START: + if (!cpu_online(policy->cpu)) + return -EINVAL; + + mutex_lock(&gov_lock); + + freq_table = + cpufreq_frequency_get_table(policy->cpu); + if (!hispeed_freq) + hispeed_freq = policy->max; + + for_each_cpu(j, policy->cpus) { + pcpu = &per_cpu(cpuinfo, j); + pcpu->policy = policy; + pcpu->target_freq = policy->cur; + pcpu->freq_table = freq_table; + pcpu->floor_freq = 
pcpu->target_freq; + pcpu->floor_validate_time = + ktime_to_us(ktime_get()); + pcpu->hispeed_validate_time = + pcpu->floor_validate_time; + down_write(&pcpu->enable_sem); + cpufreq_interactive_timer_start(j); + pcpu->governor_enabled = 1; + up_write(&pcpu->enable_sem); + } + + /* + * Do not register the idle hook and create sysfs + * entries if we have already done so. + */ + if (++active_count > 1) { + mutex_unlock(&gov_lock); + return 0; + } + + rc = sysfs_create_group(cpufreq_global_kobject, + &interactive_attr_group); + if (rc) { + mutex_unlock(&gov_lock); + return rc; + } + + idle_notifier_register(&cpufreq_interactive_idle_nb); + cpufreq_register_notifier( + &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); + mutex_unlock(&gov_lock); + break; + + case CPUFREQ_GOV_STOP: + mutex_lock(&gov_lock); + for_each_cpu(j, policy->cpus) { + pcpu = &per_cpu(cpuinfo, j); + down_write(&pcpu->enable_sem); + pcpu->governor_enabled = 0; + del_timer_sync(&pcpu->cpu_timer); + del_timer_sync(&pcpu->cpu_slack_timer); + up_write(&pcpu->enable_sem); + } + + if (--active_count > 0) { + mutex_unlock(&gov_lock); + return 0; + } + + cpufreq_unregister_notifier( + &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); + idle_notifier_unregister(&cpufreq_interactive_idle_nb); + sysfs_remove_group(cpufreq_global_kobject, + &interactive_attr_group); + mutex_unlock(&gov_lock); + + break; + + case CPUFREQ_GOV_LIMITS: + if (policy->max < policy->cur) + __cpufreq_driver_target(policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > policy->cur) + __cpufreq_driver_target(policy, + policy->min, CPUFREQ_RELATION_L); + for_each_cpu(j, policy->cpus) { + pcpu = &per_cpu(cpuinfo, j); + + /* hold write semaphore to avoid race */ + down_write(&pcpu->enable_sem); + if (pcpu->governor_enabled == 0) { + up_write(&pcpu->enable_sem); + continue; + } + + /* update target_freq firstly */ + if (policy->max < pcpu->target_freq) + pcpu->target_freq = policy->max; + else if (policy->min > pcpu->target_freq) + pcpu->target_freq = policy->min; + + /* Reschedule timer. + * Delete the timers, else the timer callback may + * return without re-arm the timer when failed + * acquire the semaphore. This race may cause timer + * stopped unexpectedly. 
+ */ + del_timer_sync(&pcpu->cpu_timer); + del_timer_sync(&pcpu->cpu_slack_timer); + cpufreq_interactive_timer_start(j); + up_write(&pcpu->enable_sem); + } + break; + } + return 0; +} + +static void cpufreq_interactive_nop_timer(unsigned long data) +{ +} + +static int __init cpufreq_interactive_init(void) +{ + unsigned int i; + struct cpufreq_interactive_cpuinfo *pcpu; + struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; + + /* Initalize per-cpu timers */ + for_each_possible_cpu(i) { + pcpu = &per_cpu(cpuinfo, i); + init_timer_deferrable(&pcpu->cpu_timer); + pcpu->cpu_timer.function = cpufreq_interactive_timer; + pcpu->cpu_timer.data = i; + init_timer(&pcpu->cpu_slack_timer); + pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer; + spin_lock_init(&pcpu->load_lock); + init_rwsem(&pcpu->enable_sem); + } + + spin_lock_init(&target_loads_lock); + spin_lock_init(&speedchange_cpumask_lock); + spin_lock_init(&above_hispeed_delay_lock); + mutex_init(&gov_lock); + speedchange_task = + kthread_create(cpufreq_interactive_speedchange_task, NULL, + "cfinteractive"); + if (IS_ERR(speedchange_task)) + return PTR_ERR(speedchange_task); + + sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, ¶m); + get_task_struct(speedchange_task); + + /* NB: wake up so the thread does not look hung to the freezer */ + wake_up_process(speedchange_task); + + return cpufreq_register_governor(&cpufreq_gov_interactive); +} + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE +fs_initcall(cpufreq_interactive_init); +#else +module_init(cpufreq_interactive_init); +#endif + +static void __exit cpufreq_interactive_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_interactive); + kthread_stop(speedchange_task); + put_task_struct(speedchange_task); +} + +module_exit(cpufreq_interactive_exit); + +MODULE_AUTHOR("Mike Chan "); +MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for " + "Latency sensitive workloads"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cpufreq_ktoonservativeq.c b/drivers/cpufreq/cpufreq_ktoonservativeq.c new file mode 100644 index 00000000..fc081c10 --- /dev/null +++ b/drivers/cpufreq/cpufreq_ktoonservativeq.c @@ -0,0 +1,1608 @@ +/* + * drivers/cpufreq/cpufreq_ktoonservative.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * (C) 2009 Alexander Clouter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_CPU_DOWN_BLOCK_CYCLES (11) +#define DEF_BOOST_CPU (1134000) +#define DEF_BOOST_GPU (450) +#define DEF_BOOST_HOLD_CYCLES (22) +#define DEF_DISABLE_HOTPLUGGING (0) +#define CPUS_AVAILABLE num_possible_cpus() +static int hotplug_cpu_enable_up[] = { 0, 58, 68, 78 }; +static int hotplug_cpu_enable_down[] = { 0, 35, 45, 55 }; +static int hotplug_cpu_single_up[] = { 0, 0, 0, 0 }; +static int hotplug_cpu_single_down[] = { 0, 0, 0, 0 }; +static int hotplug_cpu_lockout[] = { 0, 0, 0, 0 }; +static bool hotplug_flag_on = false; +static unsigned int Lcpu_hotplug_block_cycles = 0; +static bool hotplug_flag_off = false; +static bool disable_hotplugging_chrg_override; + +void setExtraCores(unsigned int requested_freq); +unsigned int kt_freq_control[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. + * All times here are in uS. + */ +#define MIN_SAMPLING_RATE_RATIO (2) + +static bool disable_hotplug_bt_active = false; +static unsigned int min_sampling_rate; +static unsigned int stored_sampling_rate = 45000; +static unsigned int Lcpu_down_block_cycles = 0; +static unsigned int Lcpu_up_block_cycles = 0; +static bool boostpulse_relayf = false; +static int boost_hold_cycles_cnt = 0; +static bool screen_is_on = true; + +extern void ktoonservative_is_active(bool val); +extern void ktoonservative_is_activebd(bool val); +extern void boost_the_gpu(int freq, int cycles); + +extern void apenable_auto_hotplug(bool state); +extern bool apget_enable_auto_hotplug(void); +static bool prev_apenable; +static bool hotplugInProgress = false; + +//extern void kt_is_active_benabled_gpio(bool val); +extern void kt_is_active_benabled_touchkey(bool val); +//extern void kt_is_active_benabled_power(bool val); +extern unsigned int get_cable_state(void); +extern void ktoonservative_is_activechrg(bool val); + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define DEF_SAMPLING_DOWN_FACTOR (1) +#define MAX_SAMPLING_DOWN_FACTOR (10) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +struct work_struct hotplug_offline_work; +struct work_struct hotplug_online_work; + +static void do_dbs_timer(struct work_struct *work); + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + unsigned int down_skip; + unsigned int requested_freq; + int cpu; + unsigned int enable:1; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects dbs_enable in governor start/stop. 
+ */ +static DEFINE_MUTEX(dbs_mutex); + +static struct workqueue_struct *dbs_wq; + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int sampling_rate_screen_off; + unsigned int sampling_down_factor; + unsigned int up_threshold; + unsigned int up_threshold_hotplug_1; + unsigned int up_threshold_hotplug_2; + unsigned int up_threshold_hotplug_3; + unsigned int down_threshold; + unsigned int down_threshold_hotplug_1; + unsigned int down_threshold_hotplug_2; + unsigned int down_threshold_hotplug_3; + unsigned int cpu_down_block_cycles; + unsigned int cpu_hotplug_block_cycles; + unsigned int touch_boost_cpu; + unsigned int touch_boost_cpu_all_cores; + unsigned int touch_boost_2nd_core; + unsigned int touch_boost_3rd_core; + unsigned int touch_boost_4th_core; + unsigned int boost_2nd_core_on_button; + unsigned int boost_3rd_core_on_button; + unsigned int boost_4th_core_on_button; + unsigned int lockout_2nd_core_hotplug; + unsigned int lockout_3rd_core_hotplug; + unsigned int lockout_4th_core_hotplug; + //unsigned int touch_boost_gpu; + unsigned int sync_extra_cores; + unsigned int boost_hold_cycles; + unsigned int disable_hotplugging; + unsigned int disable_hotplugging_chrg; + unsigned int disable_hotplug_bt; + unsigned int no_extra_cores_screen_off; + unsigned int ignore_nice; + unsigned int freq_step; +} dbs_tuners_ins = { + .up_threshold = 57, + .up_threshold_hotplug_1 = 58, + .up_threshold_hotplug_2 = 68, + .up_threshold_hotplug_3 = 78, + .down_threshold = 52, + .down_threshold_hotplug_1 = 35, + .down_threshold_hotplug_2 = 45, + .down_threshold_hotplug_3 = 55, + .cpu_down_block_cycles = DEF_CPU_DOWN_BLOCK_CYCLES, + .cpu_hotplug_block_cycles = DEF_CPU_DOWN_BLOCK_CYCLES, + .touch_boost_cpu = DEF_BOOST_CPU, + .touch_boost_cpu_all_cores = 0, + .touch_boost_2nd_core = 1, + .touch_boost_3rd_core = 0, + .touch_boost_4th_core = 0, + .boost_2nd_core_on_button = 1, + .boost_3rd_core_on_button = 0, + .boost_4th_core_on_button = 0, + .lockout_2nd_core_hotplug = 0, + .lockout_3rd_core_hotplug = 0, + .lockout_4th_core_hotplug = 0, + //.touch_boost_gpu = DEF_BOOST_GPU, + .sync_extra_cores = 0, + .boost_hold_cycles = DEF_BOOST_HOLD_CYCLES, + .disable_hotplugging = DEF_DISABLE_HOTPLUGGING, + .disable_hotplugging_chrg = 0, + .disable_hotplug_bt = 0, + .no_extra_cores_screen_off = 1, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .sampling_rate_screen_off = 45000, + .ignore_nice = 0, + .freq_step = 5, +}; + +static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, + u64 *wall) +{ + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + + idle_time = cur_wall_time - busy_time; + if (wall) + *wall = jiffies_to_usecs(cur_wall_time); + + return jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, wall); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + + return idle_time; +} + +/* keep track of frequency transitions */ +static int +dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_freqs *freq = data; + 
struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info, + freq->cpu); + + struct cpufreq_policy *policy; + + if (!this_dbs_info->enable) + return 0; + + policy = this_dbs_info->cur_policy; + + /* + * we only care if our internally tracked freq moves outside + * the 'valid' ranges of freqency available to us otherwise + * we do not change it + */ + if (this_dbs_info->requested_freq > policy->max + || this_dbs_info->requested_freq < policy->min) + this_dbs_info->requested_freq = freq->new; + + return 0; +} + +static struct notifier_block dbs_cpufreq_notifier_block = { + .notifier_call = dbs_cpufreq_notifier +}; + +void set_bluetooth_state_kt(bool val) +{ + if (val == true && dbs_tuners_ins.disable_hotplug_bt == 1) + { + disable_hotplug_bt_active = true; + if (num_online_cpus() < 2) + { + int cpu; + for (cpu = 1; cpu < CPUS_AVAILABLE; cpu++) + { + if (!cpu_online(cpu)) + hotplug_cpu_single_up[cpu] = 1; + } + if (!hotplugInProgress) + queue_work_on(0, dbs_wq, &hotplug_online_work); + } + } + else + disable_hotplug_bt_active = false; +} + +void send_cable_state_kt(unsigned int state) +{ + int cpu; + if (state && dbs_tuners_ins.disable_hotplugging_chrg) + { + disable_hotplugging_chrg_override = true; + for (cpu = 1; cpu < CPUS_AVAILABLE; cpu++) + hotplug_cpu_single_up[cpu] = 1; + if (!hotplugInProgress) + queue_work_on(0, dbs_wq, &hotplug_online_work); + } + else + { + disable_hotplugging_chrg_override = false; + } +} + +/************************** sysfs interface ************************/ +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} +define_one_global_ro(sampling_rate_min); + +static ssize_t show_touch_boost_cpu(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", dbs_tuners_ins.touch_boost_cpu / 1000); +} + +static ssize_t show_touch_boost_cpu_all_cores(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", dbs_tuners_ins.touch_boost_cpu_all_cores); +} + +static ssize_t show_sync_extra_cores(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", dbs_tuners_ins.sync_extra_cores); +} + +/* cpufreq_ktoonservative Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(sampling_rate_screen_off, sampling_rate_screen_off); +show_one(sampling_down_factor, sampling_down_factor); +show_one(up_threshold, up_threshold); +show_one(up_threshold_hotplug_1, up_threshold_hotplug_1); +show_one(up_threshold_hotplug_2, up_threshold_hotplug_2); +show_one(up_threshold_hotplug_3, up_threshold_hotplug_3); +show_one(down_threshold, down_threshold); +show_one(down_threshold_hotplug_1, down_threshold_hotplug_1); +show_one(down_threshold_hotplug_2, down_threshold_hotplug_2); +show_one(down_threshold_hotplug_3, down_threshold_hotplug_3); +show_one(cpu_down_block_cycles, cpu_down_block_cycles); +show_one(cpu_hotplug_block_cycles, cpu_hotplug_block_cycles); +show_one(touch_boost_2nd_core, touch_boost_2nd_core); +show_one(touch_boost_3rd_core, touch_boost_3rd_core); +show_one(touch_boost_4th_core, touch_boost_4th_core); +show_one(boost_2nd_core_on_button, boost_2nd_core_on_button); +show_one(boost_3rd_core_on_button, boost_3rd_core_on_button); +show_one(boost_4th_core_on_button, 
boost_4th_core_on_button); +show_one(lockout_2nd_core_hotplug, lockout_2nd_core_hotplug); +show_one(lockout_3rd_core_hotplug, lockout_3rd_core_hotplug); +show_one(lockout_4th_core_hotplug, lockout_4th_core_hotplug); +//show_one(touch_boost_gpu, touch_boost_gpu); +show_one(boost_hold_cycles, boost_hold_cycles); +show_one(disable_hotplugging, disable_hotplugging); +show_one(disable_hotplugging_chrg, disable_hotplugging_chrg); +show_one(disable_hotplug_bt, disable_hotplug_bt); +show_one(no_extra_cores_screen_off, no_extra_cores_screen_off); +show_one(ignore_nice_load, ignore_nice); +show_one(freq_step, freq_step); + +static ssize_t store_sampling_down_factor(struct kobject *a, + struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + + dbs_tuners_ins.sampling_down_factor = input; + return count; +} + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + stored_sampling_rate = max(input, min_sampling_rate); + return count; +} + +static ssize_t store_sampling_rate_screen_off(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + dbs_tuners_ins.sampling_rate_screen_off = max(input, min_sampling_rate); + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > 100 || + input <= dbs_tuners_ins.down_threshold) + return -EINVAL; + + dbs_tuners_ins.up_threshold = input; + return count; +} + +static ssize_t store_up_threshold_hotplug_1(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > 100 || + input <= dbs_tuners_ins.down_threshold_hotplug_1) + return -EINVAL; + + dbs_tuners_ins.up_threshold_hotplug_1 = input; + hotplug_cpu_enable_up[1] = input; + return count; +} + +static ssize_t store_up_threshold_hotplug_2(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > 100 || + input <= dbs_tuners_ins.down_threshold_hotplug_2) + return -EINVAL; + + dbs_tuners_ins.up_threshold_hotplug_2 = input; + hotplug_cpu_enable_up[2] = input; + return count; +} + +static ssize_t store_up_threshold_hotplug_3(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > 100 || + input <= dbs_tuners_ins.down_threshold_hotplug_3) + return -EINVAL; + + dbs_tuners_ins.up_threshold_hotplug_3 = input; + hotplug_cpu_enable_up[3] = input; + return count; +} + +static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + /* cannot be lower than 11 otherwise freq will not fall */ + if (ret != 1 || input < 11 || input > 100 || + input >= dbs_tuners_ins.up_threshold) + return -EINVAL; + + 
dbs_tuners_ins.down_threshold = input; + return count; +} + +static ssize_t store_down_threshold_hotplug_1(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + /* cannot be lower than 11 otherwise freq will not fall */ + if (ret != 1 || input < 11 || input > 100 || + input >= dbs_tuners_ins.up_threshold_hotplug_1) + return -EINVAL; + + dbs_tuners_ins.down_threshold_hotplug_1 = input; + hotplug_cpu_enable_down[1] = input; + return count; +} + +static ssize_t store_down_threshold_hotplug_2(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + /* cannot be lower than 11 otherwise freq will not fall */ + if (ret != 1 || input < 11 || input > 100 || + input >= dbs_tuners_ins.up_threshold_hotplug_2) + return -EINVAL; + + dbs_tuners_ins.down_threshold_hotplug_2 = input; + hotplug_cpu_enable_down[2] = input; + return count; +} + +static ssize_t store_down_threshold_hotplug_3(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + /* cannot be lower than 11 otherwise freq will not fall */ + if (ret != 1 || input < 11 || input > 100 || + input >= dbs_tuners_ins.up_threshold_hotplug_3) + return -EINVAL; + + dbs_tuners_ins.down_threshold_hotplug_3 = input; + hotplug_cpu_enable_down[3] = input; + return count; +} + +static ssize_t store_cpu_down_block_cycles(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + /* cannot be lower than 11 otherwise freq will not fall */ + if (input < 0) + return -EINVAL; + + dbs_tuners_ins.cpu_down_block_cycles = input; + return count; +} + +static ssize_t store_cpu_hotplug_block_cycles(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + /* cannot be lower than 11 otherwise freq will not fall */ + if (input < 0) + return -EINVAL; + + dbs_tuners_ins.cpu_hotplug_block_cycles = input; + return count; +} + +static ssize_t store_touch_boost_cpu(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input * 1000 > GLOBALKT_MAX_FREQ_LIMIT) + input = GLOBALKT_MAX_FREQ_LIMIT; + if (input * 1000 < 0) + input = 0; + dbs_tuners_ins.touch_boost_cpu = input * 1000; + return count; +} + +static ssize_t store_touch_boost_cpu_all_cores(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret, i; + + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input != 0 && input != 1) + input = 1; + dbs_tuners_ins.touch_boost_cpu_all_cores = input; + + if (dbs_tuners_ins.sync_extra_cores == 0 && dbs_tuners_ins.touch_boost_cpu_all_cores == 0) + { + for (i = 0; i < CPUS_AVAILABLE; i++) + kt_freq_control[i] = 0; + } + return count; +} + +static ssize_t store_sync_extra_cores(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret, i; + + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input != 0 && input != 1) + input = 1; + dbs_tuners_ins.sync_extra_cores = input; + + if (dbs_tuners_ins.sync_extra_cores == 0 && dbs_tuners_ins.touch_boost_cpu_all_cores 
== 0) + { + for (i = 0; i < CPUS_AVAILABLE; i++) + kt_freq_control[i] = 0; + } + return count; +} + +static ssize_t store_touch_boost_2nd_core(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (input != 0 && input != 1) + input = 0; + + dbs_tuners_ins.touch_boost_2nd_core = input; + return count; +} + +static ssize_t store_touch_boost_3rd_core(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (input != 0 && input != 1) + input = 0; + + dbs_tuners_ins.touch_boost_3rd_core = input; + return count; +} + +static ssize_t store_touch_boost_4th_core(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (input != 0 && input != 1) + input = 0; + + dbs_tuners_ins.touch_boost_4th_core = input; + return count; +} + +static ssize_t store_lockout_2nd_core_hotplug(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret, cpu; + ret = sscanf(buf, "%u", &input); + + if (input != 0 && input != 1 && input != 2) + input = 0; + + dbs_tuners_ins.lockout_2nd_core_hotplug = input; + hotplug_cpu_lockout[1] = input; + if (input == 1) + { + hotplug_cpu_single_up[1] = 1; + if (!hotplugInProgress) + queue_work_on(0, dbs_wq, &hotplug_online_work); + } + else if (input == 2) + { + hotplug_cpu_single_down[1] = 1; + if (!hotplugInProgress) + queue_work_on(0, dbs_wq, &hotplug_offline_work); + } + return count; +} + +static ssize_t store_lockout_3rd_core_hotplug(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret, cpu; + ret = sscanf(buf, "%u", &input); + + if (input != 0 && input != 1 && input != 2) + input = 0; + + dbs_tuners_ins.lockout_3rd_core_hotplug = input; + hotplug_cpu_lockout[2] = input; + if (input == 1) + { + hotplug_cpu_single_up[2] = 1; + if (!hotplugInProgress) + queue_work_on(0, dbs_wq, &hotplug_online_work); + } + else if (input == 2) + { + hotplug_cpu_single_down[2] = 1; + if (!hotplugInProgress) + queue_work_on(0, dbs_wq, &hotplug_offline_work); + } + return count; +} + +static ssize_t store_lockout_4th_core_hotplug(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret, cpu; + ret = sscanf(buf, "%u", &input); + + if (input != 0 && input != 1 && input != 2) + input = 0; + + dbs_tuners_ins.lockout_4th_core_hotplug = input; + hotplug_cpu_lockout[3] = input; + if (input == 1) + { + hotplug_cpu_single_up[3] = 1; + if (!hotplugInProgress) + queue_work_on(0, dbs_wq, &hotplug_online_work); + } + else if (input == 2) + { + hotplug_cpu_single_down[3] = 1; + if (!hotplugInProgress) + queue_work_on(0, dbs_wq, &hotplug_offline_work); + } + return count; +} + +/*static ssize_t store_touch_boost_gpu(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (input != 100 && input != 160 && input != 266 && input != 350 && input != 400 && input != 450 && input != 533 && input != 612) + input = 0; + + dbs_tuners_ins.touch_boost_gpu = input; + return count; +}*/ + +static ssize_t store_boost_hold_cycles(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (input < 0) + return -EINVAL; 
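Like store_cpu_down_block_cycles() and store_cpu_hotplug_block_cycles() above, this handler reads the value with sscanf() but never checks how many items were converted, and the `if (input < 0)` guard can never trigger because input is an unsigned int (the "cannot be lower than 11" comments on the block-cycles handlers are likewise stale copies from the threshold handlers). Purely as an illustration of a tighter variant, with the _strict name invented for the example rather than taken from the patch:

	/* illustrative sketch only -- not part of the patch */
	static ssize_t store_boost_hold_cycles_strict(struct kobject *a,
			struct attribute *b, const char *buf, size_t count)
	{
		unsigned int input;

		/* reject anything that is not a single unsigned integer */
		if (sscanf(buf, "%u", &input) != 1)
			return -EINVAL;

		dbs_tuners_ins.boost_hold_cycles = input;
		return count;
	}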
+ + dbs_tuners_ins.boost_hold_cycles = input; + return count; +} + +static ssize_t store_disable_hotplugging(struct kobject *a, struct attribute *b, const char *buf, size_t count) +{ + unsigned int input; + int ret, cpu; + ret = sscanf(buf, "%u", &input); + + if (input != 0 && input != 1) + input = 0; + + dbs_tuners_ins.disable_hotplugging = input; + if (input == 1) + { + for (cpu = 1; cpu < CPUS_AVAILABLE; cpu++) + hotplug_cpu_single_up[cpu] = 1; + if (!hotplugInProgress) + queue_work_on(0, dbs_wq, &hotplug_online_work); + } + return count; +} + +static ssize_t store_disable_hotplugging_chrg(struct kobject *a, struct attribute *b, const char *buf, size_t count) +{ + unsigned int input, c_state; + int ret, cpu; + ret = sscanf(buf, "%u", &input); + + if (input != 0 && input != 1) + input = 0; + + dbs_tuners_ins.disable_hotplugging_chrg = input; + c_state = get_cable_state(); + send_cable_state_kt(c_state); + + return count; +} + +static ssize_t store_no_extra_cores_screen_off(struct kobject *a, struct attribute *b, const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (input != 0 && input != 1) + input = 0; + + dbs_tuners_ins.no_extra_cores_screen_off = input; + return count; +} + +static ssize_t store_boost_2nd_core_on_button(struct kobject *a, struct attribute *b, const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (input != 0 && input != 1) + input = 0; + + dbs_tuners_ins.boost_2nd_core_on_button = input; + if (dbs_tuners_ins.boost_2nd_core_on_button == 1) + { + //kt_is_active_benabled_gpio(true); + kt_is_active_benabled_touchkey(true); + //kt_is_active_benabled_power(true); + } + + return count; +} + +static ssize_t store_boost_3rd_core_on_button(struct kobject *a, struct attribute *b, const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (input != 0 && input != 1) + input = 0; + + dbs_tuners_ins.boost_3rd_core_on_button = input; + if (dbs_tuners_ins.boost_3rd_core_on_button == 1) + { + //kt_is_active_benabled_gpio(true); + kt_is_active_benabled_touchkey(true); + //kt_is_active_benabled_power(true); + } + + return count; +} + +static ssize_t store_boost_4th_core_on_button(struct kobject *a, struct attribute *b, const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (input != 0 && input != 1) + input = 0; + + dbs_tuners_ins.boost_4th_core_on_button = input; + if (dbs_tuners_ins.boost_4th_core_on_button == 1) + { + //kt_is_active_benabled_gpio(true); + kt_is_active_benabled_touchkey(true); + //kt_is_active_benabled_power(true); + } + + return count; +} + +static ssize_t store_disable_hotplug_bt(struct kobject *a, struct attribute *b, const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (input != 0 && input != 1) + input = 0; + + dbs_tuners_ins.disable_hotplug_bt = input; + return count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */ + return count; + + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = 
&per_cpu(cs_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + return count; +} + +static ssize_t store_freq_step(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input > 100) + input = 100; + + /* no need to test here if freq_step is zero as the user might actually + * want this, they would be crazy though :) */ + dbs_tuners_ins.freq_step = input; + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(sampling_rate_screen_off); +define_one_global_rw(sampling_down_factor); +define_one_global_rw(up_threshold); +define_one_global_rw(up_threshold_hotplug_1); +define_one_global_rw(up_threshold_hotplug_2); +define_one_global_rw(up_threshold_hotplug_3); +define_one_global_rw(down_threshold); +define_one_global_rw(down_threshold_hotplug_1); +define_one_global_rw(down_threshold_hotplug_2); +define_one_global_rw(down_threshold_hotplug_3); +define_one_global_rw(cpu_down_block_cycles); +define_one_global_rw(cpu_hotplug_block_cycles); +define_one_global_rw(touch_boost_cpu); +define_one_global_rw(touch_boost_cpu_all_cores); +define_one_global_rw(touch_boost_2nd_core); +define_one_global_rw(touch_boost_3rd_core); +define_one_global_rw(touch_boost_4th_core); +define_one_global_rw(boost_2nd_core_on_button); +define_one_global_rw(boost_3rd_core_on_button); +define_one_global_rw(boost_4th_core_on_button); +define_one_global_rw(lockout_2nd_core_hotplug); +define_one_global_rw(lockout_3rd_core_hotplug); +define_one_global_rw(lockout_4th_core_hotplug); +//define_one_global_rw(touch_boost_gpu); +define_one_global_rw(sync_extra_cores); +define_one_global_rw(boost_hold_cycles); +define_one_global_rw(disable_hotplugging); +define_one_global_rw(disable_hotplugging_chrg); +define_one_global_rw(disable_hotplug_bt); +define_one_global_rw(no_extra_cores_screen_off); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(freq_step); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_min.attr, + &sampling_rate.attr, + &sampling_rate_screen_off.attr, + &sampling_down_factor.attr, + &up_threshold.attr, + &up_threshold_hotplug_1.attr, + &up_threshold_hotplug_2.attr, + &up_threshold_hotplug_3.attr, + &down_threshold.attr, + &down_threshold_hotplug_1.attr, + &down_threshold_hotplug_2.attr, + &down_threshold_hotplug_3.attr, + &cpu_down_block_cycles.attr, + &cpu_hotplug_block_cycles.attr, + &touch_boost_cpu.attr, + &touch_boost_cpu_all_cores.attr, + &touch_boost_2nd_core.attr, + &touch_boost_3rd_core.attr, + &touch_boost_4th_core.attr, + &boost_2nd_core_on_button.attr, + &boost_3rd_core_on_button.attr, + &boost_4th_core_on_button.attr, + &lockout_2nd_core_hotplug.attr, + &lockout_3rd_core_hotplug.attr, + &lockout_4th_core_hotplug.attr, + //&touch_boost_gpu.attr, + &sync_extra_cores.attr, + &boost_hold_cycles.attr, + &disable_hotplugging.attr, + &disable_hotplugging_chrg.attr, + &disable_hotplug_bt.attr, + &no_extra_cores_screen_off.attr, + &ignore_nice_load.attr, + &freq_step.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "ktoonservativeq", +}; + +/************************** sysfs end ************************/ + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + unsigned int load = 0; + unsigned int max_load = 0; + 
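The sampling loop below derives each CPU's load from how much wall-clock time versus idle time has passed since the previous sample, keeping the largest value across policy->cpus in max_load before comparing it against the thresholds. A quick worked example of that arithmetic, using assumed figures rather than numbers from the patch:

	/*
	 * Illustration only -- not part of the patch.  Suppose one sampling
	 * period measured wall_time = 45000 us with idle_time = 9000 us:
	 *
	 *	load = 100 * (wall_time - idle_time) / wall_time
	 *	     = 100 * 36000 / 45000 = 80   (CPU busy 80% of the window)
	 *
	 * With the defaults above (up_threshold = 57, down_threshold = 52),
	 * an 80% sample counts toward a frequency increase.
	 */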
unsigned int freq_target; + int cpu; + struct cpufreq_policy *policy; + unsigned int j; + + policy = this_dbs_info->cur_policy; + + if (boostpulse_relayf) + { + if (stored_sampling_rate != 0 && screen_is_on) + dbs_tuners_ins.sampling_rate = stored_sampling_rate; + this_dbs_info->down_skip = 0; + + if (boost_hold_cycles_cnt >= dbs_tuners_ins.boost_hold_cycles) + { + boostpulse_relayf = false; + boost_hold_cycles_cnt = 0; + if (dbs_tuners_ins.sync_extra_cores == 0) + { + for (cpu = 0; cpu < CPUS_AVAILABLE; cpu++) + kt_freq_control[cpu] = 0; + } + goto boostcomplete; + } + boost_hold_cycles_cnt++; + + if (dbs_tuners_ins.touch_boost_cpu_all_cores && policy->cpu == 0) + { + for (cpu = 1; cpu < CPUS_AVAILABLE; cpu++) + { + if (&trmlpolicy[cpu] != NULL) + { + if (cpu_online(cpu)) + { + if (trmlpolicy[cpu].cur < dbs_tuners_ins.touch_boost_cpu) + { + //__cpufreq_driver_target(&trmlpolicy[cpu], dbs_tuners_ins.touch_boost_cpu, + // CPUFREQ_RELATION_H); + kt_freq_control[cpu] = dbs_tuners_ins.touch_boost_cpu; + //pr_alert("BOOST EXTRA CPUs: %d\n", cpu); + } + } + } + } + } + + /* if we are already at full speed then break out early */ + if (this_dbs_info->requested_freq == policy->max || policy->cur > dbs_tuners_ins.touch_boost_cpu || this_dbs_info->requested_freq > dbs_tuners_ins.touch_boost_cpu) + return; + + this_dbs_info->requested_freq = dbs_tuners_ins.touch_boost_cpu; + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, + CPUFREQ_RELATION_H); +boostcomplete: + return; + } + + /* + * Every sampling_rate, we check, if current idle time is less + * than 20% (default), then we try to increase frequency + * Every sampling_rate*sampling_down_factor, we check, if current + * idle time is more than 80%, then we try to decrease frequency + * + * Any frequency increase takes it to the maximum frequency. 
+ * Frequency reduction happens at minimum steps of + * 5% (default) of maximum frequency + */ + + /* Get Absolute Load */ + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time; + unsigned int idle_time, wall_time; + + j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + + wall_time = (unsigned int) + (cur_wall_time - j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) + (cur_idle_time - j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + if (dbs_tuners_ins.ignore_nice) { + u64 cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_dbs_info->prev_cpu_nice; + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + if (load > max_load) + max_load = load; + //max_load += load; + //pr_alert("LOAD CHECK2: %d-%d", load, max_load); + } + //max_load = max_load / num_online_cpus(); + /* + * break out if we 'cannot' reduce the speed as the user might + * want freq_step to be zero + */ + if (dbs_tuners_ins.freq_step == 0) + return; + + if (policy->cpu == 0) + { + for (cpu = 1; cpu < CPUS_AVAILABLE; cpu++) + { + if (max_load >= hotplug_cpu_enable_up[cpu] && (!cpu_online(cpu)) && hotplug_cpu_lockout[cpu] != 2) + { + if (Lcpu_hotplug_block_cycles > dbs_tuners_ins.cpu_hotplug_block_cycles) + { + hotplug_cpu_single_up[cpu] = 1; + hotplug_flag_on = true; + Lcpu_hotplug_block_cycles = 0; + } + Lcpu_hotplug_block_cycles++; + break; + } + else if (max_load <= hotplug_cpu_enable_down[CPUS_AVAILABLE - cpu] && (cpu_online(CPUS_AVAILABLE - cpu)) && hotplug_cpu_lockout[CPUS_AVAILABLE - cpu] != 1) + { + hotplug_cpu_single_down[CPUS_AVAILABLE - cpu] = 1; + hotplug_flag_off = true; + break; + } + } + //pr_alert("LOAD CHECK: %d-%d-%d-%d-%d-%d-%d\n", max_load, hotplug_cpu_single_up[1], hotplug_cpu_single_up[2], hotplug_cpu_single_up[3], hotplug_cpu_enable_up[1], hotplug_cpu_enable_up[2], hotplug_cpu_enable_up[3]); + + /* Check for frequency increase is greater than hotplug value */ + //CPUS_AVAILABLE + if (hotplug_flag_on) { + if (policy->cur > (policy->min * 2)) + { + if (Lcpu_up_block_cycles > dbs_tuners_ins.cpu_down_block_cycles && (dbs_tuners_ins.no_extra_cores_screen_off == 0 || (dbs_tuners_ins.no_extra_cores_screen_off == 1 && screen_is_on))) + { + hotplug_flag_on = false; + if (!hotplugInProgress && policy->cpu == 0) + queue_work_on(policy->cpu, dbs_wq, &hotplug_online_work); + Lcpu_up_block_cycles = 0; + } + Lcpu_up_block_cycles++; + } + } + } + + /* Check for frequency increase */ + if (max_load > dbs_tuners_ins.up_threshold) { + this_dbs_info->down_skip = 0; + + /* if we are already at full speed then break out early */ + if (this_dbs_info->requested_freq == policy->max) + return; + + freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; + + /* max freq cannot be less than 100. But who knows.... 
*/ + if (unlikely(freq_target == 0)) + freq_target = 5; + + this_dbs_info->requested_freq += freq_target; + if (this_dbs_info->requested_freq > policy->max) + this_dbs_info->requested_freq = policy->max; + + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, CPUFREQ_RELATION_H); + if (dbs_tuners_ins.sync_extra_cores && policy->cpu == 0) + setExtraCores(this_dbs_info->requested_freq); + return; + } + + if (policy->cpu == 0 && hotplug_flag_off && !dbs_tuners_ins.disable_hotplugging && !disable_hotplugging_chrg_override && disable_hotplug_bt_active == false) { + if (num_online_cpus() > 1) + { + if (Lcpu_down_block_cycles > dbs_tuners_ins.cpu_down_block_cycles) + { + hotplug_flag_off = false; + if (!hotplugInProgress && policy->cpu == 0) + queue_work_on(policy->cpu, dbs_wq, &hotplug_offline_work); + Lcpu_down_block_cycles = 0; + } + Lcpu_down_block_cycles++; + } + } + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus 10 points under the threshold. + */ + if (max_load < (dbs_tuners_ins.down_threshold - 10)) { + freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; + + this_dbs_info->requested_freq -= freq_target; + if (this_dbs_info->requested_freq < policy->min) + this_dbs_info->requested_freq = policy->min; + + /* + * if we cannot reduce the frequency anymore, break out early + */ + if (policy->cur == policy->min) + return; + + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, CPUFREQ_RELATION_H); + if (dbs_tuners_ins.sync_extra_cores && policy->cpu == 0) + setExtraCores(this_dbs_info->requested_freq); + return; + } +} + +void setExtraCores(unsigned int requested_freq) +{ + unsigned int cpu; + for (cpu = 1; cpu < CPUS_AVAILABLE; cpu++) + { + if (&trmlpolicy[cpu] != NULL) + { + if (cpu_online(cpu)) + { + //__cpufreq_driver_target(&trmlpolicy[cpu], requested_freq, CPUFREQ_RELATION_H); + kt_freq_control[cpu] = requested_freq; + //pr_alert("BOOST EXTRA CPUs: %d\n", cpu); + } + } + } +} + +void check_boost_cores_up(bool dec1, bool dec2, bool dec3) +{ + bool got_boost_core = false; + + if (!cpu_online(1) && dec1 && hotplug_cpu_lockout[1] != 2) + { + hotplug_cpu_single_up[1] = 1; + got_boost_core = true; + } + if (!cpu_online(2) && dec2 && hotplug_cpu_lockout[2] != 2) + { + hotplug_cpu_single_up[2] = 1; + got_boost_core = true; + } + if (!cpu_online(3) && dec3 && hotplug_cpu_lockout[3] != 2) + { + hotplug_cpu_single_up[3] = 1; + got_boost_core = true; + } + if (got_boost_core) + { + if (!hotplugInProgress) + queue_work_on(0, dbs_wq, &hotplug_online_work); + } +} + +void screen_is_on_relay_kt(bool state) +{ + screen_is_on = state; + if (state == true) + { + if (stored_sampling_rate > 0) + dbs_tuners_ins.sampling_rate = stored_sampling_rate; //max(input, min_sampling_rate); + + check_boost_cores_up(dbs_tuners_ins.boost_2nd_core_on_button, dbs_tuners_ins.boost_3rd_core_on_button, dbs_tuners_ins.boost_4th_core_on_button); + + //pr_alert("SCREEN_IS_ON1: %d-%d\n", dbs_tuners_ins.sampling_rate, stored_sampling_rate); + } + else + { + stored_sampling_rate = dbs_tuners_ins.sampling_rate; + dbs_tuners_ins.sampling_rate = dbs_tuners_ins.sampling_rate_screen_off; + //pr_alert("SCREEN_IS_ON2: %d-%d\n", dbs_tuners_ins.sampling_rate, stored_sampling_rate); + } + +} + +void boostpulse_relay_kt(void) +{ + if (!boostpulse_relayf) + { + bool got_boost_core = false; + + if (dbs_tuners_ins.touch_boost_2nd_core == 0 && dbs_tuners_ins.touch_boost_3rd_core == 0 && 
dbs_tuners_ins.touch_boost_4th_core == 0 && dbs_tuners_ins.touch_boost_cpu == 0) // && dbs_tuners_ins.touch_boost_gpu == 0) + return; + /*if (dbs_tuners_ins.touch_boost_gpu > 0) + { + int bpc = (dbs_tuners_ins.boost_hold_cycles / 2); + if (dbs_tuners_ins.boost_hold_cycles > 0) + boost_the_gpu(dbs_tuners_ins.touch_boost_gpu, bpc); + else + boost_the_gpu(dbs_tuners_ins.touch_boost_gpu, 0); + }*/ + check_boost_cores_up(dbs_tuners_ins.touch_boost_2nd_core, dbs_tuners_ins.touch_boost_3rd_core, dbs_tuners_ins.touch_boost_4th_core); + + boostpulse_relayf = true; + boost_hold_cycles_cnt = 0; + //dbs_tuners_ins.sampling_rate = min_sampling_rate; + //pr_info("BOOSTPULSE RELAY KT"); + } + else + { + /*if (dbs_tuners_ins.touch_boost_gpu > 0) + { + int bpc = (dbs_tuners_ins.boost_hold_cycles / 2); + if (dbs_tuners_ins.boost_hold_cycles > 0) + boost_the_gpu(dbs_tuners_ins.touch_boost_gpu, bpc); + else + boost_the_gpu(dbs_tuners_ins.touch_boost_gpu, 0); + }*/ + boost_hold_cycles_cnt = 0; + } +} + +static void __cpuinit hotplug_offline_work_fn(struct work_struct *work) +{ + int cpu; + //pr_info("ENTER OFFLINE"); + for_each_online_cpu(cpu) { + if (likely(cpu_online(cpu) && (cpu))) { + if (hotplug_cpu_single_down[cpu]) + { + hotplug_cpu_single_down[cpu] = 0; + cpu_down(cpu); + } + //pr_info("auto_hotplug: CPU%d down.\n", cpu); + } + } + hotplugInProgress = false; +} + +static void __cpuinit hotplug_online_work_fn(struct work_struct *work) +{ + int cpu; + //pr_info("ENTER ONLINE"); + for_each_possible_cpu(cpu) { + if (likely(!cpu_online(cpu) && (cpu))) { + if (hotplug_cpu_single_up[cpu]) + { + hotplug_cpu_single_up[cpu] = 0; + cpu_up(cpu); + } + //pr_info("auto_hotplug: CPU%d up.\n", cpu); + } + } + hotplugInProgress = false; +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + delay -= jiffies % delay; + + mutex_lock(&dbs_info->timer_mutex); + + dbs_check_cpu(dbs_info); + + queue_delayed_work_on(cpu, dbs_wq, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + delay -= jiffies % delay; + + dbs_info->enable = 1; + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + queue_delayed_work_on(dbs_info->cpu, dbs_wq, &dbs_info->work, delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + dbs_info->enable = 0; + cancel_delayed_work_sync(&dbs_info->work); + cancel_work_sync(&hotplug_offline_work); + cancel_work_sync(&hotplug_online_work); +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + ktoonservative_is_active(true); + ktoonservative_is_activebd(true); + ktoonservative_is_activechrg(true); + if (dbs_tuners_ins.boost_2nd_core_on_button == 1 || dbs_tuners_ins.boost_3rd_core_on_button == 1 || dbs_tuners_ins.boost_4th_core_on_button == 1) + { + //kt_is_active_benabled_gpio(true); + kt_is_active_benabled_touchkey(true); + //kt_is_active_benabled_power(true); + } + + 
prev_apenable = apget_enable_auto_hotplug(); + apenable_auto_hotplug(false); + + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_mutex); + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + } + this_dbs_info->cpu = cpu; + this_dbs_info->down_skip = 0; + this_dbs_info->requested_freq = policy->cur; + + mutex_init(&this_dbs_info->timer_mutex); + dbs_enable++; + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + /* policy latency is in nS. Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + min_sampling_rate = (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10)) / 20; + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = 45000; + //max((min_sampling_rate * 20), + //latency * LATENCY_MULTIPLIER); + + cpufreq_register_notifier( + &dbs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + mutex_unlock(&dbs_mutex); + + dbs_timer_init(this_dbs_info); + + break; + + case CPUFREQ_GOV_STOP: + ktoonservative_is_active(false); + ktoonservative_is_activebd(false); + ktoonservative_is_activechrg(false); + //kt_is_active_benabled_gpio(false); + kt_is_active_benabled_touchkey(false); + //kt_is_active_benabled_power(false); + + apenable_auto_hotplug(prev_apenable); + + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + dbs_enable--; + mutex_destroy(&this_dbs_info->timer_mutex); + + /* + * Stop the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 0) + cpufreq_unregister_notifier( + &dbs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + mutex_unlock(&dbs_mutex); + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + { + __cpufreq_driver_target(this_dbs_info->cur_policy, policy->max, CPUFREQ_RELATION_H); + } + else if (policy->min > this_dbs_info->cur_policy->cur) + { + __cpufreq_driver_target(this_dbs_info->cur_policy, policy->min, CPUFREQ_RELATION_L); + } + dbs_check_cpu(this_dbs_info); + mutex_unlock(&this_dbs_info->timer_mutex); + + break; + } + return 0; +} + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_KTOONSERVATIVEQ +static +#endif +struct cpufreq_governor cpufreq_gov_ktoonservative = { + .name = "ktoonservativeq", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +static int __init cpufreq_gov_dbs_init(void) +{ + dbs_wq = alloc_workqueue("ktoonservativeq_dbs_wq", WQ_HIGHPRI, 0); + if (!dbs_wq) { + printk(KERN_ERR "Failed to create ktoonservativeq_dbs_wq workqueue\n"); + return -EFAULT; + } + + INIT_WORK(&hotplug_offline_work, hotplug_offline_work_fn); + INIT_WORK(&hotplug_online_work, hotplug_online_work_fn); + + return cpufreq_register_governor(&cpufreq_gov_ktoonservative); +} + +static void __exit 
cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_ktoonservative); + destroy_workqueue(dbs_wq); +} + +MODULE_AUTHOR("Alexander Clouter "); +MODULE_DESCRIPTION("'cpufreq_ktoonservativeq' - A dynamic cpufreq governor for " + "Low Latency Frequency Transition capable processors " + "optimised for use in a battery environment"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_KTOONSERVATIVEQ +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_nightmare.c b/drivers/cpufreq/cpufreq_nightmare.c new file mode 100644 index 00000000..ece971ca --- /dev/null +++ b/drivers/cpufreq/cpufreq_nightmare.c @@ -0,0 +1,1656 @@ +/* + * drivers/cpufreq/cpufreq_nightmare.c + * + * Copyright (C) 2011 Samsung Electronics co. ltd + * ByungChang Cha + * + * Based on ondemand governor + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Created by Alucard_24@xda + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_HAS_EARLYSUSPEND +#include +#endif +#define EARLYSUSPEND_HOTPLUGLOCK 1 + +/* + * runqueue average + */ + +#define RQ_AVG_TIMER_RATE 10 + +struct runqueue_data { + unsigned int nr_run_avg; + unsigned int update_rate; + int64_t last_time; + int64_t total_time; + struct delayed_work work; + struct workqueue_struct *nr_run_wq; + spinlock_t lock; +}; + +static struct runqueue_data *rq_data; +static void rq_work_fn(struct work_struct *work); + +static void start_rq_work(void) +{ + rq_data->nr_run_avg = 0; + rq_data->last_time = 0; + rq_data->total_time = 0; + if (rq_data->nr_run_wq == NULL) + rq_data->nr_run_wq = + create_singlethread_workqueue("nr_run_avg"); + + queue_delayed_work(rq_data->nr_run_wq, &rq_data->work, + msecs_to_jiffies(rq_data->update_rate)); + return; +} + +static void stop_rq_work(void) +{ + if (rq_data->nr_run_wq) + cancel_delayed_work(&rq_data->work); + return; +} + +static int __init init_rq_avg(void) +{ + rq_data = kzalloc(sizeof(struct runqueue_data), GFP_KERNEL); + if (rq_data == NULL) { + pr_err("%s cannot allocate memory\n", __func__); + return -ENOMEM; + } + spin_lock_init(&rq_data->lock); + rq_data->update_rate = RQ_AVG_TIMER_RATE; + INIT_DELAYED_WORK_DEFERRABLE(&rq_data->work, rq_work_fn); + + return 0; +} + +static void rq_work_fn(struct work_struct *work) +{ + int64_t time_diff = 0; + int64_t nr_run = 0; + unsigned long flags = 0; + int64_t cur_time = ktime_to_ns(ktime_get()); + + spin_lock_irqsave(&rq_data->lock, flags); + + if (rq_data->last_time == 0) + rq_data->last_time = cur_time; + if (rq_data->nr_run_avg == 0) + rq_data->total_time = 0; + + nr_run = nr_running() * 100; + time_diff = cur_time - rq_data->last_time; + do_div(time_diff, 1000 * 1000); + + if (time_diff != 0 && rq_data->total_time != 0) { + nr_run = (nr_run * time_diff) + + (rq_data->nr_run_avg * rq_data->total_time); + do_div(nr_run, rq_data->total_time + time_diff); + } + rq_data->nr_run_avg = nr_run; + rq_data->total_time += time_diff; + rq_data->last_time = cur_time; + + if (rq_data->update_rate != 0) + queue_delayed_work(rq_data->nr_run_wq, &rq_data->work, + msecs_to_jiffies(rq_data->update_rate)); + + 
spin_unlock_irqrestore(&rq_data->lock, flags); +} + +static unsigned int get_nr_run_avg(void) +{ + unsigned int nr_run_avg; + unsigned long flags = 0; + + spin_lock_irqsave(&rq_data->lock, flags); + nr_run_avg = rq_data->nr_run_avg; + rq_data->nr_run_avg = 0; + spin_unlock_irqrestore(&rq_data->lock, flags); + + return nr_run_avg; +} + + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_SAMPLING_UP_FACTOR (1) +#define MAX_SAMPLING_UP_FACTOR (100000) +#define DEF_SAMPLING_DOWN_FACTOR (2) +#define MAX_SAMPLING_DOWN_FACTOR (100000) +#define DEF_FREQ_STEP_DEC (5) + +#define DEF_SAMPLING_RATE (60000) +#define MIN_SAMPLING_RATE (10000) +#define MAX_HOTPLUG_RATE (40u) + +#define DEF_MAX_CPU_LOCK (0) +#define DEF_MIN_CPU_LOCK (0) +#define DEF_UP_NR_CPUS (1) +#define DEF_CPU_UP_RATE (10) +#define DEF_CPU_DOWN_RATE (20) +#define DEF_FREQ_STEP (30) + +#define DEF_START_DELAY (0) + +#define FREQ_FOR_RESPONSIVENESS (918000) + +#define HOTPLUG_DOWN_INDEX (0) +#define HOTPLUG_UP_INDEX (1) + +/* CPU freq will be increased if measured load > inc_cpu_load;*/ +#define DEF_INC_CPU_LOAD (80) +#define INC_CPU_LOAD_AT_MIN_FREQ (40) +#define UP_AVG_LOAD (65u) +/* CPU freq will be decreased if measured load < dec_cpu_load;*/ +#define DEF_DEC_CPU_LOAD (60) +#define DOWN_AVG_LOAD (30u) +#define DEF_FREQ_UP_BRAKE (5u) +#define DEF_HOTPLUG_COMPARE_LEVEL (0u) + +#ifdef CONFIG_MACH_MIDAS +static int hotplug_rq[4][2] = { + {0, 100}, {100, 200}, {200, 300}, {300, 0} +}; + +static int hotplug_freq[4][2] = { + {0, 540000}, + {378000, 540000}, + {378000, 540000}, + {378000, 0} +}; +#else +static int hotplug_rq[4][2] = { + {0, 100}, {100, 200}, {200, 300}, {300, 0} +}; + +static int hotplug_freq[4][2] = { + {0, 540000}, + {378000, 540000}, + {378000, 540000}, + {378000, 0} +}; +#endif + +static unsigned int min_sampling_rate; + +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_nightmare(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_NIGHTMARE +static +#endif +struct cpufreq_governor cpufreq_gov_nightmare = { + .name = "nightmare", + .governor = cpufreq_governor_nightmare, + .owner = THIS_MODULE, +}; + +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpufreq_nightmare_cpuinfo { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct work_struct up_work; + struct work_struct down_work; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_table_maxsize; + unsigned int avg_rate_mult; + int cpu; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpufreq_nightmare_cpuinfo, od_cpu_dbs_info); + +struct workqueue_struct *dvfs_workqueues; + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + + +/* + * dbs_mutex protects dbs_enable in governor start/stop. 
+ */ +static DEFINE_MUTEX(dbs_mutex); + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int freq_step_dec; + unsigned int ignore_nice; + unsigned int sampling_down_factor; + unsigned int io_is_busy; + /* nightmare tuners */ + unsigned int freq_step; + unsigned int cpu_up_rate; + unsigned int cpu_down_rate; + unsigned int up_nr_cpus; + unsigned int max_cpu_lock; + unsigned int min_cpu_lock; + atomic_t hotplug_lock; + unsigned int dvfs_debug; + unsigned int max_freq; + unsigned int min_freq; +#ifdef CONFIG_HAS_EARLYSUSPEND + int early_suspend; +#endif + unsigned int inc_cpu_load_at_min_freq; + unsigned int freq_for_responsiveness; + unsigned int inc_cpu_load; + unsigned int dec_cpu_load; + unsigned int up_avg_load; + unsigned int down_avg_load; + unsigned int sampling_up_factor; + unsigned int freq_up_brake; + unsigned int hotplug_compare_level; +} dbs_tuners_ins = { + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .freq_step_dec = DEF_FREQ_STEP_DEC, + .ignore_nice = 0, + .freq_step = DEF_FREQ_STEP, + .cpu_up_rate = DEF_CPU_UP_RATE, + .cpu_down_rate = DEF_CPU_DOWN_RATE, + .up_nr_cpus = DEF_UP_NR_CPUS, + .max_cpu_lock = DEF_MAX_CPU_LOCK, + .min_cpu_lock = DEF_MIN_CPU_LOCK, + .hotplug_lock = ATOMIC_INIT(0), + .dvfs_debug = 0, +#ifdef CONFIG_HAS_EARLYSUSPEND + .early_suspend = -1, +#endif + .inc_cpu_load_at_min_freq = INC_CPU_LOAD_AT_MIN_FREQ, + .freq_for_responsiveness = FREQ_FOR_RESPONSIVENESS, + .inc_cpu_load = DEF_INC_CPU_LOAD, + .dec_cpu_load = DEF_DEC_CPU_LOAD, + .up_avg_load = UP_AVG_LOAD, + .down_avg_load = DOWN_AVG_LOAD, + .sampling_up_factor = DEF_SAMPLING_UP_FACTOR, + .freq_up_brake = DEF_FREQ_UP_BRAKE, + .hotplug_compare_level = DEF_HOTPLUG_COMPARE_LEVEL, +}; + + +/* + * CPU hotplug lock interface + */ + +static atomic_t g_hotplug_count = ATOMIC_INIT(0); +static atomic_t g_hotplug_lock = ATOMIC_INIT(0); + +static void apply_hotplug_lock(void) +{ + int online, possible, lock, flag; + struct work_struct *work; + struct cpufreq_nightmare_cpuinfo *dbs_info; + + /* do turn_on/off cpus */ + dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ + online = num_online_cpus(); + possible = num_possible_cpus(); + lock = atomic_read(&g_hotplug_lock); + flag = lock - online; + + if (lock == 0 || flag == 0) + return; + + work = flag > 0 ? 
&dbs_info->up_work : &dbs_info->down_work; + + pr_debug("%s online %d possible %d lock %d flag %d %d\n", + __func__, online, possible, lock, flag, (int)abs(flag)); + + queue_work_on(dbs_info->cpu, dvfs_workqueues, work); +} + +int cpufreq_nightmare_cpu_lock(int num_core) +{ + int prev_lock; + + if (num_core < 1 || num_core > num_possible_cpus()) + return -EINVAL; + + prev_lock = atomic_read(&g_hotplug_lock); + + if (prev_lock != 0 && prev_lock < num_core) + return -EINVAL; + else if (prev_lock == num_core) + atomic_inc(&g_hotplug_count); + + atomic_set(&g_hotplug_lock, num_core); + atomic_set(&g_hotplug_count, 1); + apply_hotplug_lock(); + + return 0; +} + +int cpufreq_nightmare_cpu_unlock(int num_core) +{ + int prev_lock = atomic_read(&g_hotplug_lock); + + if (prev_lock < num_core) + return 0; + else if (prev_lock == num_core) + atomic_dec(&g_hotplug_count); + + if (atomic_read(&g_hotplug_count) == 0) + atomic_set(&g_hotplug_lock, 0); + + return 0; +} + +void cpufreq_nightmare_min_cpu_lock(unsigned int num_core) +{ + int online, flag; + struct cpufreq_nightmare_cpuinfo *dbs_info; + + dbs_tuners_ins.min_cpu_lock = min(num_core, num_possible_cpus()); + + dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ + online = num_online_cpus(); + flag = (int)num_core - online; + if (flag <= 0) + return; + queue_work_on(dbs_info->cpu, dvfs_workqueues, &dbs_info->up_work); +} + +void cpufreq_nightmare_min_cpu_unlock(void) +{ + int online, lock, flag; + struct cpufreq_nightmare_cpuinfo *dbs_info; + + dbs_tuners_ins.min_cpu_lock = 0; + + dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ + online = num_online_cpus(); + lock = atomic_read(&g_hotplug_lock); + if (lock == 0) + return; + flag = lock - online; + if (flag >= 0) + return; + queue_work_on(dbs_info->cpu, dvfs_workqueues, &dbs_info->down_work); +} + +/* + * History of CPU usage + */ +struct cpu_usage { + unsigned int freq; + int load[NR_CPUS]; + unsigned int rq_avg; + unsigned int avg_load; +}; + +struct cpu_usage_history { + struct cpu_usage usage[MAX_HOTPLUG_RATE]; + unsigned int num_hist; +}; + +struct cpu_usage_history *hotplug_histories; + +static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) +{ + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + + idle_time = cur_wall_time - busy_time; + if (wall) + *wall = jiffies_to_usecs(cur_wall_time); + + return jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, NULL); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + else + idle_time += get_cpu_iowait_time_us(cpu, wall); + + return idle_time; +} + +static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, + cputime64_t *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + 
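+/*
+ * Added note (not in the original patch): sampling_rate_min is exposed
+ * read-only and simply reports the min_sampling_rate floor chosen when the
+ * governor starts.  The writable tunables defined below are collected in
+ * the "nightmare" attribute group on the global cpufreq kobject, so they
+ * typically show up under /sys/devices/system/cpu/cpufreq/nightmare/.
+ */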
+define_one_global_ro(sampling_rate_min); + +/* cpufreq_nightmare Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(sampling_down_factor, sampling_down_factor); +show_one(ignore_nice_load, ignore_nice); +show_one(freq_step_dec, freq_step_dec); +show_one(freq_step, freq_step); +show_one(cpu_up_rate, cpu_up_rate); +show_one(cpu_down_rate, cpu_down_rate); +show_one(up_nr_cpus, up_nr_cpus); +show_one(max_cpu_lock, max_cpu_lock); +show_one(min_cpu_lock, min_cpu_lock); +show_one(dvfs_debug, dvfs_debug); +show_one(inc_cpu_load_at_min_freq, inc_cpu_load_at_min_freq); +show_one(freq_for_responsiveness, freq_for_responsiveness); +show_one(inc_cpu_load, inc_cpu_load); +show_one(dec_cpu_load, dec_cpu_load); +show_one(up_avg_load, up_avg_load); +show_one(down_avg_load, down_avg_load); +show_one(sampling_up_factor, sampling_up_factor); +show_one(freq_up_brake, freq_up_brake); +show_one(hotplug_compare_level,hotplug_compare_level); + +static ssize_t show_hotplug_lock(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", atomic_read(&g_hotplug_lock)); +} + +#define show_hotplug_param(file_name, num_core, up_down) \ +static ssize_t show_##file_name##_##num_core##_##up_down \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", file_name[num_core - 1][up_down]); \ +} + +#define store_hotplug_param(file_name, num_core, up_down) \ +static ssize_t store_##file_name##_##num_core##_##up_down \ +(struct kobject *kobj, struct attribute *attr, \ + const char *buf, size_t count) \ +{ \ + unsigned int input; \ + int ret; \ + ret = sscanf(buf, "%u", &input); \ + if (ret != 1) \ + return -EINVAL; \ + file_name[num_core - 1][up_down] = input; \ + return count; \ +} + +show_hotplug_param(hotplug_freq, 1, 1); +show_hotplug_param(hotplug_freq, 2, 0); +#ifndef CONFIG_CPU_EXYNOS4210 +show_hotplug_param(hotplug_freq, 2, 1); +show_hotplug_param(hotplug_freq, 3, 0); +show_hotplug_param(hotplug_freq, 3, 1); +show_hotplug_param(hotplug_freq, 4, 0); +#endif + +show_hotplug_param(hotplug_rq, 1, 1); +show_hotplug_param(hotplug_rq, 2, 0); +#ifndef CONFIG_CPU_EXYNOS4210 +show_hotplug_param(hotplug_rq, 2, 1); +show_hotplug_param(hotplug_rq, 3, 0); +show_hotplug_param(hotplug_rq, 3, 1); +show_hotplug_param(hotplug_rq, 4, 0); +#endif + +store_hotplug_param(hotplug_freq, 1, 1); +store_hotplug_param(hotplug_freq, 2, 0); +#ifndef CONFIG_CPU_EXYNOS4210 +store_hotplug_param(hotplug_freq, 2, 1); +store_hotplug_param(hotplug_freq, 3, 0); +store_hotplug_param(hotplug_freq, 3, 1); +store_hotplug_param(hotplug_freq, 4, 0); +#endif + +store_hotplug_param(hotplug_rq, 1, 1); +store_hotplug_param(hotplug_rq, 2, 0); +#ifndef CONFIG_CPU_EXYNOS4210 +store_hotplug_param(hotplug_rq, 2, 1); +store_hotplug_param(hotplug_rq, 3, 0); +store_hotplug_param(hotplug_rq, 3, 1); +store_hotplug_param(hotplug_rq, 4, 0); +#endif + +define_one_global_rw(hotplug_freq_1_1); +define_one_global_rw(hotplug_freq_2_0); +#ifndef CONFIG_CPU_EXYNOS4210 +define_one_global_rw(hotplug_freq_2_1); +define_one_global_rw(hotplug_freq_3_0); +define_one_global_rw(hotplug_freq_3_1); +define_one_global_rw(hotplug_freq_4_0); +#endif + +define_one_global_rw(hotplug_rq_1_1); +define_one_global_rw(hotplug_rq_2_0); +#ifndef CONFIG_CPU_EXYNOS4210 
+define_one_global_rw(hotplug_rq_2_1); +define_one_global_rw(hotplug_rq_3_0); +define_one_global_rw(hotplug_rq_3_1); +define_one_global_rw(hotplug_rq_4_0); +#endif + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + return count; +} + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + dbs_tuners_ins.io_is_busy = !!input; + return count; +} + +static ssize_t store_sampling_down_factor(struct kobject *a, + struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + dbs_tuners_ins.sampling_down_factor = input; + + return count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpufreq_nightmare_cpuinfo *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = + get_cpu_idle_time(j, &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + return count; +} + +static ssize_t store_freq_step_dec(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.freq_step_dec = min(input, 100u); + return count; +} + +static ssize_t store_freq_step(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.freq_step = min(input, 100u); + return count; +} + +static ssize_t store_cpu_up_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.cpu_up_rate = min(input, MAX_HOTPLUG_RATE); + return count; +} + +static ssize_t store_cpu_down_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.cpu_down_rate = min(input, MAX_HOTPLUG_RATE); + return count; +} + +static ssize_t store_up_nr_cpus(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.up_nr_cpus = min(input, num_possible_cpus()); + return count; +} + +static ssize_t store_max_cpu_lock(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.max_cpu_lock = min(input, 
num_possible_cpus()); + return count; +} + +static ssize_t store_min_cpu_lock(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + if (input == 0) + cpufreq_nightmare_min_cpu_unlock(); + else + cpufreq_nightmare_min_cpu_lock(input); + return count; +} + +static ssize_t store_hotplug_lock(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + int prev_lock; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + input = min(input, num_possible_cpus()); + prev_lock = atomic_read(&dbs_tuners_ins.hotplug_lock); + + if (prev_lock) + cpufreq_nightmare_cpu_unlock(prev_lock); + + if (input == 0) { + atomic_set(&dbs_tuners_ins.hotplug_lock, 0); + return count; + } + + ret = cpufreq_nightmare_cpu_lock(input); + if (ret) { + printk(KERN_ERR "[HOTPLUG] already locked with smaller value %d < %d\n", + atomic_read(&g_hotplug_lock), input); + return ret; + } + + atomic_set(&dbs_tuners_ins.hotplug_lock, input); + + return count; +} + +static ssize_t store_dvfs_debug(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.dvfs_debug = input > 0; + return count; +} + +static ssize_t store_inc_cpu_load_at_min_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > 100) { + return -EINVAL; + } + dbs_tuners_ins.inc_cpu_load_at_min_freq = min(input,dbs_tuners_ins.inc_cpu_load); + return count; +} + +static ssize_t store_freq_for_responsiveness(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.freq_for_responsiveness = input; + return count; +} + +/* inc_cpu_load */ +static ssize_t store_inc_cpu_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.inc_cpu_load = max(min(input,100u),10u); + return count; +} + +/* dec_cpu_load */ +static ssize_t store_dec_cpu_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.dec_cpu_load = max(min(input,95u),5u); + return count; +} + +/* up_avg_load */ +static ssize_t store_up_avg_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.up_avg_load = max(min(input,100u),10u); + return count; +} + +/* down_avg_load */ +static ssize_t store_down_avg_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.down_avg_load = max(min(input,95u),5u); + return count; +} + +/* sampling_up_factor */ +static ssize_t store_sampling_up_factor(struct kobject *a, + struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > 
MAX_SAMPLING_UP_FACTOR || input < 1) + return -EINVAL; + dbs_tuners_ins.sampling_up_factor = input; + + return count; +} + +/* freq_up_brake */ +static ssize_t store_freq_up_brake(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1 || input < 0 || input > 100) + return -EINVAL; + + if (input == dbs_tuners_ins.freq_up_brake) { /* nothing to do */ + return count; + } + + dbs_tuners_ins.freq_up_brake = input; + + return count; +} + +/* hotplug_compare_level */ +static ssize_t store_hotplug_compare_level(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1 || input < 0 || input > 1) + return -EINVAL; + + if (input == dbs_tuners_ins.hotplug_compare_level) { /* nothing to do */ + return count; + } + + dbs_tuners_ins.hotplug_compare_level = input; + + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(sampling_down_factor); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(freq_step_dec); +define_one_global_rw(freq_step); +define_one_global_rw(cpu_up_rate); +define_one_global_rw(cpu_down_rate); +define_one_global_rw(up_nr_cpus); +define_one_global_rw(max_cpu_lock); +define_one_global_rw(min_cpu_lock); +define_one_global_rw(hotplug_lock); +define_one_global_rw(dvfs_debug); +define_one_global_rw(inc_cpu_load_at_min_freq); +define_one_global_rw(freq_for_responsiveness); +define_one_global_rw(inc_cpu_load); +define_one_global_rw(dec_cpu_load); +define_one_global_rw(up_avg_load); +define_one_global_rw(down_avg_load); +define_one_global_rw(sampling_up_factor); +define_one_global_rw(freq_up_brake); +define_one_global_rw(hotplug_compare_level); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_min.attr, + &sampling_rate.attr, + &sampling_down_factor.attr, + &ignore_nice_load.attr, + &io_is_busy.attr, + &freq_step_dec.attr, + &freq_step.attr, + &cpu_up_rate.attr, + &cpu_down_rate.attr, + &up_nr_cpus.attr, + /* priority: hotplug_lock > max_cpu_lock > min_cpu_lock + Exception: hotplug_lock on early_suspend uses min_cpu_lock */ + &max_cpu_lock.attr, + &min_cpu_lock.attr, + &hotplug_lock.attr, + &dvfs_debug.attr, + &hotplug_freq_1_1.attr, + &hotplug_freq_2_0.attr, +#ifndef CONFIG_CPU_EXYNOS4210 + &hotplug_freq_2_1.attr, + &hotplug_freq_3_0.attr, + &hotplug_freq_3_1.attr, + &hotplug_freq_4_0.attr, +#endif + &hotplug_rq_1_1.attr, + &hotplug_rq_2_0.attr, +#ifndef CONFIG_CPU_EXYNOS4210 + &hotplug_rq_2_1.attr, + &hotplug_rq_3_0.attr, + &hotplug_rq_3_1.attr, + &hotplug_rq_4_0.attr, +#endif + &inc_cpu_load_at_min_freq.attr, + &freq_for_responsiveness.attr, + &inc_cpu_load.attr, + &dec_cpu_load.attr, + &up_avg_load.attr, + &down_avg_load.attr, + &sampling_up_factor.attr, + &freq_up_brake.attr, + &hotplug_compare_level.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "nightmare", +}; + +/************************** sysfs end ************************/ + +static void __ref cpu_up_work(struct work_struct *work) +{ + int cpu; + int online = num_online_cpus(); + int nr_up = dbs_tuners_ins.up_nr_cpus; + int min_cpu_lock = dbs_tuners_ins.min_cpu_lock; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock && min_cpu_lock) + nr_up = max(hotplug_lock, min_cpu_lock) - online; + else if (hotplug_lock) + nr_up = hotplug_lock - online; + else if 
(min_cpu_lock) + nr_up = max(nr_up, min_cpu_lock - online); + + if (online == 1) { + printk(KERN_ERR "CPU_UP 3\n"); + cpu_up(num_possible_cpus() - 1); + nr_up -= 1; + } + + for_each_cpu_not(cpu, cpu_online_mask) { + if (nr_up-- == 0) + break; + if (cpu == 0) + continue; + printk(KERN_ERR "CPU_UP %d\n", cpu); + cpu_up(cpu); + } +} + +static void cpu_down_work(struct work_struct *work) +{ + int cpu; + int online = num_online_cpus(); + int nr_down = 1; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock) + nr_down = online - hotplug_lock; + + for_each_online_cpu(cpu) { + if (cpu == 0) + continue; + printk(KERN_ERR "CPU_DOWN %d\n", cpu); + cpu_down(cpu); + if (--nr_down == 0) + break; + } +} + +static void debug_hotplug_check(int which, int rq_avg, int freq, + struct cpu_usage *usage) +{ + int cpu; + printk(KERN_ERR "CHECK %s rq %d.%02d freq %d [", which ? "up" : "down", + rq_avg / 100, rq_avg % 100, freq); + for_each_online_cpu(cpu) { + printk(KERN_ERR "(%d, %d), ", cpu, usage->load[cpu]); + } + printk(KERN_ERR "]\n"); +} + +static int check_up(void) +{ + int num_hist = hotplug_histories->num_hist; + struct cpu_usage *usage; + int freq, rq_avg; + int avg_load; + int i; + int up_rate = dbs_tuners_ins.cpu_up_rate; + unsigned int up_avg_load = dbs_tuners_ins.up_avg_load; + unsigned int hotplug_compare_level = dbs_tuners_ins.hotplug_compare_level; + int up_freq, up_rq; + int min_freq = INT_MAX; + int min_rq_avg = INT_MAX; + int min_avg_load = INT_MAX; + int online; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock > 0) + return 0; + + online = num_online_cpus(); + up_freq = hotplug_freq[online - 1][HOTPLUG_UP_INDEX]; + up_rq = hotplug_rq[online - 1][HOTPLUG_UP_INDEX]; + + if (online == num_possible_cpus()) + return 0; + + if (dbs_tuners_ins.max_cpu_lock != 0 + && online >= dbs_tuners_ins.max_cpu_lock) + return 0; + + if (dbs_tuners_ins.min_cpu_lock != 0 + && online < dbs_tuners_ins.min_cpu_lock) + return 1; + + if (num_hist == 0 || num_hist % up_rate) + return 0; + + if (hotplug_compare_level == 0) { + for (i = num_hist - 1; i >= num_hist - up_rate; --i) { + usage = &hotplug_histories->usage[i]; + + freq = usage->freq; + rq_avg = usage->rq_avg; + avg_load = usage->avg_load; + + min_freq = min(min_freq, freq); + min_rq_avg = min(min_rq_avg, rq_avg); + min_avg_load = min(min_avg_load, avg_load); + + if (dbs_tuners_ins.dvfs_debug) + debug_hotplug_check(1, rq_avg, freq, usage); + } + } else { + usage = &hotplug_histories->usage[num_hist - 1]; + min_freq = usage->freq; + min_rq_avg = usage->rq_avg; + min_avg_load = usage->avg_load; + if (dbs_tuners_ins.dvfs_debug) + debug_hotplug_check(1, min_rq_avg, min_freq, usage); + } + + if (min_freq >= up_freq && min_rq_avg > up_rq) { + if (online >= 1) { + if (min_avg_load < up_avg_load) + return 0; + } + printk(KERN_ERR "[HOTPLUG IN] %s %d>=%d && %d>%d\n", + __func__, min_freq, up_freq, min_rq_avg, up_rq); + hotplug_histories->num_hist = 0; + return 1; + } + return 0; +} + +static int check_down(void) +{ + int num_hist = hotplug_histories->num_hist; + struct cpu_usage *usage; + int freq, rq_avg; + int avg_load; + int i; + int down_rate = dbs_tuners_ins.cpu_down_rate; + unsigned int down_avg_load = dbs_tuners_ins.down_avg_load; + unsigned int hotplug_compare_level = dbs_tuners_ins.hotplug_compare_level; + int down_freq, down_rq; + int max_freq = 0; + int max_rq_avg = 0; + int max_avg_load = 0; + int online; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock > 0) + return 0; + + online = 
num_online_cpus(); + down_freq = hotplug_freq[online - 1][HOTPLUG_DOWN_INDEX]; + down_rq = hotplug_rq[online - 1][HOTPLUG_DOWN_INDEX]; + + if (online == 1) + return 0; + + if (dbs_tuners_ins.max_cpu_lock != 0 + && online > dbs_tuners_ins.max_cpu_lock) + return 1; + + if (dbs_tuners_ins.min_cpu_lock != 0 + && online <= dbs_tuners_ins.min_cpu_lock) + return 0; + + if (num_hist == 0 || num_hist % down_rate) + return 0; + + if (hotplug_compare_level == 0) { + for (i = num_hist - 1; i >= num_hist - down_rate; --i) { + usage = &hotplug_histories->usage[i]; + + freq = usage->freq; + rq_avg = usage->rq_avg; + avg_load = usage->avg_load; + + max_freq = max(max_freq, freq); + max_rq_avg = max(max_rq_avg, rq_avg); + max_avg_load = max(max_avg_load, avg_load); + + if (dbs_tuners_ins.dvfs_debug) + debug_hotplug_check(0, rq_avg, freq, usage); + } + } else { + usage = &hotplug_histories->usage[num_hist - 1]; + max_freq = usage->freq; + max_rq_avg = usage->rq_avg; + max_avg_load = usage->avg_load; + if (dbs_tuners_ins.dvfs_debug) + debug_hotplug_check(0, max_rq_avg, max_freq, usage); + } + + if ((max_freq <= down_freq && max_rq_avg <= down_rq) || (online >= 2 && max_avg_load < down_avg_load)) { + printk(KERN_ERR "[HOTPLUG OUT] %s %d<=%d && %d<%d\n", + __func__, max_freq, down_freq, max_rq_avg, down_rq); + hotplug_histories->num_hist = 0; + return 1; + } + + return 0; +} + +static void dbs_check_cpu(struct cpufreq_nightmare_cpuinfo *this_dbs_info) +{ + struct cpufreq_policy *policy; + unsigned int j; + int num_hist = hotplug_histories->num_hist; + int max_hotplug_rate = max(dbs_tuners_ins.cpu_up_rate,dbs_tuners_ins.cpu_down_rate); + int inc_cpu_load = dbs_tuners_ins.inc_cpu_load; + int dec_cpu_load = dbs_tuners_ins.dec_cpu_load; + unsigned int avg_rate_mult = 0; + + /* add total_load, avg_load to get average load */ + unsigned int total_load = 0; + unsigned int avg_load = 0; + int rq_avg = 0; + policy = this_dbs_info->cur_policy; + + hotplug_histories->usage[num_hist].freq = policy->cur; + hotplug_histories->usage[num_hist].rq_avg = get_nr_run_avg(); + + /* add total_load, avg_load to get average load */ + rq_avg = hotplug_histories->usage[num_hist].rq_avg; + + ++hotplug_histories->num_hist; + + for_each_cpu(j, policy->cpus) { + struct cpufreq_nightmare_cpuinfo *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + cputime64_t prev_wall_time, prev_idle_time, prev_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + int load; + //int freq_avg; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + prev_wall_time = j_dbs_info->prev_cpu_wall; + prev_idle_time = j_dbs_info->prev_cpu_idle; + prev_iowait_time = j_dbs_info->prev_cpu_iowait; + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + prev_wall_time); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + prev_idle_time); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, + prev_iowait_time); + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) { + u64 cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_dbs_info->prev_cpu_nice; + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + 
j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + + if (cpu_online(j)) { + total_load += load; + hotplug_histories->usage[num_hist].load[j] = load; + } else { + hotplug_histories->usage[num_hist].load[j] = -1; + } + + } + /* calculate the average load across all related CPUs */ + avg_load = total_load / num_online_cpus(); + hotplug_histories->usage[num_hist].avg_load = avg_load; + + /* Check for CPU hotplug */ + if (check_up()) { + queue_work_on(this_dbs_info->cpu, dvfs_workqueues,&this_dbs_info->up_work); + } + else if (check_down()) { + queue_work_on(this_dbs_info->cpu, dvfs_workqueues,&this_dbs_info->down_work); + } + if (hotplug_histories->num_hist == max_hotplug_rate) + hotplug_histories->num_hist = 0; + + /* CPUs Online Scale Frequency*/ + for_each_cpu(j, policy->cpus) { + struct cpufreq_nightmare_cpuinfo *j_dbs_info; + int load; + int index; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + if (cpu_online(j)) { + index = 0; + load = hotplug_histories->usage[num_hist].load[j]; + // just a tips to scale up the frequency fastly + if (j_dbs_info->cur_policy->cur < dbs_tuners_ins.freq_for_responsiveness) + inc_cpu_load = dbs_tuners_ins.inc_cpu_load_at_min_freq; + else + inc_cpu_load = dbs_tuners_ins.inc_cpu_load; + + // Check for frequency increase or for frequency decrease + if (load >= inc_cpu_load) { + unsigned int inc_load = (load * j_dbs_info->cur_policy->min) / 100; + unsigned int inc_step = (dbs_tuners_ins.freq_step * j_dbs_info->cur_policy->min) / 100; + unsigned int inc; + unsigned int freq_up = 0; + + avg_rate_mult += dbs_tuners_ins.sampling_up_factor; + + // if we cannot increment the frequency anymore, break out early + if (j_dbs_info->cur_policy->cur == j_dbs_info->cur_policy->max) { + continue; + } + + inc = inc_load + inc_step; + inc -= (dbs_tuners_ins.freq_up_brake * j_dbs_info->cur_policy->min) / 100; + + freq_up = min(j_dbs_info->cur_policy->max,j_dbs_info->cur_policy->cur + inc); + + if (freq_up != j_dbs_info->cur_policy->cur) { + __cpufreq_driver_target(j_dbs_info->cur_policy, freq_up, CPUFREQ_RELATION_L); + } + + } + else if (load < dec_cpu_load && load > -1) { + unsigned int dec_load = ((100 - load) * (j_dbs_info->cur_policy->min)) / 100; + unsigned int dec_step = (dbs_tuners_ins.freq_step_dec * (j_dbs_info->cur_policy->min)) / 100; + unsigned int dec; + unsigned int freq_down = 0; + + avg_rate_mult += dbs_tuners_ins.sampling_down_factor; + + // if we cannot reduce the frequency anymore, break out early + if (j_dbs_info->cur_policy->cur == j_dbs_info->cur_policy->min) { + continue; + } + + dec = dec_load + dec_step; + + freq_down = max(j_dbs_info->cur_policy->min,j_dbs_info->cur_policy->cur - dec); + + if (freq_down != j_dbs_info->cur_policy->cur) { + __cpufreq_driver_target(j_dbs_info->cur_policy, freq_down, CPUFREQ_RELATION_L); + } + } + } + } + /* We want all CPUs to do sampling nearly on + * same jiffy + */ + if (avg_rate_mult > 0) + this_dbs_info->avg_rate_mult = (avg_rate_mult * 10) / num_online_cpus(); + else + this_dbs_info->avg_rate_mult = 10; + + return; +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpufreq_nightmare_cpuinfo *dbs_info = + container_of(work, struct cpufreq_nightmare_cpuinfo, work.work); + unsigned int cpu = dbs_info->cpu; + int delay; + 
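+	/*
+	 * Added note (not in the original patch): the polling interval below
+	 * is sampling_rate scaled by avg_rate_mult/10.  dbs_check_cpu()
+	 * accumulates sampling_up_factor / sampling_down_factor into
+	 * avg_rate_mult whenever it changes the frequency, so with factors
+	 * above one the samples are spaced further apart after a ramp.
+	 */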
+ mutex_lock(&dbs_info->timer_mutex); + + dbs_check_cpu(dbs_info); + /* We want all CPUs to do sampling nearly on + * same jiffy + */ + delay = usecs_to_jiffies((dbs_tuners_ins.sampling_rate * (dbs_info->avg_rate_mult < 10 ? 10 : dbs_info->avg_rate_mult)) / 10); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + queue_delayed_work_on(cpu, dvfs_workqueues, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpufreq_nightmare_cpuinfo *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(DEF_START_DELAY * 1000 * 1000 + + dbs_tuners_ins.sampling_rate); + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + INIT_WORK(&dbs_info->up_work, cpu_up_work); + INIT_WORK(&dbs_info->down_work, cpu_down_work); + + queue_delayed_work_on(dbs_info->cpu, dvfs_workqueues, + &dbs_info->work, delay + 2 * HZ); +} + +static inline void dbs_timer_exit(struct cpufreq_nightmare_cpuinfo *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); + cancel_work_sync(&dbs_info->up_work); + cancel_work_sync(&dbs_info->down_work); +} + +static int reboot_notifier_call(struct notifier_block *this, + unsigned long code, void *_cmd) +{ + atomic_set(&g_hotplug_lock, 1); + return NOTIFY_DONE; +} + +static struct notifier_block reboot_notifier = { + .notifier_call = reboot_notifier_call, +}; + +#ifdef CONFIG_HAS_EARLYSUSPEND +static struct early_suspend early_suspend; +unsigned int previous_freq_step; +unsigned int previous_sampling_rate; +static void cpufreq_nightmare_early_suspend(struct early_suspend *h) +{ +#if EARLYSUSPEND_HOTPLUGLOCK + dbs_tuners_ins.early_suspend = + atomic_read(&g_hotplug_lock); +#endif + previous_freq_step = dbs_tuners_ins.freq_step; + previous_sampling_rate = dbs_tuners_ins.sampling_rate; + dbs_tuners_ins.freq_step = 10; + dbs_tuners_ins.sampling_rate = 200000; +#if EARLYSUSPEND_HOTPLUGLOCK + atomic_set(&g_hotplug_lock, + (dbs_tuners_ins.min_cpu_lock) ? 
dbs_tuners_ins.min_cpu_lock : 1); + apply_hotplug_lock(); + stop_rq_work(); +#endif +} +static void cpufreq_nightmare_late_resume(struct early_suspend *h) +{ +#if EARLYSUSPEND_HOTPLUGLOCK + atomic_set(&g_hotplug_lock, dbs_tuners_ins.early_suspend); +#endif + dbs_tuners_ins.early_suspend = -1; + dbs_tuners_ins.freq_step = previous_freq_step; + dbs_tuners_ins.sampling_rate = previous_sampling_rate; +#if EARLYSUSPEND_HOTPLUGLOCK + apply_hotplug_lock(); + start_rq_work(); +#endif +} +#endif + +static int cpufreq_governor_nightmare(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpufreq_nightmare_cpuinfo *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + dbs_tuners_ins.max_freq = policy->max; + dbs_tuners_ins.min_freq = policy->min; + hotplug_histories->num_hist = 0; + start_rq_work(); + + mutex_lock(&dbs_mutex); + + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpufreq_nightmare_cpuinfo *j_dbs_info; + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + j_dbs_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + this_dbs_info->cpu = cpu; + this_dbs_info->avg_rate_mult = 20; + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + min_sampling_rate = MIN_SAMPLING_RATE; + dbs_tuners_ins.sampling_rate = DEF_SAMPLING_RATE; + dbs_tuners_ins.io_is_busy = 0; + } + mutex_unlock(&dbs_mutex); + + register_reboot_notifier(&reboot_notifier); + + mutex_init(&this_dbs_info->timer_mutex); + dbs_timer_init(this_dbs_info); + +#if !EARLYSUSPEND_HOTPLUGLOCK + register_pm_notifier(&pm_notifier); +#endif +#ifdef CONFIG_HAS_EARLYSUSPEND + register_early_suspend(&early_suspend); +#endif + break; + + case CPUFREQ_GOV_STOP: +#ifdef CONFIG_HAS_EARLYSUSPEND + unregister_early_suspend(&early_suspend); +#endif +#if !EARLYSUSPEND_HOTPLUGLOCK + unregister_pm_notifier(&pm_notifier); +#endif + + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + mutex_destroy(&this_dbs_info->timer_mutex); + + unregister_reboot_notifier(&reboot_notifier); + + dbs_enable--; + mutex_unlock(&dbs_mutex); + + stop_rq_work(); + + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, + CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, + CPUFREQ_RELATION_L); + + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +static int __init cpufreq_gov_nightmare_init(void) +{ + int ret; + + ret = init_rq_avg(); + if (ret) + return ret; + + hotplug_histories = kzalloc(sizeof(struct cpu_usage_history), GFP_KERNEL); + if (!hotplug_histories) { + pr_err("%s cannot create hotplug history array\n", __func__); + ret = -ENOMEM; + goto err_hist; + } + + dvfs_workqueues = create_workqueue("knightmare"); + if (!dvfs_workqueues) { + pr_err("%s cannot create 
workqueue\n", __func__); + ret = -ENOMEM; + goto err_queue; + } + + ret = cpufreq_register_governor(&cpufreq_gov_nightmare); + if (ret) + goto err_reg; + +#ifdef CONFIG_HAS_EARLYSUSPEND + early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; + early_suspend.suspend = cpufreq_nightmare_early_suspend; + early_suspend.resume = cpufreq_nightmare_late_resume; +#endif + + return ret; + +err_reg: + destroy_workqueue(dvfs_workqueues); +err_queue: + kfree(hotplug_histories); +err_hist: + kfree(rq_data); + return ret; +} + +static void __exit cpufreq_gov_nightmare_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_nightmare); + destroy_workqueue(dvfs_workqueues); + kfree(hotplug_histories); + kfree(rq_data); +} + +MODULE_AUTHOR("ByungChang Cha "); +MODULE_DESCRIPTION("'cpufreq_nightmare' - A dynamic cpufreq/cpuhotplug governor"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_NIGHTMARE +fs_initcall(cpufreq_gov_nightmare_init); +#else +module_init(cpufreq_gov_nightmare_init); +#endif +module_exit(cpufreq_gov_nightmare_exit); diff --git a/drivers/cpufreq/cpufreq_pegasusq.c b/drivers/cpufreq/cpufreq_pegasusq.c new file mode 100644 index 00000000..230abf81 --- /dev/null +++ b/drivers/cpufreq/cpufreq_pegasusq.c @@ -0,0 +1,1636 @@ +/* + * drivers/cpufreq/cpufreq_pegasusq.c + * + * Copyright (C) 2011 Samsung Electronics co. ltd + * ByungChang Cha + * + * Based on ondemand governor + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_HAS_EARLYSUSPEND +#include +#endif +#define EARLYSUSPEND_HOTPLUGLOCK 1 + +/* + * runqueue average + */ + +#define RQ_AVG_TIMER_RATE 10 + +static bool boostpulse_relayf = false; +static unsigned int boostpulse_relay_sr = 0; +static unsigned int Lboostpulse_value = 1134000; + +extern void apenable_auto_hotplug(bool state); +extern bool apget_enable_auto_hotplug(void); +static bool prev_apenable; + +struct runqueue_data { + unsigned int nr_run_avg; + unsigned int update_rate; + int64_t last_time; + int64_t total_time; + struct delayed_work work; + struct workqueue_struct *nr_run_wq; + spinlock_t lock; +}; + +static struct runqueue_data *rq_data; +static void rq_work_fn(struct work_struct *work); + +static void start_rq_work(void) +{ + rq_data->nr_run_avg = 0; + rq_data->last_time = 0; + rq_data->total_time = 0; + if (rq_data->nr_run_wq == NULL) + rq_data->nr_run_wq = + create_singlethread_workqueue("nr_run_avg"); + + queue_delayed_work(rq_data->nr_run_wq, &rq_data->work, + msecs_to_jiffies(rq_data->update_rate)); + return; +} + +static void stop_rq_work(void) +{ + if (rq_data->nr_run_wq) + cancel_delayed_work(&rq_data->work); + return; +} + +static int __init init_rq_avg(void) +{ + rq_data = kzalloc(sizeof(struct runqueue_data), GFP_KERNEL); + if (rq_data == NULL) { + pr_err("%s cannot allocate memory\n", __func__); + return -ENOMEM; + } + spin_lock_init(&rq_data->lock); + rq_data->update_rate = RQ_AVG_TIMER_RATE; + INIT_DELAYED_WORK_DEFERRABLE(&rq_data->work, rq_work_fn); + + return 0; +} + +static void rq_work_fn(struct work_struct *work) +{ + int64_t time_diff = 0; + int64_t nr_run = 0; + unsigned long flags = 0; + int64_t cur_time = 
ktime_to_ns(ktime_get()); + + spin_lock_irqsave(&rq_data->lock, flags); + + if (rq_data->last_time == 0) + rq_data->last_time = cur_time; + if (rq_data->nr_run_avg == 0) + rq_data->total_time = 0; + + nr_run = nr_running() * 100; + time_diff = cur_time - rq_data->last_time; + do_div(time_diff, 1000 * 1000); + + if (time_diff != 0 && rq_data->total_time != 0) { + nr_run = (nr_run * time_diff) + + (rq_data->nr_run_avg * rq_data->total_time); + do_div(nr_run, rq_data->total_time + time_diff); + } + rq_data->nr_run_avg = nr_run; + rq_data->total_time += time_diff; + rq_data->last_time = cur_time; + + if (rq_data->update_rate != 0) + queue_delayed_work(rq_data->nr_run_wq, &rq_data->work, + msecs_to_jiffies(rq_data->update_rate)); + + spin_unlock_irqrestore(&rq_data->lock, flags); +} + +static unsigned int get_nr_run_avg(void) +{ + unsigned int nr_run_avg; + unsigned long flags = 0; + + spin_lock_irqsave(&rq_data->lock, flags); + nr_run_avg = rq_data->nr_run_avg; + rq_data->nr_run_avg = 0; + spin_unlock_irqrestore(&rq_data->lock, flags); + + return nr_run_avg; +} + + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_SAMPLING_DOWN_FACTOR (3) +#define MAX_SAMPLING_DOWN_FACTOR (100000) +#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (14) +#define DEF_FREQUENCY_UP_THRESHOLD (95) + +/* for multiple freq_step */ +#define DEF_UP_THRESHOLD_DIFF (5) + +#define DEF_FREQUENCY_MIN_SAMPLE_RATE (10000) +#define MIN_FREQUENCY_UP_THRESHOLD (11) +#define MAX_FREQUENCY_UP_THRESHOLD (100) +#define DEF_SAMPLING_RATE (40000) +#define MIN_SAMPLING_RATE (10000) +#define MAX_HOTPLUG_RATE (40u) + +#define DEF_MAX_CPU_LOCK (0) +#define DEF_MIN_CPU_LOCK (0) +#define DEF_CPU_UP_FREQ (500000) +#define DEF_CPU_DOWN_FREQ (200000) +#define DEF_UP_NR_CPUS (1) +#define DEF_CPU_UP_RATE (9) +#define DEF_CPU_DOWN_RATE (3) +#define DEF_FREQ_STEP (30) +/* for multiple freq_step */ +#define DEF_FREQ_STEP_DEC (13) + +#define DEF_START_DELAY (0) + +#define UP_THRESHOLD_AT_MIN_FREQ (55) +#define FREQ_FOR_RESPONSIVENESS (400000) +/* for fast decrease */ +#define FREQ_FOR_FAST_DOWN (1200000) +#define UP_THRESHOLD_AT_FAST_DOWN (95) + +#define HOTPLUG_DOWN_INDEX (0) +#define HOTPLUG_UP_INDEX (1) + +#ifdef CONFIG_MACH_MIDAS +static int hotplug_rq[4][2] = { + {0, 100}, {100, 200}, {200, 300}, {300, 0} +}; + +static int hotplug_freq[4][2] = { + {0, 500000}, + {200000, 600000}, + {500000, 800000}, + {500000, 0} +}; +#else +static int hotplug_rq[4][2] = { + {0, 200}, {200, 200}, {200, 300}, {300, 0} +}; + +static int hotplug_freq[4][2] = { + {0, 800000}, + {500000, 500000}, + {200000, 500000}, + {200000, 0} +}; +#endif + +static unsigned int min_sampling_rate; + +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_PEGASUSQ +static +#endif +struct cpufreq_governor cpufreq_gov_pegasusq = { + .name = "pegasusq", + .governor = cpufreq_governor_dbs, + .owner = THIS_MODULE, +}; + +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct work_struct up_work; + struct work_struct down_work; + struct cpufreq_frequency_table *freq_table; + unsigned int rate_mult; + int cpu; + /* + * percpu mutex that serializes governor 
limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); + +struct workqueue_struct *dvfs_workqueue; + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects dbs_enable in governor start/stop. + */ +static DEFINE_MUTEX(dbs_mutex); + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int ignore_nice; + unsigned int sampling_down_factor; + unsigned int io_is_busy; + /* pegasusq tuners */ + unsigned int freq_step; + unsigned int cpu_up_rate; + unsigned int cpu_down_rate; + unsigned int cpu_up_freq; + unsigned int cpu_down_freq; + unsigned int up_nr_cpus; + unsigned int max_cpu_lock; + unsigned int min_cpu_lock; + atomic_t hotplug_lock; + unsigned int dvfs_debug; + unsigned int max_freq; + unsigned int min_freq; +#ifdef CONFIG_HAS_EARLYSUSPEND + int early_suspend; +#endif + unsigned int up_threshold_at_min_freq; + unsigned int freq_for_responsiveness; +} dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, + .ignore_nice = 1, + .freq_step = DEF_FREQ_STEP, + .cpu_up_rate = DEF_CPU_UP_RATE, + .cpu_down_rate = DEF_CPU_DOWN_RATE, + .cpu_up_freq = DEF_CPU_UP_FREQ, + .cpu_down_freq = DEF_CPU_DOWN_FREQ, + .up_nr_cpus = DEF_UP_NR_CPUS, + .max_cpu_lock = DEF_MAX_CPU_LOCK, + .min_cpu_lock = DEF_MIN_CPU_LOCK, + .hotplug_lock = ATOMIC_INIT(0), + .dvfs_debug = 0, +#ifdef CONFIG_HAS_EARLYSUSPEND + .early_suspend = -1, +#endif + .up_threshold_at_min_freq = UP_THRESHOLD_AT_MIN_FREQ, + .freq_for_responsiveness = FREQ_FOR_RESPONSIVENESS, +}; + + +/* + * CPU hotplug lock interface + */ + +static atomic_t g_hotplug_count = ATOMIC_INIT(0); +static atomic_t g_hotplug_lock = ATOMIC_INIT(0); + +static void apply_hotplug_lock(void) +{ + int online, possible, lock, flag; + struct work_struct *work; + struct cpu_dbs_info_s *dbs_info; + + /* do turn_on/off cpus */ + dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ + online = num_online_cpus(); + possible = num_possible_cpus(); + lock = atomic_read(&g_hotplug_lock); + flag = lock - online; + + if (flag == 0) + return; + + work = flag > 0 ? 
&dbs_info->up_work : &dbs_info->down_work; + + pr_debug("%s online %d possible %d lock %d flag %d %d\n", + __func__, online, possible, lock, flag, (int)abs(flag)); + + queue_work_on(dbs_info->cpu, dvfs_workqueue, work); +} + +int cpufreq_pegasusq_cpu_lock(int num_core) +{ + int prev_lock; + + if (num_core < 1 || num_core > num_possible_cpus()) + return -EINVAL; + + prev_lock = atomic_read(&g_hotplug_lock); + + if (prev_lock != 0 && prev_lock < num_core) + return -EINVAL; + else if (prev_lock == num_core) + atomic_inc(&g_hotplug_count); + + atomic_set(&g_hotplug_lock, num_core); + atomic_set(&g_hotplug_count, 1); + apply_hotplug_lock(); + + return 0; +} + +int cpufreq_pegasusq_cpu_unlock(int num_core) +{ + int prev_lock = atomic_read(&g_hotplug_lock); + + if (prev_lock < num_core) + return 0; + else if (prev_lock == num_core) + atomic_dec(&g_hotplug_count); + + if (atomic_read(&g_hotplug_count) == 0) + atomic_set(&g_hotplug_lock, 0); + + return 0; +} + +void cpufreq_pegasusq_min_cpu_lock(unsigned int num_core) +{ + int online, flag; + struct cpu_dbs_info_s *dbs_info; + + dbs_tuners_ins.min_cpu_lock = min(num_core, num_possible_cpus()); + + dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ + online = num_online_cpus(); + flag = (int)num_core - online; + if (flag <= 0) + return; + queue_work_on(dbs_info->cpu, dvfs_workqueue, &dbs_info->up_work); +} + +void cpufreq_pegasusq_min_cpu_unlock(void) +{ + int online, lock, flag; + struct cpu_dbs_info_s *dbs_info; + + dbs_tuners_ins.min_cpu_lock = 0; + + dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ + online = num_online_cpus(); + lock = atomic_read(&g_hotplug_lock); + if (lock == 0) + return; + flag = lock - online; + if (flag >= 0) + return; + queue_work_on(dbs_info->cpu, dvfs_workqueue, &dbs_info->down_work); +} + +/* + * History of CPU usage + */ +struct cpu_usage { + unsigned int freq; + unsigned int load[NR_CPUS]; + unsigned int rq_avg; + unsigned int avg_load; +}; + +struct cpu_usage_history { + struct cpu_usage usage[MAX_HOTPLUG_RATE]; + unsigned int num_hist; +}; + +struct cpu_usage_history *hotplug_history; + +static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, + u64 *wall) +{ + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + + idle_time = cur_wall_time - busy_time; + if (wall) + *wall = jiffies_to_usecs(cur_wall_time); + + return jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, wall); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + + return idle_time; +} + +static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, + cputime64_t *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +static ssize_t show_boostpulse_value(struct kobject *kobj, + struct attribute 
*attr, char *buf) +{ + return sprintf(buf, "%u\n", Lboostpulse_value / 1000); +} + +define_one_global_ro(sampling_rate_min); + +/* cpufreq_pegasusq Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(up_threshold, up_threshold); +show_one(sampling_down_factor, sampling_down_factor); +show_one(ignore_nice_load, ignore_nice); +show_one(down_differential, down_differential); +show_one(freq_step, freq_step); +show_one(cpu_up_rate, cpu_up_rate); +show_one(cpu_down_rate, cpu_down_rate); +show_one(cpu_up_freq, cpu_up_freq); +show_one(cpu_down_freq, cpu_down_freq); +show_one(up_nr_cpus, up_nr_cpus); +show_one(max_cpu_lock, max_cpu_lock); +show_one(min_cpu_lock, min_cpu_lock); +show_one(dvfs_debug, dvfs_debug); +show_one(up_threshold_at_min_freq, up_threshold_at_min_freq); +show_one(freq_for_responsiveness, freq_for_responsiveness); +static ssize_t show_hotplug_lock(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", atomic_read(&g_hotplug_lock)); +} + +#define show_hotplug_param(file_name, num_core, up_down) \ +static ssize_t show_##file_name##_##num_core##_##up_down \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", file_name[num_core - 1][up_down]); \ +} + +#define store_hotplug_param(file_name, num_core, up_down) \ +static ssize_t store_##file_name##_##num_core##_##up_down \ +(struct kobject *kobj, struct attribute *attr, \ + const char *buf, size_t count) \ +{ \ + unsigned int input; \ + int ret; \ + ret = sscanf(buf, "%u", &input); \ + if (ret != 1) \ + return -EINVAL; \ + file_name[num_core - 1][up_down] = input; \ + return count; \ +} + +show_hotplug_param(hotplug_freq, 1, 1); +show_hotplug_param(hotplug_freq, 2, 0); +show_hotplug_param(hotplug_freq, 2, 1); +show_hotplug_param(hotplug_freq, 3, 0); +show_hotplug_param(hotplug_freq, 3, 1); +show_hotplug_param(hotplug_freq, 4, 0); + +show_hotplug_param(hotplug_rq, 1, 1); +show_hotplug_param(hotplug_rq, 2, 0); +show_hotplug_param(hotplug_rq, 2, 1); +show_hotplug_param(hotplug_rq, 3, 0); +show_hotplug_param(hotplug_rq, 3, 1); +show_hotplug_param(hotplug_rq, 4, 0); + +store_hotplug_param(hotplug_freq, 1, 1); +store_hotplug_param(hotplug_freq, 2, 0); +store_hotplug_param(hotplug_freq, 2, 1); +store_hotplug_param(hotplug_freq, 3, 0); +store_hotplug_param(hotplug_freq, 3, 1); +store_hotplug_param(hotplug_freq, 4, 0); + +store_hotplug_param(hotplug_rq, 1, 1); +store_hotplug_param(hotplug_rq, 2, 0); +store_hotplug_param(hotplug_rq, 2, 1); +store_hotplug_param(hotplug_rq, 3, 0); +store_hotplug_param(hotplug_rq, 3, 1); +store_hotplug_param(hotplug_rq, 4, 0); + +define_one_global_rw(hotplug_freq_1_1); +define_one_global_rw(hotplug_freq_2_0); +define_one_global_rw(hotplug_freq_2_1); +define_one_global_rw(hotplug_freq_3_0); +define_one_global_rw(hotplug_freq_3_1); +define_one_global_rw(hotplug_freq_4_0); + +define_one_global_rw(hotplug_rq_1_1); +define_one_global_rw(hotplug_rq_2_0); +define_one_global_rw(hotplug_rq_2_1); +define_one_global_rw(hotplug_rq_3_0); +define_one_global_rw(hotplug_rq_3_1); +define_one_global_rw(hotplug_rq_4_0); + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if 
(ret != 1) + return -EINVAL; + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + return count; +} + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + dbs_tuners_ins.io_is_busy = !!input; + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + dbs_tuners_ins.up_threshold = input; + return count; +} + +static ssize_t store_sampling_down_factor(struct kobject *a, + struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input, j; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + dbs_tuners_ins.sampling_down_factor = input; + + /* Reset down sampling multiplier in case it was active */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->rate_mult = 1; + } + return count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = + get_cpu_idle_time(j, &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + return count; +} + +static ssize_t store_down_differential(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.down_differential = min(input, 100u); + return count; +} + +static ssize_t store_freq_step(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.freq_step = min(input, 100u); + return count; +} + +static ssize_t store_cpu_up_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.cpu_up_rate = min(input, MAX_HOTPLUG_RATE); + return count; +} + +static ssize_t store_cpu_down_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.cpu_down_rate = min(input, MAX_HOTPLUG_RATE); + return count; +} + +static ssize_t store_cpu_up_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.cpu_up_freq = min(input, dbs_tuners_ins.max_freq); + return count; +} + +static ssize_t store_cpu_down_freq(struct 
kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.cpu_down_freq = max(input, dbs_tuners_ins.min_freq); + return count; +} + +static ssize_t store_up_nr_cpus(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.up_nr_cpus = min(input, num_possible_cpus()); + return count; +} + +static ssize_t store_max_cpu_lock(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.max_cpu_lock = min(input, num_possible_cpus()); + return count; +} + +static ssize_t store_min_cpu_lock(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + if (input == 0) + cpufreq_pegasusq_min_cpu_unlock(); + else + cpufreq_pegasusq_min_cpu_lock(input); + return count; +} + +static ssize_t store_hotplug_lock(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + int prev_lock; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + input = min(input, num_possible_cpus()); + prev_lock = atomic_read(&dbs_tuners_ins.hotplug_lock); + + if (prev_lock) + cpufreq_pegasusq_cpu_unlock(prev_lock); + + if (input == 0) { + atomic_set(&dbs_tuners_ins.hotplug_lock, 0); + return count; + } + + ret = cpufreq_pegasusq_cpu_lock(input); + if (ret) { + printk(KERN_ERR "[HOTPLUG] already locked with smaller value %d < %d\n", + atomic_read(&g_hotplug_lock), input); + return ret; + } + + atomic_set(&dbs_tuners_ins.hotplug_lock, input); + + return count; +} + +static ssize_t store_dvfs_debug(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.dvfs_debug = input > 0; + return count; +} + +static ssize_t store_up_threshold_at_min_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + dbs_tuners_ins.up_threshold_at_min_freq = input; + return count; +} + +static ssize_t store_freq_for_responsiveness(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.freq_for_responsiveness = input; + return count; +} + +static ssize_t store_boostpulse_value(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input * 1000 > 2106000) + input = 2106000; + + Lboostpulse_value = input * 1000; + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(up_threshold); +define_one_global_rw(sampling_down_factor); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(down_differential); +define_one_global_rw(freq_step); +define_one_global_rw(cpu_up_rate); 
+define_one_global_rw(cpu_down_rate); +define_one_global_rw(cpu_up_freq); +define_one_global_rw(cpu_down_freq); +define_one_global_rw(up_nr_cpus); +define_one_global_rw(max_cpu_lock); +define_one_global_rw(min_cpu_lock); +define_one_global_rw(hotplug_lock); +define_one_global_rw(dvfs_debug); +define_one_global_rw(up_threshold_at_min_freq); +define_one_global_rw(freq_for_responsiveness); +define_one_global_rw(boostpulse_value); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_min.attr, + &sampling_rate.attr, + &up_threshold.attr, + &sampling_down_factor.attr, + &ignore_nice_load.attr, + &io_is_busy.attr, + &down_differential.attr, + &freq_step.attr, + &cpu_up_rate.attr, + &cpu_down_rate.attr, + &cpu_up_freq.attr, + &cpu_down_freq.attr, + &up_nr_cpus.attr, + /* priority: hotplug_lock > max_cpu_lock > min_cpu_lock + Exception: hotplug_lock on early_suspend uses min_cpu_lock */ + &max_cpu_lock.attr, + &min_cpu_lock.attr, + &hotplug_lock.attr, + &dvfs_debug.attr, + &hotplug_freq_1_1.attr, + &hotplug_freq_2_0.attr, + &hotplug_freq_2_1.attr, + &hotplug_freq_3_0.attr, + &hotplug_freq_3_1.attr, + &hotplug_freq_4_0.attr, + &hotplug_rq_1_1.attr, + &hotplug_rq_2_0.attr, + &hotplug_rq_2_1.attr, + &hotplug_rq_3_0.attr, + &hotplug_rq_3_1.attr, + &hotplug_rq_4_0.attr, + &up_threshold_at_min_freq.attr, + &freq_for_responsiveness.attr, + &boostpulse_value.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "pegasusq", +}; + +/************************** sysfs end ************************/ + +static void __cpuinit cpu_up_work(struct work_struct *work) +{ + int cpu; + int online = num_online_cpus(); + int nr_up = dbs_tuners_ins.up_nr_cpus; + int min_cpu_lock = dbs_tuners_ins.min_cpu_lock; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock && min_cpu_lock) + nr_up = max(hotplug_lock, min_cpu_lock) - online; + else if (hotplug_lock) + nr_up = hotplug_lock - online; + else if (min_cpu_lock) + nr_up = max(nr_up, min_cpu_lock - online); + + if (online == 1) { + printk(KERN_ERR "CPU_UP 3\n"); + cpu_up(num_possible_cpus() - 1); + nr_up -= 1; + } + + for_each_cpu_not(cpu, cpu_online_mask) { + if (nr_up-- == 0) + break; + if (cpu == 0) + continue; + printk(KERN_ERR "CPU_UP %d\n", cpu); + cpu_up(cpu); + } +} + +static void cpu_down_work(struct work_struct *work) +{ + int cpu; + int online = num_online_cpus(); + int nr_down = 1; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock) + nr_down = online - hotplug_lock; + + for_each_online_cpu(cpu) { + if (cpu == 0) + continue; + printk(KERN_ERR "CPU_DOWN %d\n", cpu); + cpu_down(cpu); + if (--nr_down == 0) + break; + } +} + +static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) +{ +#ifndef CONFIG_ARCH_EXYNOS4 + if (p->cur == p->max) + return; +#endif + + __cpufreq_driver_target(p, freq, CPUFREQ_RELATION_L); +} + +/* + * print hotplug debugging info. + * which 1 : UP, 0 : DOWN + */ +static void debug_hotplug_check(int which, int rq_avg, int freq, + struct cpu_usage *usage) +{ + int cpu; + printk(KERN_ERR "CHECK %s rq %d.%02d freq %d [", which ? 
"up" : "down", + rq_avg / 100, rq_avg % 100, freq); + for_each_online_cpu(cpu) { + printk(KERN_ERR "(%d, %d), ", cpu, usage->load[cpu]); + } + printk(KERN_ERR "]\n"); +} + +static int check_up(void) +{ + int num_hist = hotplug_history->num_hist; + struct cpu_usage *usage; + int freq, rq_avg; + int avg_load; + int i; + int up_rate = dbs_tuners_ins.cpu_up_rate; + int up_freq, up_rq; + int min_freq = INT_MAX; + int min_rq_avg = INT_MAX; + int min_avg_load = INT_MAX; + int online; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock > 0) + return 0; + + online = num_online_cpus(); + up_freq = hotplug_freq[online - 1][HOTPLUG_UP_INDEX]; + up_rq = hotplug_rq[online - 1][HOTPLUG_UP_INDEX]; + + if (online == num_possible_cpus()) + return 0; + + if (dbs_tuners_ins.max_cpu_lock != 0 + && online >= dbs_tuners_ins.max_cpu_lock) + return 0; + + if (dbs_tuners_ins.min_cpu_lock != 0 + && online < dbs_tuners_ins.min_cpu_lock) + return 1; + + if (num_hist == 0 || num_hist % up_rate) + return 0; + + for (i = num_hist - 1; i >= num_hist - up_rate; --i) { + usage = &hotplug_history->usage[i]; + + freq = usage->freq; + rq_avg = usage->rq_avg; + avg_load = usage->avg_load; + + min_freq = min(min_freq, freq); + min_rq_avg = min(min_rq_avg, rq_avg); + min_avg_load = min(min_avg_load, avg_load); + + if (dbs_tuners_ins.dvfs_debug) + debug_hotplug_check(1, rq_avg, freq, usage); + } + + if (min_freq >= up_freq && min_rq_avg > up_rq) { + if (online >= 2) { + if (min_avg_load < 65) + return 0; + } + printk(KERN_ERR "[HOTPLUG IN] %s %d>=%d && %d>%d\n", + __func__, min_freq, up_freq, min_rq_avg, up_rq); + hotplug_history->num_hist = 0; + return 1; + } + return 0; +} + +static int check_down(void) +{ + int num_hist = hotplug_history->num_hist; + struct cpu_usage *usage; + int freq, rq_avg; + int avg_load; + int i; + int down_rate = dbs_tuners_ins.cpu_down_rate; + int down_freq, down_rq; + int max_freq = 0; + int max_rq_avg = 0; + int max_avg_load = 0; + int online; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock > 0) + return 0; + + online = num_online_cpus(); + down_freq = hotplug_freq[online - 1][HOTPLUG_DOWN_INDEX]; + down_rq = hotplug_rq[online - 1][HOTPLUG_DOWN_INDEX]; + + if (online == 1) + return 0; + + if (dbs_tuners_ins.max_cpu_lock != 0 + && online > dbs_tuners_ins.max_cpu_lock) + return 1; + + if (dbs_tuners_ins.min_cpu_lock != 0 + && online <= dbs_tuners_ins.min_cpu_lock) + return 0; + + if (num_hist == 0 || num_hist % down_rate) + return 0; + + for (i = num_hist - 1; i >= num_hist - down_rate; --i) { + usage = &hotplug_history->usage[i]; + + freq = usage->freq; + rq_avg = usage->rq_avg; + avg_load = usage->avg_load; + + max_freq = max(max_freq, freq); + max_rq_avg = max(max_rq_avg, rq_avg); + max_avg_load = max(max_avg_load, avg_load); + + if (dbs_tuners_ins.dvfs_debug) + debug_hotplug_check(0, rq_avg, freq, usage); + } + + if ((max_freq <= down_freq && max_rq_avg <= down_rq) + || (online >= 3 && max_avg_load < 30)) { + printk(KERN_ERR "[HOTPLUG OUT] %s %d<=%d && %d<%d\n", + __func__, max_freq, down_freq, max_rq_avg, down_rq); + hotplug_history->num_hist = 0; + return 1; + } + + return 0; +} + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + unsigned int max_load_freq; + + struct cpufreq_policy *policy; + unsigned int j; + int num_hist = hotplug_history->num_hist; + int max_hotplug_rate = max(dbs_tuners_ins.cpu_up_rate, + dbs_tuners_ins.cpu_down_rate); + int up_threshold = dbs_tuners_ins.up_threshold; + + /* add total_load, avg_load to get 
average load */ + unsigned int total_load = 0; + unsigned int avg_load = 0; + int load_each[4] = {-1, -1, -1, -1}; + int rq_avg = 0; + policy = this_dbs_info->cur_policy; + + if (boostpulse_relayf) + { + if (boostpulse_relay_sr != 0) + dbs_tuners_ins.sampling_rate = boostpulse_relay_sr; + boostpulse_relayf = false; + if (policy->cur > Lboostpulse_value) + return; + + __cpufreq_driver_target(policy, Lboostpulse_value, + CPUFREQ_RELATION_H); + return; + } + + hotplug_history->usage[num_hist].freq = policy->cur; + hotplug_history->usage[num_hist].rq_avg = get_nr_run_avg(); + + /* add total_load, avg_load to get average load */ + rq_avg = hotplug_history->usage[num_hist].rq_avg; + + ++hotplug_history->num_hist; + + /* Get Absolute Load - in terms of freq */ + max_load_freq = 0; + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + cputime64_t prev_wall_time, prev_idle_time, prev_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + unsigned int load, load_freq; + int freq_avg; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + prev_wall_time = j_dbs_info->prev_cpu_wall; + prev_idle_time = j_dbs_info->prev_cpu_idle; + prev_iowait_time = j_dbs_info->prev_cpu_iowait; + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + prev_wall_time); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + prev_idle_time); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, + prev_iowait_time); + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) { + cputime64_t cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_dbs_info->prev_cpu_nice; + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + + /* keep load of each CPUs and combined load across all CPUs */ + if (cpu_online(j)) + load_each[j] = load; + total_load += load; + + hotplug_history->usage[num_hist].load[j] = load; + + freq_avg = __cpufreq_driver_getavg(policy, j); + if (freq_avg <= 0) + freq_avg = policy->cur; + + load_freq = load * freq_avg; + if (load_freq > max_load_freq) + max_load_freq = load_freq; + } + /* calculate the average load across all related CPUs */ + avg_load = total_load / num_online_cpus(); + hotplug_history->usage[num_hist].avg_load = avg_load; + //pr_info("LOAD_TIMER - %d - %d - %d - %d", max_load_freq/1000, total_load, avg_load, num_online_cpus()); + + /* Check for CPU hotplug */ + if (check_up()) { + queue_work_on(this_dbs_info->cpu, dvfs_workqueue, + &this_dbs_info->up_work); + } else if (check_down()) { + queue_work_on(this_dbs_info->cpu, dvfs_workqueue, + &this_dbs_info->down_work); + } + if (hotplug_history->num_hist == max_hotplug_rate) + hotplug_history->num_hist = 0; + + /* Check for frequency increase */ + if (policy->cur < dbs_tuners_ins.freq_for_responsiveness) 
+ up_threshold = dbs_tuners_ins.up_threshold_at_min_freq; + /* for fast frequency decrease */ + else + up_threshold = dbs_tuners_ins.up_threshold; + + if (max_load_freq > up_threshold * policy->cur) { + /* for multiple freq_step */ + int inc = policy->max * (dbs_tuners_ins.freq_step + - DEF_FREQ_STEP_DEC * 2) / 100; + int target = 0; + + /* for multiple freq_step */ + if (max_load_freq > (up_threshold + DEF_UP_THRESHOLD_DIFF * 2) + * policy->cur) + inc = policy->max * dbs_tuners_ins.freq_step / 100; + else if (max_load_freq > (up_threshold + DEF_UP_THRESHOLD_DIFF) + * policy->cur) + inc = policy->max * (dbs_tuners_ins.freq_step + - DEF_FREQ_STEP_DEC) / 100; + + target = min(policy->max, policy->cur + inc); + + /* If switching to max speed, apply sampling_down_factor */ + if (policy->cur < policy->max && target == policy->max) + this_dbs_info->rate_mult = + dbs_tuners_ins.sampling_down_factor; + dbs_freq_increase(policy, target); + return; + } + + /* Check for frequency decrease */ +#ifndef CONFIG_ARCH_EXYNOS4 + /* if we cannot reduce the frequency anymore, break out early */ + if (policy->cur == policy->min) + return; +#endif + + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus DOWN_DIFFERENTIAL points under + * the threshold. + */ + if (max_load_freq < + (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * + policy->cur) { + unsigned int freq_next; + unsigned int down_thres; + + freq_next = max_load_freq / + (dbs_tuners_ins.up_threshold - + dbs_tuners_ins.down_differential); + + /* No longer fully busy, reset rate_mult */ + this_dbs_info->rate_mult = 1; + + if (freq_next < policy->min) + freq_next = policy->min; + + + down_thres = dbs_tuners_ins.up_threshold_at_min_freq + - dbs_tuners_ins.down_differential; + + if (freq_next < dbs_tuners_ins.freq_for_responsiveness + && (max_load_freq / freq_next) > down_thres) + freq_next = dbs_tuners_ins.freq_for_responsiveness; + + if (policy->cur == freq_next) + return; + + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } +} + +/*extern void pegasusq_is_active(bool val); + +void boostpulse_relay_pq(void) +{ + if (Lboostpulse_value > 0) + { + //pr_info("BOOST_PULSE_FROM_INTERACTIVE"); + if (dbs_tuners_ins.sampling_rate != min_sampling_rate) + boostpulse_relay_sr = dbs_tuners_ins.sampling_rate; + boostpulse_relayf = true; + dbs_tuners_ins.sampling_rate = min_sampling_rate; + } +}*/ + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + int delay; + + mutex_lock(&dbs_info->timer_mutex); + + dbs_check_cpu(dbs_info); + /* We want all CPUs to do sampling nearly on + * same jiffy + */ + delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate + * dbs_info->rate_mult); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + queue_delayed_work_on(cpu, dvfs_workqueue, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(DEF_START_DELAY * 1000 * 1000 + + dbs_tuners_ins.sampling_rate); + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + + queue_delayed_work_on(dbs_info->cpu, dvfs_workqueue, + &dbs_info->work, delay + 2 * HZ); +} + 
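The timer helpers above keep the per-CPU sampling work aligned: do_dbs_timer() converts dbs_tuners_ins.sampling_rate (scaled by rate_mult) into jiffies and, when more than one CPU is online, subtracts jiffies % delay so each CPU's next expiry lands on a common multiple of the sampling period; dbs_timer_init() does the same for the first kick and adds a 2*HZ grace period. Below is a minimal standalone sketch of that alignment arithmetic only; HZ, the jiffy counter, the sampling rate and the online-CPU count are made-up example values (in the governor they come from the running kernel and the tuners), and usecs_to_jiffies() is approximated for a coarse tick.

#include <stdio.h>

#define HZ 100	/* assumed tick rate for the example: one jiffy = 10 ms */

/* rough userspace stand-in for the kernel helper (rounds up) */
static unsigned long usecs_to_jiffies(unsigned long usecs)
{
	return (usecs + (1000000 / HZ) - 1) / (1000000 / HZ);
}

int main(void)
{
	unsigned long jiffies = 123457;		/* pretend current jiffy counter */
	unsigned long sampling_rate = 40000;	/* DEF_SAMPLING_RATE, in usecs */
	int online_cpus = 2;			/* pretend num_online_cpus() */
	unsigned long delay = usecs_to_jiffies(sampling_rate);

	if (online_cpus > 1)
		delay -= jiffies % delay;	/* snap expiry onto a multiple of the period */

	printf("raw delay %lu jiffies, next expiry at jiffy %lu\n",
	       usecs_to_jiffies(sampling_rate), jiffies + delay);
	return 0;
}

With these numbers the raw period is 4 jiffies and the expiry is pulled back to jiffy 123460; a second CPU running the same computation one or two jiffies later still lands on 123460, which is how the governor keeps its per-CPU samples clustered on nearly the same jiffy.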
+static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); + cancel_work_sync(&dbs_info->up_work); + cancel_work_sync(&dbs_info->down_work); +} + +static int reboot_notifier_call(struct notifier_block *this, + unsigned long code, void *_cmd) +{ + atomic_set(&g_hotplug_lock, 1); + return NOTIFY_DONE; +} + +static struct notifier_block reboot_notifier = { + .notifier_call = reboot_notifier_call, +}; + +#ifdef CONFIG_HAS_EARLYSUSPEND +static struct early_suspend early_suspend; +unsigned int prev_freq_step; +unsigned int prev_sampling_rate; +static void cpufreq_pegasusq_early_suspend(struct early_suspend *h) +{ +#if EARLYSUSPEND_HOTPLUGLOCK + dbs_tuners_ins.early_suspend = + atomic_read(&g_hotplug_lock); +#endif + prev_freq_step = dbs_tuners_ins.freq_step; + prev_sampling_rate = dbs_tuners_ins.sampling_rate; + dbs_tuners_ins.freq_step = 10; + dbs_tuners_ins.sampling_rate = 200000; +#if EARLYSUSPEND_HOTPLUGLOCK + atomic_set(&g_hotplug_lock, + (dbs_tuners_ins.min_cpu_lock) ? dbs_tuners_ins.min_cpu_lock : 1); + apply_hotplug_lock(); + stop_rq_work(); +#endif +} +static void cpufreq_pegasusq_late_resume(struct early_suspend *h) +{ +#if EARLYSUSPEND_HOTPLUGLOCK + atomic_set(&g_hotplug_lock, dbs_tuners_ins.early_suspend); +#endif + dbs_tuners_ins.early_suspend = -1; + dbs_tuners_ins.freq_step = prev_freq_step; + dbs_tuners_ins.sampling_rate = prev_sampling_rate; +#if EARLYSUSPEND_HOTPLUGLOCK + apply_hotplug_lock(); + start_rq_work(); +#endif +} +#endif + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + //pegasusq_is_active(true); + + prev_apenable = apget_enable_auto_hotplug(); + apenable_auto_hotplug(false); + + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + dbs_tuners_ins.max_freq = policy->max; + dbs_tuners_ins.min_freq = policy->min; + hotplug_history->num_hist = 0; + start_rq_work(); + + mutex_lock(&dbs_mutex); + + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + } + this_dbs_info->cpu = cpu; + this_dbs_info->rate_mult = 1; + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + min_sampling_rate = MIN_SAMPLING_RATE; + dbs_tuners_ins.sampling_rate = DEF_SAMPLING_RATE; + dbs_tuners_ins.io_is_busy = 0; + } + mutex_unlock(&dbs_mutex); + + register_reboot_notifier(&reboot_notifier); + + mutex_init(&this_dbs_info->timer_mutex); + dbs_timer_init(this_dbs_info); + +#if !EARLYSUSPEND_HOTPLUGLOCK + register_pm_notifier(&pm_notifier); +#endif +#ifdef CONFIG_HAS_EARLYSUSPEND + register_early_suspend(&early_suspend); +#endif + break; + + case CPUFREQ_GOV_STOP: + //pegasusq_is_active(false); + + apenable_auto_hotplug(prev_apenable); + +#ifdef CONFIG_HAS_EARLYSUSPEND + unregister_early_suspend(&early_suspend); +#endif +#if !EARLYSUSPEND_HOTPLUGLOCK + unregister_pm_notifier(&pm_notifier); +#endif + + 
dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + mutex_destroy(&this_dbs_info->timer_mutex); + + unregister_reboot_notifier(&reboot_notifier); + + dbs_enable--; + mutex_unlock(&dbs_mutex); + + stop_rq_work(); + + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, + CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, + CPUFREQ_RELATION_L); + + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +static int __init cpufreq_gov_dbs_init(void) +{ + int ret; + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, 0); + + ret = init_rq_avg(); + if (ret) + return ret; + + INIT_WORK(&dbs_info->up_work, cpu_up_work); + INIT_WORK(&dbs_info->down_work, cpu_down_work); + + hotplug_history = kzalloc(sizeof(struct cpu_usage_history), GFP_KERNEL); + if (!hotplug_history) { + pr_err("%s cannot create hotplug history array\n", __func__); + ret = -ENOMEM; + goto err_hist; + } + + dvfs_workqueue = create_workqueue("kpegasusq"); + if (!dvfs_workqueue) { + pr_err("%s cannot create workqueue\n", __func__); + ret = -ENOMEM; + goto err_queue; + } + + ret = cpufreq_register_governor(&cpufreq_gov_pegasusq); + if (ret) + goto err_reg; + +#ifdef CONFIG_HAS_EARLYSUSPEND + early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; + early_suspend.suspend = cpufreq_pegasusq_early_suspend; + early_suspend.resume = cpufreq_pegasusq_late_resume; +#endif + + return ret; + +err_reg: + destroy_workqueue(dvfs_workqueue); +err_queue: + kfree(hotplug_history); +err_hist: + kfree(rq_data); + return ret; +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_pegasusq); + destroy_workqueue(dvfs_workqueue); + kfree(hotplug_history); + kfree(rq_data); +} + +MODULE_AUTHOR("ByungChang Cha "); +MODULE_DESCRIPTION("'cpufreq_pegasusq' - A dynamic cpufreq/cpuhotplug governor"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PEGASUSQ +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_smartassH3.c b/drivers/cpufreq/cpufreq_smartassH3.c new file mode 100644 index 00000000..7e0891ed --- /dev/null +++ b/drivers/cpufreq/cpufreq_smartassH3.c @@ -0,0 +1,904 @@ +/* + * drivers/cpufreq/cpufreq_smartassH3.c + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Author: Erasmux + * + * Based on the interactive governor By Mike Chan (mike@android.com) + * which was adaptated to 2.6.29 kernel by Nadlabak (pavel@doshaska.net) + * + * SMP support based on mod by faux123 + * + * ZTE Skate specific tweaks by H3ROS @ MoDaCo, integrated by C3C0 @ MoDaCo + * + * For a general overview of smartassV2 see the relavent part in + * Documentation/cpu-freq/governors.txt + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/******************** Tunable parameters: ********************/ + +/* + * The "ideal" frequency to use when awake. The governor will ramp up faster + * towards the ideal frequency and slower after it has passed it. Similarly, + * lowering the frequency towards the ideal frequency is faster than below it. + */ +#define DEFAULT_AWAKE_IDEAL_FREQ 378000 +static unsigned int awake_ideal_freq; + +/* + * The "ideal" frequency to use when suspended. + * When set to 0, the governor will not track the suspended state (meaning + * that practically when sleep_ideal_freq==0 the awake_ideal_freq is used + * also when suspended). + */ +#define DEFAULT_SLEEP_IDEAL_FREQ 378000 +static unsigned int sleep_ideal_freq; + +/* + * Freqeuncy delta when ramping up above the ideal freqeuncy. + * Zero disables and causes to always jump straight to max frequency. + * When below the ideal freqeuncy we always ramp up to the ideal freq. + */ +#define DEFAULT_RAMP_UP_STEP 80000 +static unsigned int ramp_up_step; + +/* + * Freqeuncy delta when ramping down below the ideal freqeuncy. + * Zero disables and will calculate ramp down according to load heuristic. + * When above the ideal freqeuncy we always ramp down to the ideal freq. + */ +#define DEFAULT_RAMP_DOWN_STEP 80000 +static unsigned int ramp_down_step; + +/* + * CPU freq will be increased if measured load > max_cpu_load; + */ +#define DEFAULT_MAX_CPU_LOAD 85 +static unsigned long max_cpu_load; + +/* + * CPU freq will be decreased if measured load < min_cpu_load; + */ +#define DEFAULT_MIN_CPU_LOAD 70 +static unsigned long min_cpu_load; + +/* + * The minimum amount of time to spend at a frequency before we can ramp up. + * Notice we ignore this when we are below the ideal frequency. + */ +#define DEFAULT_UP_RATE_US 48000; +static unsigned long up_rate_us; + +/* + * The minimum amount of time to spend at a frequency before we can ramp down. + * Notice we ignore this when we are above the ideal frequency. + */ +#define DEFAULT_DOWN_RATE_US 49000; +static unsigned long down_rate_us; + +/* + * The frequency to set when waking up from sleep. + * When sleep_ideal_freq=0 this will have no effect. + */ +#define DEFAULT_SLEEP_WAKEUP_FREQ 99999999 +static unsigned int sleep_wakeup_freq; + +/* + * Sampling rate, I highly recommend to leave it at 2. 
+ */ +#define DEFAULT_SAMPLE_RATE_JIFFIES 2 +static unsigned int sample_rate_jiffies; + + +/*************** End of tunables ***************/ + + +static void (*pm_idle_old)(void); +static atomic_t active_count = ATOMIC_INIT(0); + +struct smartass_info_s { + struct cpufreq_policy *cur_policy; + struct cpufreq_frequency_table *freq_table; + struct timer_list timer; + u64 time_in_idle; + u64 idle_exit_time; + u64 freq_change_time; + u64 freq_change_time_in_idle; + int cur_cpu_load; + int old_freq; + int ramp_dir; + unsigned int enable; + int ideal_speed; +}; +static DEFINE_PER_CPU(struct smartass_info_s, smartass_info); + +/* Workqueues handle frequency scaling */ +static struct workqueue_struct *up_wq; +static struct workqueue_struct *down_wq; +static struct work_struct freq_scale_work; + +static cpumask_t work_cpumask; +static spinlock_t cpumask_lock; + +static unsigned int suspended; + +#define dprintk(flag,msg...) do { \ + if (debug_mask & flag) printk(KERN_DEBUG msg); \ + } while (0) + +enum { + SMARTASS_DEBUG_JUMPS=1, + SMARTASS_DEBUG_LOAD=2, + SMARTASS_DEBUG_ALG=4 +}; + +/* + * Combination of the above debug flags. + */ +static unsigned long debug_mask; + +static int cpufreq_governor_smartass_h3(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASSH3 +static +#endif +struct cpufreq_governor cpufreq_gov_smartass_h3 = { + .name = "smartassH3", + .governor = cpufreq_governor_smartass_h3, + .max_transition_latency = 9000000, + .owner = THIS_MODULE, +}; + +inline static void smartass_update_min_max(struct smartass_info_s *this_smartass, struct cpufreq_policy *policy, int suspend) { + if (suspend) { + this_smartass->ideal_speed = // sleep_ideal_freq; but make sure it obeys the policy min/max + policy->max > sleep_ideal_freq ? + (sleep_ideal_freq > policy->min ? sleep_ideal_freq : policy->min) : policy->max; + } else { + this_smartass->ideal_speed = // awake_ideal_freq; but make sure it obeys the policy min/max + policy->min < awake_ideal_freq ? + (awake_ideal_freq < policy->max ? 
awake_ideal_freq : policy->max) : policy->min; + } +} + +inline static void smartass_update_min_max_allcpus(void) { + unsigned int i; + for_each_online_cpu(i) { + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, i); + if (this_smartass->enable) + smartass_update_min_max(this_smartass,this_smartass->cur_policy,suspended); + } +} + +inline static unsigned int validate_freq(struct cpufreq_policy *policy, int freq) { + if (freq > (int)policy->max) + return policy->max; + if (freq < (int)policy->min) + return policy->min; + return freq; +} + +inline static void reset_timer(unsigned long cpu, struct smartass_info_s *this_smartass) { + this_smartass->time_in_idle = get_cpu_idle_time_us(cpu, &this_smartass->idle_exit_time); + mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies); +} + +inline static void work_cpumask_set(unsigned long cpu) { + unsigned long flags; + spin_lock_irqsave(&cpumask_lock, flags); + cpumask_set_cpu(cpu, &work_cpumask); + spin_unlock_irqrestore(&cpumask_lock, flags); +} + +inline static int work_cpumask_test_and_clear(unsigned long cpu) { + unsigned long flags; + int res = 0; + spin_lock_irqsave(&cpumask_lock, flags); + res = cpumask_test_and_clear_cpu(cpu, &work_cpumask); + spin_unlock_irqrestore(&cpumask_lock, flags); + return res; +} + +inline static int target_freq(struct cpufreq_policy *policy, struct smartass_info_s *this_smartass, + int new_freq, int old_freq, int prefered_relation) { + int index, target; + struct cpufreq_frequency_table *table = this_smartass->freq_table; + + if (new_freq == old_freq) + return 0; + new_freq = validate_freq(policy,new_freq); + if (new_freq == old_freq) + return 0; + + if (table && + !cpufreq_frequency_table_target(policy,table,new_freq,prefered_relation,&index)) + { + target = table[index].frequency; + if (target == old_freq) { + // if for example we are ramping up to *at most* current + ramp_up_step + // but there is no such frequency higher than the current, try also + // to ramp up to *at least* current + ramp_up_step. + if (new_freq > old_freq && prefered_relation==CPUFREQ_RELATION_H + && !cpufreq_frequency_table_target(policy,table,new_freq, + CPUFREQ_RELATION_L,&index)) + target = table[index].frequency; + // simlarly for ramping down: + else if (new_freq < old_freq && prefered_relation==CPUFREQ_RELATION_L + && !cpufreq_frequency_table_target(policy,table,new_freq, + CPUFREQ_RELATION_H,&index)) + target = table[index].frequency; + } + + if (target == old_freq) { + // We should not get here: + // If we got here we tried to change to a validated new_freq which is different + // from old_freq, so there is no reason for us to remain at same frequency. 
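+ // Returning 0 below tells the caller that no frequency switch happened, so freq_change_time is left untouched.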
+ printk(KERN_WARNING "Smartass: frequency change failed: %d to %d => %d\n", + old_freq,new_freq,target); + return 0; + } + } + else target = new_freq; + + __cpufreq_driver_target(policy, target, prefered_relation); + + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassQ: jumping from %d to %d => %d (%d)\n", + old_freq,new_freq,target,policy->cur); + + return target; +} + +static void cpufreq_smartass_timer(unsigned long cpu) +{ + u64 delta_idle; + u64 delta_time; + int cpu_load; + int old_freq; + u64 update_time; + u64 now_idle; + int queued_work = 0; + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); + struct cpufreq_policy *policy = this_smartass->cur_policy; + + now_idle = get_cpu_idle_time_us(cpu, &update_time); + old_freq = policy->cur; + + if (this_smartass->idle_exit_time == 0 || update_time == this_smartass->idle_exit_time) + return; + + delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle); + delta_time = cputime64_sub(update_time, this_smartass->idle_exit_time); + + // If timer ran less than 1ms after short-term sample started, retry. + if (delta_time < 1000) { + if (!timer_pending(&this_smartass->timer)) + reset_timer(cpu,this_smartass); + return; + } + + if (delta_idle > delta_time) + cpu_load = 0; + else + cpu_load = 100 * (unsigned int)(delta_time - delta_idle) / (unsigned int)delta_time; + + dprintk(SMARTASS_DEBUG_LOAD,"smartassT @ %d: load %d (delta_time %llu)\n", + old_freq,cpu_load,delta_time); + + this_smartass->cur_cpu_load = cpu_load; + this_smartass->old_freq = old_freq; + + // Scale up if load is above max or if there where no idle cycles since coming out of idle, + // additionally, if we are at or above the ideal_speed, verify we have been at this frequency + // for at least up_rate_us: + if (cpu_load > max_cpu_load || delta_idle == 0) + { + if (old_freq < policy->max && + (old_freq < this_smartass->ideal_speed || delta_idle == 0 || + cputime64_sub(update_time, this_smartass->freq_change_time) >= up_rate_us)) + { + dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp up: load %d (delta_idle %llu)\n", + old_freq,cpu_load,delta_idle); + this_smartass->ramp_dir = 1; + work_cpumask_set(cpu); + queue_work(up_wq, &freq_scale_work); + queued_work = 1; + } + else this_smartass->ramp_dir = 0; + } + // Similarly for scale down: load should be below min and if we are at or below ideal + // frequency we require that we have been at this frequency for at least down_rate_us: + else if (cpu_load < min_cpu_load && old_freq > policy->min && + (old_freq > this_smartass->ideal_speed || + cputime64_sub(update_time, this_smartass->freq_change_time) >= down_rate_us)) + { + dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp down: load %d (delta_idle %llu)\n", + old_freq,cpu_load,delta_idle); + this_smartass->ramp_dir = -1; + work_cpumask_set(cpu); + queue_work(down_wq, &freq_scale_work); + queued_work = 1; + } + else this_smartass->ramp_dir = 0; + + // To avoid unnecessary load when the CPU is already at high load, we don't + // reset ourselves if we are at max speed. If and when there are idle cycles, + // the idle loop will activate the timer. + // Additionally, if we queued some work, the work task will reset the timer + // after it has done its adjustments. 
+ if (!queued_work && old_freq < policy->max) + reset_timer(cpu,this_smartass); +} + +static void cpufreq_idle(void) +{ + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); + struct cpufreq_policy *policy = this_smartass->cur_policy; + + if (!this_smartass->enable) { + pm_idle_old(); + return; + } + + if (policy->cur == policy->min && timer_pending(&this_smartass->timer)) + del_timer(&this_smartass->timer); + + pm_idle_old(); + + if (!timer_pending(&this_smartass->timer)) + reset_timer(smp_processor_id(), this_smartass); +} + +static int cpufreq_idle_notifier(struct notifier_block *nb, + unsigned long val, void *data) { + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); + struct cpufreq_policy *policy = this_smartass->cur_policy; + + if (!this_smartass->enable) + return NOTIFY_DONE; + + if (val == IDLE_START) { + if (policy->cur == policy->max && !timer_pending(&this_smartass->timer)) { + reset_timer(smp_processor_id(), this_smartass); + } else if (policy->cur == policy->min) { + if (timer_pending(&this_smartass->timer)) + del_timer(&this_smartass->timer); + } + } else if (val == IDLE_END) { + if (policy->cur == policy->min && !timer_pending(&this_smartass->timer)) + reset_timer(smp_processor_id(), this_smartass); + } + + return NOTIFY_OK; +} +static struct notifier_block cpufreq_idle_nb = { + .notifier_call = cpufreq_idle_notifier, +}; + +/* We use the same work function to sale up and down */ +static void cpufreq_smartass_freq_change_time_work(struct work_struct *work) +{ + unsigned int cpu; + int new_freq; + int old_freq; + int ramp_dir; + struct smartass_info_s *this_smartass; + struct cpufreq_policy *policy; + unsigned int relation = CPUFREQ_RELATION_L; + for_each_possible_cpu(cpu) { + this_smartass = &per_cpu(smartass_info, cpu); + if (!work_cpumask_test_and_clear(cpu)) + continue; + + ramp_dir = this_smartass->ramp_dir; + this_smartass->ramp_dir = 0; + + old_freq = this_smartass->old_freq; + policy = this_smartass->cur_policy; + + if (old_freq != policy->cur) { + // frequency was changed by someone else? + printk(KERN_WARNING "Smartass: frequency changed by 3rd party: %d to %d\n", + old_freq,policy->cur); + new_freq = old_freq; + } + else if (ramp_dir > 0 && nr_running() > 1) { + // ramp up logic: + if (old_freq < this_smartass->ideal_speed) + new_freq = this_smartass->ideal_speed; + else if (ramp_up_step) { + new_freq = old_freq + ramp_up_step; + relation = CPUFREQ_RELATION_H; + } + else { + new_freq = policy->max; + relation = CPUFREQ_RELATION_H; + } + dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp up: ramp_dir=%d ideal=%d\n", + old_freq,ramp_dir,this_smartass->ideal_speed); + } + else if (ramp_dir < 0) { + // ramp down logic: + if (old_freq > this_smartass->ideal_speed) { + new_freq = this_smartass->ideal_speed; + relation = CPUFREQ_RELATION_H; + } + else if (ramp_down_step) + new_freq = old_freq - ramp_down_step; + else { + // Load heuristics: Adjust new_freq such that, assuming a linear + // scaling of load vs. frequency, the load in the new frequency + // will be max_cpu_load: + new_freq = old_freq * this_smartass->cur_cpu_load / max_cpu_load; + if (new_freq > old_freq) // min_cpu_load > max_cpu_load ?! + new_freq = old_freq -1; + } + dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp down: ramp_dir=%d ideal=%d\n", + old_freq,ramp_dir,this_smartass->ideal_speed); + } + else { // ramp_dir==0 ?! Could the timer change its mind about a queued ramp up/down + // before the work task gets to run? 
+ // This may also happen if we refused to ramp up because the nr_running()==1 + new_freq = old_freq; + dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d nothing: ramp_dir=%d nr_running=%lu\n", + old_freq,ramp_dir,nr_running()); + } + + // do actual ramp up (returns 0, if frequency change failed): + new_freq = target_freq(policy,this_smartass,new_freq,old_freq,relation); + if (new_freq) + this_smartass->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time); + + // reset timer: + if (new_freq < policy->max) + reset_timer(cpu,this_smartass); + // if we are maxed out, it is pointless to use the timer + // (idle cycles wake up the timer when the timer comes) + else if (timer_pending(&this_smartass->timer)) + del_timer(&this_smartass->timer); + + cpufreq_notify_utilization(policy, + (this_smartass->cur_cpu_load * policy->cur) / policy->max); + } +} + +static ssize_t show_debug_mask(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", debug_mask); +} + +static ssize_t store_debug_mask(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0) + debug_mask = input; + return res; +} + +static ssize_t show_up_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", up_rate_us); +} + +static ssize_t store_up_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0 && input <= 100000000) + up_rate_us = input; + return res; +} + +static ssize_t show_down_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", down_rate_us); +} + +static ssize_t store_down_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0 && input <= 100000000) + down_rate_us = input; + return res; +} + +static ssize_t show_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", sleep_ideal_freq); +} + +static ssize_t store_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) { + sleep_ideal_freq = input; + if (suspended) + smartass_update_min_max_allcpus(); + } + return res; +} + +static ssize_t show_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", sleep_wakeup_freq); +} + +static ssize_t store_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + sleep_wakeup_freq = input; + return res; +} + +static ssize_t show_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", awake_ideal_freq); +} + +static ssize_t store_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) { + awake_ideal_freq = input; + if (!suspended) + smartass_update_min_max_allcpus(); + } + return res; +} + +static ssize_t 
show_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", sample_rate_jiffies); +} + +static ssize_t store_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input <= 1000) + sample_rate_jiffies = input; + return res; +} + +static ssize_t show_ramp_up_step(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", ramp_up_step); +} + +static ssize_t store_ramp_up_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + ramp_up_step = input; + return res; +} + +static ssize_t show_ramp_down_step(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", ramp_down_step); +} + +static ssize_t store_ramp_down_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + ramp_down_step = input; + return res; +} + +static ssize_t show_max_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", max_cpu_load); +} + +static ssize_t store_max_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input <= 100) + max_cpu_load = input; + return res; +} + +static ssize_t show_min_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", min_cpu_load); +} + +static ssize_t store_min_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input < 100) + min_cpu_load = input; + return res; +} + +#define define_global_rw_attr(_name) \ +static struct global_attr _name##_attr = \ + __ATTR(_name, 0644, show_##_name, store_##_name) + +define_global_rw_attr(debug_mask); +define_global_rw_attr(up_rate_us); +define_global_rw_attr(down_rate_us); +define_global_rw_attr(sleep_ideal_freq); +define_global_rw_attr(sleep_wakeup_freq); +define_global_rw_attr(awake_ideal_freq); +define_global_rw_attr(sample_rate_jiffies); +define_global_rw_attr(ramp_up_step); +define_global_rw_attr(ramp_down_step); +define_global_rw_attr(max_cpu_load); +define_global_rw_attr(min_cpu_load); + +static struct attribute * smartass_attributes[] = { + &debug_mask_attr.attr, + &up_rate_us_attr.attr, + &down_rate_us_attr.attr, + &sleep_ideal_freq_attr.attr, + &sleep_wakeup_freq_attr.attr, + &awake_ideal_freq_attr.attr, + &sample_rate_jiffies_attr.attr, + &ramp_up_step_attr.attr, + &ramp_down_step_attr.attr, + &max_cpu_load_attr.attr, + &min_cpu_load_attr.attr, + NULL, +}; + +static struct attribute_group smartass_attr_group = { + .attrs = smartass_attributes, + .name = "smartassH3", +}; + +static int cpufreq_governor_smartass_h3(struct cpufreq_policy *new_policy, + unsigned int event) +{ + unsigned int cpu = new_policy->cpu; + int rc; + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!new_policy->cur)) + return -EINVAL; + + this_smartass->cur_policy 
= new_policy; + + this_smartass->enable = 1; + + smartass_update_min_max(this_smartass,new_policy,suspended); + + this_smartass->freq_table = cpufreq_frequency_get_table(cpu); + if (!this_smartass->freq_table) + printk(KERN_WARNING "Smartass: no frequency table for cpu %d?!\n",cpu); + + smp_wmb(); + + // Do not register the idle hook and create sysfs + // entries if we have already done so. + if (atomic_inc_return(&active_count) <= 1) { + rc = sysfs_create_group(cpufreq_global_kobject, + &smartass_attr_group); + if (rc) + return rc; + + pm_idle_old = pm_idle; + pm_idle = cpufreq_idle; + idle_notifier_register(&cpufreq_idle_nb); + } + + if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer)) + reset_timer(cpu,this_smartass); + + break; + + case CPUFREQ_GOV_LIMITS: + smartass_update_min_max(this_smartass,new_policy,suspended); + + if (this_smartass->cur_policy->cur > new_policy->max) { + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new max freq: %d\n",new_policy->max); + __cpufreq_driver_target(this_smartass->cur_policy, + new_policy->max, CPUFREQ_RELATION_H); + } + else if (this_smartass->cur_policy->cur < new_policy->min) { + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new min freq: %d\n",new_policy->min); + __cpufreq_driver_target(this_smartass->cur_policy, + new_policy->min, CPUFREQ_RELATION_L); + } + + if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer)) + reset_timer(cpu,this_smartass); + + break; + + case CPUFREQ_GOV_STOP: + this_smartass->enable = 0; + smp_wmb(); + del_timer(&this_smartass->timer); + flush_work(&freq_scale_work); + this_smartass->idle_exit_time = 0; + + if (atomic_dec_return(&active_count) <= 1) { + sysfs_remove_group(cpufreq_global_kobject, + &smartass_attr_group); + pm_idle = pm_idle_old; + idle_notifier_unregister(&cpufreq_idle_nb); + } + break; + } + + return 0; +} + +static void smartass_suspend(int cpu, int suspend) +{ + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); + struct cpufreq_policy *policy = this_smartass->cur_policy; + unsigned int new_freq; + + if (!this_smartass->enable) + return; + + smartass_update_min_max(this_smartass,policy,suspend); + if (!suspend) { // resume at max speed: + new_freq = validate_freq(policy,sleep_wakeup_freq); + + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: awaking at %d\n",new_freq); + + __cpufreq_driver_target(policy, new_freq, + CPUFREQ_RELATION_L); + } else { + // to avoid wakeup issues with quick sleep/wakeup don't change actual frequency when entering sleep + // to allow some time to settle down. Instead we just reset our statistics (and reset the timer). + // Eventually, the timer will adjust the frequency if necessary. 
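
Two calculations from the hunks above are worth isolating: the CPUFREQ_GOV_LIMITS case simply clamps the running frequency into the new policy bounds, and the ramp-down branch of the work function falls back to a proportional target (old_freq * cur_cpu_load / max_cpu_load) when no fixed ramp_down_step is configured. A user-space sketch, with made-up frequencies in kHz (illustration only, not part of the patch):

#include <stdio.h>

/* Clamp the current frequency into the new policy limits (GOV_LIMITS case). */
static unsigned int clamp_freq(unsigned int cur, unsigned int min, unsigned int max)
{
        if (cur > max)
                return max;
        if (cur < min)
                return min;
        return cur;
}

/*
 * Proportional ramp-down target: assume load scales roughly linearly with
 * frequency and pick the frequency at which the present load would sit at
 * max_cpu_load; never return something above old_freq.
 */
static unsigned int ramp_down_target(unsigned int old_freq, unsigned int cur_load,
                                     unsigned int max_cpu_load)
{
        unsigned int new_freq = old_freq * cur_load / max_cpu_load;

        if (new_freq > old_freq)
                new_freq = old_freq - 1;
        return new_freq;
}

int main(void)
{
        /* e.g. running at 1512000 kHz when the new policy max drops to 1134000 kHz */
        printf("clamped: %u kHz\n", clamp_freq(1512000, 384000, 1134000));
        /* e.g. 25% load at 1026000 kHz with max_cpu_load = 80 */
        printf("ramp-down target: %u kHz\n", ramp_down_target(1026000, 25, 80));
        return 0;
}
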
+ + this_smartass->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time); + + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: suspending at %d\n",policy->cur); + } + + reset_timer(smp_processor_id(),this_smartass); +} + +static void smartass_early_suspend(struct early_suspend *handler) { + int i; + if (suspended || sleep_ideal_freq==0) // disable behavior for sleep_ideal_freq==0 + return; + suspended = 1; + for_each_online_cpu(i) + smartass_suspend(i,1); +} + +static void smartass_late_resume(struct early_suspend *handler) { + int i; + if (!suspended) // already not suspended so nothing to do + return; + suspended = 0; + for_each_online_cpu(i) + smartass_suspend(i,0); +} + +static struct early_suspend smartass_power_suspend = { + .suspend = smartass_early_suspend, + .resume = smartass_late_resume, +#ifdef CONFIG_MACH_HERO + .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, +#endif +}; + +static int __init cpufreq_smartass_init(void) +{ + unsigned int i; + struct smartass_info_s *this_smartass; + debug_mask = 0; + up_rate_us = DEFAULT_UP_RATE_US; + down_rate_us = DEFAULT_DOWN_RATE_US; + sleep_ideal_freq = DEFAULT_SLEEP_IDEAL_FREQ; + sleep_wakeup_freq = DEFAULT_SLEEP_WAKEUP_FREQ; + awake_ideal_freq = DEFAULT_AWAKE_IDEAL_FREQ; + sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES; + ramp_up_step = DEFAULT_RAMP_UP_STEP; + ramp_down_step = DEFAULT_RAMP_DOWN_STEP; + max_cpu_load = DEFAULT_MAX_CPU_LOAD; + min_cpu_load = DEFAULT_MIN_CPU_LOAD; + + spin_lock_init(&cpumask_lock); + + suspended = 0; + + /* Initalize per-cpu data: */ + for_each_possible_cpu(i) { + this_smartass = &per_cpu(smartass_info, i); + this_smartass->enable = 0; + this_smartass->cur_policy = 0; + this_smartass->ramp_dir = 0; + this_smartass->time_in_idle = 0; + this_smartass->idle_exit_time = 0; + this_smartass->freq_change_time = 0; + this_smartass->freq_change_time_in_idle = 0; + this_smartass->cur_cpu_load = 0; + // intialize timer: + init_timer_deferrable(&this_smartass->timer); + this_smartass->timer.function = cpufreq_smartass_timer; + this_smartass->timer.data = i; + work_cpumask_test_and_clear(i); + } + + // Scale up is high priority + up_wq = create_workqueue("ksmartass_up"); + down_wq = create_workqueue("ksmartass_down"); + if (!up_wq || !down_wq) + return -ENOMEM; + + INIT_WORK(&freq_scale_work, cpufreq_smartass_freq_change_time_work); + + register_early_suspend(&smartass_power_suspend); + + return cpufreq_register_governor(&cpufreq_gov_smartass_h3); +} + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASSH3 +fs_initcall(cpufreq_smartass_init); +#else +module_init(cpufreq_smartass_init); +#endif + +static void __exit cpufreq_smartass_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_smartass_h3); + destroy_workqueue(up_wq); + destroy_workqueue(down_wq); +} + +module_exit(cpufreq_smartass_exit); + +MODULE_AUTHOR ("Erasmux, moded by H3ROS & C3C0"); +MODULE_DESCRIPTION ("'cpufreq_smartassH3' - A smart cpufreq governor"); +MODULE_LICENSE ("GPL"); + diff --git a/drivers/cpufreq/cpufreq_wheatley.c b/drivers/cpufreq/cpufreq_wheatley.c new file mode 100644 index 00000000..a020121f --- /dev/null +++ b/drivers/cpufreq/cpufreq_wheatley.c @@ -0,0 +1,839 @@ +/* + * drivers/cpufreq/cpufreq_wheatley.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * (C) 2012 Ezekeel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) +#define DEF_FREQUENCY_UP_THRESHOLD (80) +#define DEF_SAMPLING_DOWN_FACTOR (1) +#define MAX_SAMPLING_DOWN_FACTOR (100000) +#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) +#define MICRO_FREQUENCY_UP_THRESHOLD (95) +#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (50000) +#define MIN_FREQUENCY_UP_THRESHOLD (11) +#define MAX_FREQUENCY_UP_THRESHOLD (100) +#define DEF_TARGET_RESIDENCY (10000) +#define DEF_ALLOWED_MISSES (5) + +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. + * All times here are in uS. + */ +#define MIN_SAMPLING_RATE_RATIO (2) + +static unsigned int min_sampling_rate, num_misses; + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (20) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_WHEATLEY +static +#endif +struct cpufreq_governor cpufreq_gov_wheatley = { + .name = "wheatley", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_lo; + unsigned int freq_lo_jiffies; + unsigned int freq_hi_jiffies; + unsigned int rate_mult; + int cpu; + unsigned int sample_type:1; + unsigned long long prev_idletime; + unsigned long long prev_idleusage; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); + +DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects dbs_enable in governor start/stop. 
+ */ +static DEFINE_MUTEX(dbs_mutex); + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int ignore_nice; + unsigned int sampling_down_factor; + unsigned int powersave_bias; + unsigned int io_is_busy; + unsigned int target_residency; + unsigned int allowed_misses; +} dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, + .ignore_nice = 0, + .powersave_bias = 0, + .target_residency = DEF_TARGET_RESIDENCY, + .allowed_misses = DEF_ALLOWED_MISSES, +}; + +static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, + u64 *wall) +{ + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + + idle_time = cur_wall_time - busy_time; + if (wall) + *wall = jiffies_to_usecs(cur_wall_time); + + return jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, NULL); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + else + idle_time += get_cpu_iowait_time_us(cpu, wall); + + return idle_time; +} + +static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +/* + * Find right freq to be set now with powersave_bias on. + * Returns the freq_hi to be used right now and will set freq_hi_jiffies, + * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. 
+ */ +static unsigned int powersave_bias_target(struct cpufreq_policy *policy, + unsigned int freq_next, + unsigned int relation) +{ + unsigned int freq_req, freq_reduc, freq_avg; + unsigned int freq_hi, freq_lo; + unsigned int index = 0; + unsigned int jiffies_total, jiffies_hi, jiffies_lo; + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, + policy->cpu); + + if (!dbs_info->freq_table) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_next; + } + + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, + relation, &index); + freq_req = dbs_info->freq_table[index].frequency; + freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000; + freq_avg = freq_req - freq_reduc; + + /* Find freq bounds for freq_avg in freq_table */ + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_H, &index); + freq_lo = dbs_info->freq_table[index].frequency; + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_L, &index); + freq_hi = dbs_info->freq_table[index].frequency; + + /* Find out how long we have to be in hi and lo freqs */ + if (freq_hi == freq_lo) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_lo; + } + jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + jiffies_hi = (freq_avg - freq_lo) * jiffies_total; + jiffies_hi += ((freq_hi - freq_lo) / 2); + jiffies_hi /= (freq_hi - freq_lo); + jiffies_lo = jiffies_total - jiffies_hi; + dbs_info->freq_lo = freq_lo; + dbs_info->freq_lo_jiffies = jiffies_lo; + dbs_info->freq_hi_jiffies = jiffies_hi; + return freq_hi; +} + +static void wheatley_powersave_bias_init_cpu(int cpu) +{ + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + dbs_info->freq_table = cpufreq_frequency_get_table(cpu); + dbs_info->freq_lo = 0; +} + +static void wheatley_powersave_bias_init(void) +{ + int i; + for_each_online_cpu(i) { + wheatley_powersave_bias_init_cpu(i); + } +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +define_one_global_ro(sampling_rate_min); + +/* cpufreq_wheatley Governor Tunables */ +#define show_one(file_name, object) \ + static ssize_t show_##file_name \ + (struct kobject *kobj, struct attribute *attr, char *buf) \ + { \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ + } +show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(up_threshold, up_threshold); +show_one(sampling_down_factor, sampling_down_factor); +show_one(ignore_nice_load, ignore_nice); +show_one(powersave_bias, powersave_bias); +show_one(target_residency, target_residency); +show_one(allowed_misses, allowed_misses); + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + return count; +} + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.io_is_busy = !!input; + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, 
size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + dbs_tuners_ins.up_threshold = input; + return count; +} + +static ssize_t store_sampling_down_factor(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input, j; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + dbs_tuners_ins.sampling_down_factor = input; + + /* Reset down sampling multiplier in case it was active */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->rate_mult = 1; + } + return count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + + } + return count; +} + +static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input > 1000) + input = 1000; + + dbs_tuners_ins.powersave_bias = input; + wheatley_powersave_bias_init(); + return count; +} + +static ssize_t store_target_residency(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + dbs_tuners_ins.target_residency = input; + return count; +} + +static ssize_t store_allowed_misses(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + dbs_tuners_ins.allowed_misses = input; + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(up_threshold); +define_one_global_rw(sampling_down_factor); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(powersave_bias); +define_one_global_rw(target_residency); +define_one_global_rw(allowed_misses); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_min.attr, + &sampling_rate.attr, + &up_threshold.attr, + &sampling_down_factor.attr, + &ignore_nice_load.attr, + &powersave_bias.attr, + &io_is_busy.attr, + &target_residency.attr, + &allowed_misses.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "wheatley", +}; + +/************************** sysfs end ************************/ + +static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) +{ + if (dbs_tuners_ins.powersave_bias) + freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); + else if (p->cur == p->max) + return; + + __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ? 
+ CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); +} + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + unsigned int max_load_freq; + + struct cpufreq_policy *policy; + unsigned int j; + + unsigned long total_idletime, total_usage; + + this_dbs_info->freq_lo = 0; + policy = this_dbs_info->cur_policy; + + /* + * Every sampling_rate, we check, if current idle time is less + * than 20% (default), then we try to increase frequency + * Every sampling_rate, we look for a the lowest + * frequency which can sustain the load while keeping idle time over + * 30%. If such a frequency exist, we try to decrease to this frequency. + * + * Any frequency increase takes it to the maximum frequency. + * Frequency reduction happens at minimum steps of + * 5% (default) of current frequency + */ + + /* Get Absolute Load - in terms of freq */ + max_load_freq = 0; + total_idletime = 0; + total_usage = 0; + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + unsigned int load, load_freq; + int freq_avg; + struct cpuidle_device * j_cpuidle_dev = NULL; +// struct cpuidle_state * deepidle_state = NULL; +// unsigned long long deepidle_time, deepidle_usage; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) (cur_wall_time - j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) (cur_idle_time - j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) (cur_iowait_time - j_dbs_info->prev_cpu_iowait); + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) { + cputime64_t cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_dbs_info->prev_cpu_nice; + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + /* + * For the purpose of wheatley, waiting for disk IO is an + * indication that you're performance critical, and not that + * the system is actually idle. So subtract the iowait time + * from the cpu idle time. 
+ */ + + if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + + freq_avg = __cpufreq_driver_getavg(policy, j); + if (freq_avg <= 0) + freq_avg = policy->cur; + + load_freq = load * freq_avg; + if (load_freq > max_load_freq) + max_load_freq = load_freq; + + j_cpuidle_dev = per_cpu(cpuidle_devices, j); + +/* + if (j_cpuidle_dev) + deepidle_state = &j_cpuidle_dev->states[j_cpuidle_dev->state_count - 1]; + + if (deepidle_state) { + deepidle_time = deepidle_state->time; + deepidle_usage = deepidle_state->usage; + + total_idletime += (unsigned long)(deepidle_time - j_dbs_info->prev_idletime); + total_usage += (unsigned long)(deepidle_usage - j_dbs_info->prev_idleusage); + + j_dbs_info->prev_idletime = deepidle_time; + j_dbs_info->prev_idleusage = deepidle_usage; + } +*/ + } + + if (total_usage > 0 && total_idletime / total_usage >= dbs_tuners_ins.target_residency) { + if (num_misses > 0) + num_misses--; + } else { + if (num_misses <= dbs_tuners_ins.allowed_misses) + num_misses++; + } + + /* Check for frequency increase */ + if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur + || num_misses <= dbs_tuners_ins.allowed_misses) { + /* If switching to max speed, apply sampling_down_factor */ + if (policy->cur < policy->max) + this_dbs_info->rate_mult = + dbs_tuners_ins.sampling_down_factor; + dbs_freq_increase(policy, policy->max); + return; + } + + /* Check for frequency decrease */ + /* if we cannot reduce the frequency anymore, break out early */ + if (policy->cur == policy->min) + return; + + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus 10 points under the threshold. 
+ */ + if (max_load_freq < + (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * + policy->cur) { + unsigned int freq_next; + freq_next = max_load_freq / + (dbs_tuners_ins.up_threshold - + dbs_tuners_ins.down_differential); + + /* No longer fully busy, reset rate_mult */ + this_dbs_info->rate_mult = 1; + + if (freq_next < policy->min) + freq_next = policy->min; + + if (!dbs_tuners_ins.powersave_bias) { + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } else { + int freq = powersave_bias_target(policy, freq_next, + CPUFREQ_RELATION_L); + __cpufreq_driver_target(policy, freq, + CPUFREQ_RELATION_L); + } + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + int sample_type = dbs_info->sample_type; + + int delay; + + mutex_lock(&dbs_info->timer_mutex); + + /* Common NORMAL_SAMPLE setup */ + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + if (!dbs_tuners_ins.powersave_bias || + sample_type == DBS_NORMAL_SAMPLE) { + dbs_check_cpu(dbs_info); + if (dbs_info->freq_lo) { + /* Setup timer for SUB_SAMPLE */ + dbs_info->sample_type = DBS_SUB_SAMPLE; + delay = dbs_info->freq_hi_jiffies; + } else { + /* We want all CPUs to do sampling nearly on + * same jiffy + */ + delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate + * dbs_info->rate_mult); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; + } + } else { + __cpufreq_driver_target(dbs_info->cur_policy, + dbs_info->freq_lo, CPUFREQ_RELATION_H); + delay = dbs_info->freq_lo_jiffies; + } + schedule_delayed_work_on(cpu, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); +} + +/* + * Not all CPUs want IO time to be accounted as busy; this dependson how + * efficient idling at a higher frequency/voltage is. + * Pavel Machek says this is not so for various generations of AMD and old + * Intel systems. + * Mike Chan (androidlcom) calis this is also not true for ARM. + * Because of this, whitelist specific known (series) of CPUs by default, and + * leave all others up to the user. + */ +static int should_io_be_busy(void) +{ +#if defined(CONFIG_X86) + /* + * For Intel, Core 2 (model 15) andl later have an efficient idle. 
+ */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + boot_cpu_data.x86 == 6 && + boot_cpu_data.x86_model >= 15) + return 1; +#endif + return 0; +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_mutex); + + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + } + this_dbs_info->cpu = cpu; + this_dbs_info->rate_mult = 1; + wheatley_powersave_bias_init_cpu(cpu); + num_misses = 0; + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + /* policy latency is in nS. Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = + max(min_sampling_rate, + latency * LATENCY_MULTIPLIER); + dbs_tuners_ins.io_is_busy = should_io_be_busy(); + } + mutex_unlock(&dbs_mutex); + + mutex_init(&this_dbs_info->timer_mutex); + dbs_timer_init(this_dbs_info); + break; + + case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + mutex_destroy(&this_dbs_info->timer_mutex); + dbs_enable--; + mutex_unlock(&dbs_mutex); + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +static int __init cpufreq_gov_dbs_init(void) +{ + u64 idle_time; + int cpu = get_cpu(); + + idle_time = get_cpu_idle_time_us(cpu, NULL); + put_cpu(); + if (idle_time != -1ULL) { + /* Idle micro accounting is supported. Use finer thresholds */ + dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; + dbs_tuners_ins.down_differential = + MICRO_FREQUENCY_DOWN_DIFFERENTIAL; + /* + * In no_hz/micro accounting case we set the minimum frequency + * not depending on HZ, but fixed (very low). The deferred + * timer might skip some samples if idle/sleeping as needed. 
+ */ + min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; + } else { + /* For correct statistics, we need 10 ticks for each measure */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + } + + return cpufreq_register_governor(&cpufreq_gov_wheatley); +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_wheatley); +} + + +MODULE_AUTHOR("Venkatesh Pallipadi "); +MODULE_AUTHOR("Alexey Starikovskiy "); +MODULE_AUTHOR("Ezekeel "); +MODULE_DESCRIPTION("'cpufreq_wheatley' - A dynamic cpufreq governor for " + "Low Latency Frequency Transition capable processors"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_WHEATLEY +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); From 3b947876fed4749007e08f970be01ea1be1e0fec Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 04:46:44 -0400 Subject: [PATCH 03/35] fix: warnings: 'sport' may be used uninitialized in this function Signed-off-by: Dennis Rassmann Conflicts: net/netfilter/xt_socket.c --- drivers/cpufreq/Kconfig | 62 - drivers/cpufreq/cpufreq_interactive.c | 1259 ---------------- drivers/cpufreq/cpufreq_ktoonservativeq.c | 1608 --------------------- drivers/cpufreq/cpufreq_smartassv2.c | 905 ------------ net/netfilter/xt_socket.c | 8 +- 5 files changed, 4 insertions(+), 3838 deletions(-) delete mode 100644 drivers/cpufreq/cpufreq_interactive.c delete mode 100644 drivers/cpufreq/cpufreq_ktoonservativeq.c delete mode 100644 drivers/cpufreq/cpufreq_smartassv2.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 9b3eda6c..6c5d815d 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -104,12 +104,6 @@ config CPU_FREQ_DEFAULT_GOV_BADASS If in doubt, say N -config CPU_FREQ_DEFAULT_GOV_SMARTASSV2 - bool "badass" - select CPU_FREQ_GOV_SMARTASSV2 - help - Use the CPUFreq governor 'smartassv2' as default. - config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE bool "conservative" select CPU_FREQ_GOV_CONSERVATIVE @@ -127,16 +121,6 @@ config CPU_FREQ_DEFAULT_GOV_DANCEDANCE select CPU_FREQ_GOV_DANCEDANCE help -config CPU_FREQ_DEFAULT_GOV_INTERACTIVE - bool "interactive" - select CPU_FREQ_GOV_INTERACTIVE - help - Use the CPUFreq governor 'interactive' as default. This allows - you to get a full dynamic cpu frequency capable system by simply - loading your cpufreq low-level hardware driver, using the - 'interactive' governor for latency-sensitive workloads. - - config CPU_FREQ_DEFAULT_GOV_LIONHEART bool "lionheart" select CPU_FREQ_GOV_LIONHEART @@ -257,11 +241,6 @@ config CPU_FREQ_GOV_ABYSSPLUG If you don't have one of these architectures or devices, use 'ondemand' instead. If in doubt, say N. -config CPU_FREQ_GOV_SMARTASSV2 - tristate "smartassv2" - depends on CPU_FREQ - help - Use the CPUFreq governor 'smartassv2' as default. config CPU_FREQ_GOV_ADAPTIVE tristate "'adaptive' cpufreq policy governor" @@ -338,23 +317,6 @@ config CPU_FREQ_GOV_DANCEDANCE tristate "'dancedance' cpufreq governor" depends on CPU_FREQ -config CPU_FREQ_GOV_INTERACTIVE - tristate "'interactive' cpufreq policy governor" - help - 'interactive' - This driver adds a dynamic cpufreq policy governor - designed for latency-sensitive workloads. - - This governor attempts to reduce the latency of clock - increases so that the system is more responsive to - interactive workloads. - - To compile this driver as a module, choose M here: the - module will be called cpufreq_interactive. 
- - For details, take a look at linux/Documentation/cpu-freq. - - If in doubt, say N. - config CPU_FREQ_GOV_INTELLIDEMAND tristate "'intellidemand' cpufreq policy governor" select CPU_FREQ_TABLE @@ -373,30 +335,6 @@ config CPU_FREQ_GOV_INTELLIDEMAND If in doubt, say N. -config CPU_FREQ_GOV_KTOONSERVATIVEQ - tristate "'ktoonservativeq' cpufreq governor" - depends on CPU_FREQ - help - 'ktoonservativeq' - this driver is rather similar to the 'ondemand' - governor both in its source code and its purpose, the difference is - its optimisation for better suitability in a battery powered - environment. The frequency is gracefully increased and decreased - rather than jumping to 100% when speed is required. - - If you have a desktop machine then you should really be considering - the 'ondemand' governor instead, however if you are using a laptop, - PDA or even an AMD64 based computer (due to the unacceptable - step-by-step latency issues between the minimum and maximum frequency - transitions in the CPU) you will probably want to use this governor. - This governor adds the capability of hotpluging. - - To compile this driver as a module, choose M here: the - module will be called cpufreq_ktoonservativeq. - - For details, take a look at linux/Documentation/cpu-freq. - - If in doubt, say N. - config CPU_FREQ_GOV_NIGHTMARE tristate "'nightmare' cpufreq governor" depends on CPU_FREQ diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c deleted file mode 100644 index a5983c1e..00000000 --- a/drivers/cpufreq/cpufreq_interactive.c +++ /dev/null @@ -1,1259 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_interactive.c - * - * Copyright (C) 2010 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Author: Mike Chan (mike@android.com) - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define CREATE_TRACE_POINTS -#include - -static int active_count; - -struct cpufreq_interactive_cpuinfo { - struct timer_list cpu_timer; - struct timer_list cpu_slack_timer; - spinlock_t load_lock; /* protects the next 4 fields */ - u64 time_in_idle; - u64 time_in_idle_timestamp; - u64 cputime_speedadj; - u64 cputime_speedadj_timestamp; - struct cpufreq_policy *policy; - struct cpufreq_frequency_table *freq_table; - unsigned int target_freq; - unsigned int floor_freq; - u64 floor_validate_time; - u64 hispeed_validate_time; - struct rw_semaphore enable_sem; - int governor_enabled; - int cpu_load; -}; - -static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo); - -/* realtime thread handles frequency scaling */ -static struct task_struct *speedchange_task; -static cpumask_t speedchange_cpumask; -static spinlock_t speedchange_cpumask_lock; -static struct mutex gov_lock; - -/* Hi speed to bump to from lo speed when load burst (default max) */ -static unsigned int hispeed_freq; - -/* Go to hi speed when CPU load at or above this value. */ -#define DEFAULT_GO_HISPEED_LOAD 99 -static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD; - -/* Target load. 
Lower values result in higher CPU speeds. */ -#define DEFAULT_TARGET_LOAD 90 -static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD}; -static spinlock_t target_loads_lock; -static unsigned int *target_loads = default_target_loads; -static int ntarget_loads = ARRAY_SIZE(default_target_loads); - -/* - * The minimum amount of time to spend at a frequency before we can ramp down. - */ -#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC) -static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME; - -/* - * The sample rate of the timer used to increase frequency - */ -#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC) -static unsigned long timer_rate = DEFAULT_TIMER_RATE; - -/* - * Wait this long before raising speed above hispeed, by default a single - * timer interval. - */ -#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE -static unsigned int default_above_hispeed_delay[] = { - DEFAULT_ABOVE_HISPEED_DELAY }; -static spinlock_t above_hispeed_delay_lock; -static unsigned int *above_hispeed_delay = default_above_hispeed_delay; -static int nabove_hispeed_delay = ARRAY_SIZE(default_above_hispeed_delay); - -/* Non-zero means indefinite speed boost active */ -static int boost_val; -/* Duration of a boot pulse in usecs */ -static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME; -/* End time of boost pulse in ktime converted to usecs */ -static u64 boostpulse_endtime; - -/* - * Max additional time to wait in idle, beyond timer_rate, at speeds above - * minimum before wakeup to reduce speed, or -1 if unnecessary. - */ -#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE) -static int timer_slack_val = DEFAULT_TIMER_SLACK; - -static bool io_is_busy; - -static int cpufreq_governor_interactive(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE -static -#endif -struct cpufreq_governor cpufreq_gov_interactive = { - .name = "interactive", - .governor = cpufreq_governor_interactive, - .max_transition_latency = 10000000, - .owner = THIS_MODULE, -}; - -static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, - cputime64_t *wall) -{ - u64 idle_time; - u64 cur_wall_time; - u64 busy_time; - - cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); - - busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; - - idle_time = cur_wall_time - busy_time; - if (wall) - *wall = jiffies_to_usecs(cur_wall_time); - - return jiffies_to_usecs(idle_time); -} - -static inline cputime64_t get_cpu_idle_time(unsigned int cpu, - cputime64_t *wall) -{ - u64 idle_time = get_cpu_idle_time_us(cpu, wall); - - if (idle_time == -1ULL) - idle_time = get_cpu_idle_time_jiffy(cpu, wall); - else if (!io_is_busy) - idle_time += get_cpu_iowait_time_us(cpu, wall); - - return idle_time; -} - -static void cpufreq_interactive_timer_resched( - struct cpufreq_interactive_cpuinfo *pcpu) -{ - unsigned long expires; - unsigned long flags; - - spin_lock_irqsave(&pcpu->load_lock, flags); - pcpu->time_in_idle = - get_cpu_idle_time(smp_processor_id(), - &pcpu->time_in_idle_timestamp); - pcpu->cputime_speedadj = 0; - pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp; - expires = jiffies + usecs_to_jiffies(timer_rate); - mod_timer_pinned(&pcpu->cpu_timer, expires); - - if 
(timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) { - expires += usecs_to_jiffies(timer_slack_val); - mod_timer_pinned(&pcpu->cpu_slack_timer, expires); - } - - spin_unlock_irqrestore(&pcpu->load_lock, flags); -} - -/* The caller shall take enable_sem write semaphore to avoid any timer race. - * The cpu_timer and cpu_slack_timer must be deactivated when calling this - * function. - */ -static void cpufreq_interactive_timer_start(int cpu) -{ - struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu); - unsigned long expires = jiffies + usecs_to_jiffies(timer_rate); - unsigned long flags; - - pcpu->cpu_timer.expires = expires; - add_timer_on(&pcpu->cpu_timer, cpu); - if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) { - expires += usecs_to_jiffies(timer_slack_val); - pcpu->cpu_slack_timer.expires = expires; - add_timer_on(&pcpu->cpu_slack_timer, cpu); - } - - spin_lock_irqsave(&pcpu->load_lock, flags); - pcpu->time_in_idle = - get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp); - pcpu->cputime_speedadj = 0; - pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp; - spin_unlock_irqrestore(&pcpu->load_lock, flags); -} - -static unsigned int freq_to_above_hispeed_delay(unsigned int freq) -{ - int i; - unsigned int ret; - unsigned long flags; - - spin_lock_irqsave(&above_hispeed_delay_lock, flags); - - for (i = 0; i < nabove_hispeed_delay - 1 && - freq >= above_hispeed_delay[i+1]; i += 2) - ; - - ret = above_hispeed_delay[i]; - spin_unlock_irqrestore(&above_hispeed_delay_lock, flags); - return ret; -} - -static unsigned int freq_to_targetload(unsigned int freq) -{ - int i; - unsigned int ret; - unsigned long flags; - - spin_lock_irqsave(&target_loads_lock, flags); - - for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2) - ; - - ret = target_loads[i]; - spin_unlock_irqrestore(&target_loads_lock, flags); - return ret; -} - -/* - * If increasing frequencies never map to a lower target load then - * choose_freq() will find the minimum frequency that does not exceed its - * target load given the current load. - */ - -static unsigned int choose_freq( - struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq) -{ - unsigned int freq = pcpu->policy->cur; - unsigned int prevfreq, freqmin, freqmax; - unsigned int tl; - int index; - - freqmin = 0; - freqmax = UINT_MAX; - - do { - prevfreq = freq; - tl = freq_to_targetload(freq); - - /* - * Find the lowest frequency where the computed load is less - * than or equal to the target load. - */ - - if (cpufreq_frequency_table_target( - pcpu->policy, pcpu->freq_table, loadadjfreq / tl, - CPUFREQ_RELATION_L, &index)) - break; - freq = pcpu->freq_table[index].frequency; - - if (freq > prevfreq) { - /* The previous frequency is too low. */ - freqmin = prevfreq; - - if (freq >= freqmax) { - /* - * Find the highest frequency that is less - * than freqmax. - */ - if (cpufreq_frequency_table_target( - pcpu->policy, pcpu->freq_table, - freqmax - 1, CPUFREQ_RELATION_H, - &index)) - break; - freq = pcpu->freq_table[index].frequency; - - if (freq == freqmin) { - /* - * The first frequency below freqmax - * has already been found to be too - * low. freqmax is the lowest speed - * we found that is fast enough. - */ - freq = freqmax; - break; - } - } - } else if (freq < prevfreq) { - /* The previous frequency is high enough. */ - freqmax = prevfreq; - - if (freq <= freqmin) { - /* - * Find the lowest frequency that is higher - * than freqmin. 
- */ - if (cpufreq_frequency_table_target( - pcpu->policy, pcpu->freq_table, - freqmin + 1, CPUFREQ_RELATION_L, - &index)) - break; - freq = pcpu->freq_table[index].frequency; - - /* - * If freqmax is the first frequency above - * freqmin then we have already found that - * this speed is fast enough. - */ - if (freq == freqmax) - break; - } - } - - /* If same frequency chosen as previous then done. */ - } while (freq != prevfreq); - - return freq; -} - -static u64 update_load(int cpu) -{ - struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu); - u64 now; - u64 now_idle; - unsigned int delta_idle; - unsigned int delta_time; - u64 active_time; - - now_idle = get_cpu_idle_time(cpu, &now); - delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle); - delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp); - - if (delta_time <= delta_idle) - active_time = 0; - else - active_time = delta_time - delta_idle; - - pcpu->cputime_speedadj += active_time * pcpu->policy->cur; - - pcpu->time_in_idle = now_idle; - pcpu->time_in_idle_timestamp = now; - return now; -} - -static void cpufreq_interactive_timer(unsigned long data) -{ - u64 now; - unsigned int delta_time; - u64 cputime_speedadj; - int cpu_load; - struct cpufreq_interactive_cpuinfo *pcpu = - &per_cpu(cpuinfo, data); - unsigned int new_freq; - unsigned int loadadjfreq; - unsigned int index; - unsigned long flags; - bool boosted; - - if (!down_read_trylock(&pcpu->enable_sem)) - return; - if (!pcpu->governor_enabled) - goto exit; - - spin_lock_irqsave(&pcpu->load_lock, flags); - now = update_load(data); - delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp); - cputime_speedadj = pcpu->cputime_speedadj; - spin_unlock_irqrestore(&pcpu->load_lock, flags); - - if (WARN_ON_ONCE(!delta_time)) - goto rearm; - - do_div(cputime_speedadj, delta_time); - loadadjfreq = (unsigned int)cputime_speedadj * 100; - cpu_load = loadadjfreq / pcpu->target_freq; - boosted = boost_val || now < boostpulse_endtime; - - pcpu->cpu_load = cpu_load; - - if (cpu_load >= go_hispeed_load || boosted) { - if (pcpu->target_freq < hispeed_freq) { - new_freq = hispeed_freq; - } else { - new_freq = choose_freq(pcpu, loadadjfreq); - - if (new_freq < hispeed_freq) - new_freq = hispeed_freq; - } - } else { - new_freq = choose_freq(pcpu, loadadjfreq); - } - - if (kt_freq_control[1] == 0 && pcpu->target_freq >= hispeed_freq && - new_freq > pcpu->target_freq && - now - pcpu->hispeed_validate_time < - freq_to_above_hispeed_delay(pcpu->target_freq)) { - trace_cpufreq_interactive_notyet( - data, cpu_load, pcpu->target_freq, - pcpu->policy->cur, new_freq); - goto rearm; - } - - pcpu->hispeed_validate_time = now; - - if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table, - new_freq, CPUFREQ_RELATION_L, - &index)) - goto rearm; - - new_freq = pcpu->freq_table[index].frequency; - - /* - * Do not scale below floor_freq unless we have been at or above the - * floor frequency for the minimum sample time since last validated. - */ - if (kt_freq_control[1] == 0 && new_freq < pcpu->floor_freq) { - if (now - pcpu->floor_validate_time < min_sample_time) { - trace_cpufreq_interactive_notyet( - data, cpu_load, pcpu->target_freq, - pcpu->policy->cur, new_freq); - goto rearm; - } - } - - /* - * Update the timestamp for checking whether speed has been held at - * or above the selected frequency for a minimum of min_sample_time, - * if not boosted to hispeed_freq. 
If boosted to hispeed_freq then we - * allow the speed to drop as soon as the boostpulse duration expires - * (or the indefinite boost is turned off). - */ - - if (!boosted || new_freq > hispeed_freq) { - pcpu->floor_freq = new_freq; - pcpu->floor_validate_time = now; - } - - if (pcpu->target_freq == new_freq && kt_freq_control[1] == 0) { - trace_cpufreq_interactive_already( - data, cpu_load, pcpu->target_freq, - pcpu->policy->cur, new_freq); - goto rearm_if_notmax; - } - - trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq, - pcpu->policy->cur, new_freq); - - pcpu->target_freq = new_freq; - spin_lock_irqsave(&speedchange_cpumask_lock, flags); - cpumask_set_cpu(data, &speedchange_cpumask); - spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); - wake_up_process(speedchange_task); - -rearm_if_notmax: - /* - * Already set max speed and don't see a need to change that, - * wait until next idle to re-evaluate, don't need timer. - */ - if (pcpu->target_freq == pcpu->policy->max) - goto exit; - -rearm: - if (!timer_pending(&pcpu->cpu_timer)) - cpufreq_interactive_timer_resched(pcpu); - -exit: - up_read(&pcpu->enable_sem); - return; -} - -static void cpufreq_interactive_idle_start(void) -{ - struct cpufreq_interactive_cpuinfo *pcpu = - &per_cpu(cpuinfo, smp_processor_id()); - int pending; - - if (!down_read_trylock(&pcpu->enable_sem)) - return; - if (!pcpu->governor_enabled) { - up_read(&pcpu->enable_sem); - return; - } - - pending = timer_pending(&pcpu->cpu_timer); - - if (pcpu->target_freq != pcpu->policy->min) { - /* - * Entering idle while not at lowest speed. On some - * platforms this can hold the other CPU(s) at that speed - * even though the CPU is idle. Set a timer to re-evaluate - * speed so this idle CPU doesn't hold the other CPUs above - * min indefinitely. This should probably be a quirk of - * the CPUFreq driver. - */ - if (!pending) - cpufreq_interactive_timer_resched(pcpu); - } - - up_read(&pcpu->enable_sem); -} - -static void cpufreq_interactive_idle_end(void) -{ - struct cpufreq_interactive_cpuinfo *pcpu = - &per_cpu(cpuinfo, smp_processor_id()); - - if (!down_read_trylock(&pcpu->enable_sem)) - return; - if (!pcpu->governor_enabled) { - up_read(&pcpu->enable_sem); - return; - } - - /* Arm the timer for 1-2 ticks later if not already. 
*/ - if (!timer_pending(&pcpu->cpu_timer)) { - cpufreq_interactive_timer_resched(pcpu); - } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) { - del_timer(&pcpu->cpu_timer); - del_timer(&pcpu->cpu_slack_timer); - cpufreq_interactive_timer(smp_processor_id()); - } - - up_read(&pcpu->enable_sem); -} - -static int cpufreq_interactive_speedchange_task(void *data) -{ - unsigned int cpu; - cpumask_t tmp_mask; - unsigned long flags; - struct cpufreq_interactive_cpuinfo *pcpu; - - while (1) { - set_current_state(TASK_INTERRUPTIBLE); - spin_lock_irqsave(&speedchange_cpumask_lock, flags); - - if (cpumask_empty(&speedchange_cpumask)) { - spin_unlock_irqrestore(&speedchange_cpumask_lock, - flags); - schedule(); - - if (kthread_should_stop()) - break; - - spin_lock_irqsave(&speedchange_cpumask_lock, flags); - } - - set_current_state(TASK_RUNNING); - tmp_mask = speedchange_cpumask; - cpumask_clear(&speedchange_cpumask); - spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); - - for_each_cpu(cpu, &tmp_mask) { - unsigned int j; - unsigned int max_freq = 0; - - pcpu = &per_cpu(cpuinfo, cpu); - if (!down_read_trylock(&pcpu->enable_sem)) - continue; - if (!pcpu->governor_enabled) { - up_read(&pcpu->enable_sem); - continue; - } - - //KT hook - if (kt_freq_control[cpu] > 0) - { - max_freq = kt_freq_control[cpu]; - goto skipcpu; - } - - for_each_cpu(j, pcpu->policy->cpus) { - struct cpufreq_interactive_cpuinfo *pjcpu = - &per_cpu(cpuinfo, j); - - if (pjcpu->target_freq > max_freq) - max_freq = pjcpu->target_freq; - - cpufreq_notify_utilization(pcpu->policy, (pcpu->cpu_load * pcpu->policy->cur) / pcpu->policy->cpuinfo.max_freq); - } - -skipcpu: - if (max_freq != pcpu->policy->cur) - __cpufreq_driver_target(pcpu->policy, - max_freq, - CPUFREQ_RELATION_H); - trace_cpufreq_interactive_setspeed(cpu, - pcpu->target_freq, - pcpu->policy->cur); - - up_read(&pcpu->enable_sem); - } - } - - return 0; -} - -static void cpufreq_interactive_boost(void) -{ - int i; - int anyboost = 0; - unsigned long flags; - struct cpufreq_interactive_cpuinfo *pcpu; - - spin_lock_irqsave(&speedchange_cpumask_lock, flags); - - for_each_online_cpu(i) { - pcpu = &per_cpu(cpuinfo, i); - - if (pcpu->target_freq < hispeed_freq) { - pcpu->target_freq = hispeed_freq; - cpumask_set_cpu(i, &speedchange_cpumask); - pcpu->hispeed_validate_time = - ktime_to_us(ktime_get()); - anyboost = 1; - } - - /* - * Set floor freq and (re)start timer for when last - * validated. 
- */ - - pcpu->floor_freq = hispeed_freq; - pcpu->floor_validate_time = ktime_to_us(ktime_get()); - } - - spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); - - if (anyboost) - wake_up_process(speedchange_task); -} - -static int cpufreq_interactive_notifier( - struct notifier_block *nb, unsigned long val, void *data) -{ - struct cpufreq_freqs *freq = data; - struct cpufreq_interactive_cpuinfo *pcpu; - int cpu; - unsigned long flags; - - if (val == CPUFREQ_POSTCHANGE) { - pcpu = &per_cpu(cpuinfo, freq->cpu); - if (!down_read_trylock(&pcpu->enable_sem)) - return 0; - if (!pcpu->governor_enabled) { - up_read(&pcpu->enable_sem); - return 0; - } - - for_each_cpu(cpu, pcpu->policy->cpus) { - struct cpufreq_interactive_cpuinfo *pjcpu = - &per_cpu(cpuinfo, cpu); - if (cpu != freq->cpu) { - if (!down_read_trylock(&pjcpu->enable_sem)) - continue; - if (!pjcpu->governor_enabled) { - up_read(&pjcpu->enable_sem); - continue; - } - } - spin_lock_irqsave(&pjcpu->load_lock, flags); - update_load(cpu); - spin_unlock_irqrestore(&pjcpu->load_lock, flags); - if (cpu != freq->cpu) - up_read(&pjcpu->enable_sem); - } - - up_read(&pcpu->enable_sem); - } - return 0; -} - -static struct notifier_block cpufreq_notifier_block = { - .notifier_call = cpufreq_interactive_notifier, -}; - -static unsigned int *get_tokenized_data(const char *buf, int *num_tokens) -{ - const char *cp; - int i; - int ntokens = 1; - unsigned int *tokenized_data; - int err = -EINVAL; - - cp = buf; - while ((cp = strpbrk(cp + 1, " :"))) - ntokens++; - - if (!(ntokens & 0x1)) - goto err; - - tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL); - if (!tokenized_data) { - err = -ENOMEM; - goto err; - } - - cp = buf; - i = 0; - while (i < ntokens) { - if (sscanf(cp, "%u", &tokenized_data[i++]) != 1) - goto err_kfree; - - cp = strpbrk(cp, " :"); - if (!cp) - break; - cp++; - } - - if (i != ntokens) - goto err_kfree; - - *num_tokens = ntokens; - return tokenized_data; - -err_kfree: - kfree(tokenized_data); -err: - return ERR_PTR(err); -} - -static ssize_t show_target_loads( - struct kobject *kobj, struct attribute *attr, char *buf) -{ - int i; - ssize_t ret = 0; - unsigned long flags; - - spin_lock_irqsave(&target_loads_lock, flags); - - for (i = 0; i < ntarget_loads; i++) - ret += sprintf(buf + ret, "%u%s", target_loads[i], - i & 0x1 ? ":" : " "); - - ret += sprintf(buf + ret, "\n"); - spin_unlock_irqrestore(&target_loads_lock, flags); - return ret; -} - -static ssize_t store_target_loads( - struct kobject *kobj, struct attribute *attr, const char *buf, - size_t count) -{ - int ntokens; - unsigned int *new_target_loads = NULL; - unsigned long flags; - - new_target_loads = get_tokenized_data(buf, &ntokens); - if (IS_ERR(new_target_loads)) - return PTR_RET(new_target_loads); - - spin_lock_irqsave(&target_loads_lock, flags); - if (target_loads != default_target_loads) - kfree(target_loads); - target_loads = new_target_loads; - ntarget_loads = ntokens; - spin_unlock_irqrestore(&target_loads_lock, flags); - return count; -} - -static struct global_attr target_loads_attr = - __ATTR(target_loads, S_IRUGO | S_IWUSR, - show_target_loads, store_target_loads); - -static ssize_t show_above_hispeed_delay( - struct kobject *kobj, struct attribute *attr, char *buf) -{ - int i; - ssize_t ret = 0; - unsigned long flags; - - spin_lock_irqsave(&above_hispeed_delay_lock, flags); - - for (i = 0; i < nabove_hispeed_delay; i++) - ret += sprintf(buf + ret, "%u%s", above_hispeed_delay[i], - i & 0x1 ? 
":" : " "); - - ret += sprintf(buf + ret, "\n"); - spin_unlock_irqrestore(&above_hispeed_delay_lock, flags); - return ret; -} - -static ssize_t store_above_hispeed_delay( - struct kobject *kobj, struct attribute *attr, const char *buf, - size_t count) -{ - int ntokens; - unsigned int *new_above_hispeed_delay = NULL; - unsigned long flags; - - new_above_hispeed_delay = get_tokenized_data(buf, &ntokens); - if (IS_ERR(new_above_hispeed_delay)) - return PTR_RET(new_above_hispeed_delay); - - spin_lock_irqsave(&above_hispeed_delay_lock, flags); - if (above_hispeed_delay != default_above_hispeed_delay) - kfree(above_hispeed_delay); - above_hispeed_delay = new_above_hispeed_delay; - nabove_hispeed_delay = ntokens; - spin_unlock_irqrestore(&above_hispeed_delay_lock, flags); - return count; - -} - -static struct global_attr above_hispeed_delay_attr = - __ATTR(above_hispeed_delay, S_IRUGO | S_IWUSR, - show_above_hispeed_delay, store_above_hispeed_delay); - -static ssize_t show_hispeed_freq(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", hispeed_freq); -} - -static ssize_t store_hispeed_freq(struct kobject *kobj, - struct attribute *attr, const char *buf, - size_t count) -{ - int ret; - long unsigned int val; - - ret = strict_strtoul(buf, 0, &val); - if (ret < 0) - return ret; - hispeed_freq = val; - return count; -} - -static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644, - show_hispeed_freq, store_hispeed_freq); - - -static ssize_t show_go_hispeed_load(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", go_hispeed_load); -} - -static ssize_t store_go_hispeed_load(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned long val; - - ret = strict_strtoul(buf, 0, &val); - if (ret < 0) - return ret; - go_hispeed_load = val; - return count; -} - -static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644, - show_go_hispeed_load, store_go_hispeed_load); - -static ssize_t show_min_sample_time(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", min_sample_time); -} - -static ssize_t store_min_sample_time(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned long val; - - ret = strict_strtoul(buf, 0, &val); - if (ret < 0) - return ret; - min_sample_time = val; - return count; -} - -static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644, - show_min_sample_time, store_min_sample_time); - -static ssize_t show_timer_rate(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", timer_rate); -} - -static ssize_t store_timer_rate(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned long val; - - ret = strict_strtoul(buf, 0, &val); - if (ret < 0) - return ret; - timer_rate = val; - return count; -} - -static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644, - show_timer_rate, store_timer_rate); - -static ssize_t show_timer_slack( - struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", timer_slack_val); -} - -static ssize_t store_timer_slack( - struct kobject *kobj, struct attribute *attr, const char *buf, - size_t count) -{ - int ret; - unsigned long val; - - ret = kstrtol(buf, 10, &val); - if (ret < 0) - return ret; - - timer_slack_val = val; - return count; -} - -define_one_global_rw(timer_slack); - 
-static ssize_t show_boost(struct kobject *kobj, struct attribute *attr, - char *buf) -{ - return sprintf(buf, "%d\n", boost_val); -} - -static ssize_t store_boost(struct kobject *kobj, struct attribute *attr, - const char *buf, size_t count) -{ - int ret; - unsigned long val; - - ret = kstrtoul(buf, 0, &val); - if (ret < 0) - return ret; - - boost_val = val; - - if (boost_val) { - trace_cpufreq_interactive_boost("on"); - cpufreq_interactive_boost(); - } else { - trace_cpufreq_interactive_unboost("off"); - } - - return count; -} - -define_one_global_rw(boost); - -static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr, - const char *buf, size_t count) -{ - int ret; - unsigned long val; - - ret = kstrtoul(buf, 0, &val); - if (ret < 0) - return ret; - - boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val; - trace_cpufreq_interactive_boost("pulse"); - cpufreq_interactive_boost(); - return count; -} - -static struct global_attr boostpulse = - __ATTR(boostpulse, 0200, NULL, store_boostpulse); - -static ssize_t show_boostpulse_duration( - struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", boostpulse_duration_val); -} - -static ssize_t store_boostpulse_duration( - struct kobject *kobj, struct attribute *attr, const char *buf, - size_t count) -{ - int ret; - unsigned long val; - - ret = kstrtoul(buf, 0, &val); - if (ret < 0) - return ret; - - boostpulse_duration_val = val; - return count; -} - -define_one_global_rw(boostpulse_duration); - -static ssize_t show_io_is_busy(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", io_is_busy); -} - -static ssize_t store_io_is_busy(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned long val; - - ret = kstrtoul(buf, 0, &val); - if (ret < 0) - return ret; - io_is_busy = val; - return count; -} - -static struct global_attr io_is_busy_attr = __ATTR(io_is_busy, 0644, - show_io_is_busy, store_io_is_busy); - -static struct attribute *interactive_attributes[] = { - &target_loads_attr.attr, - &above_hispeed_delay_attr.attr, - &hispeed_freq_attr.attr, - &go_hispeed_load_attr.attr, - &min_sample_time_attr.attr, - &timer_rate_attr.attr, - &timer_slack.attr, - &boost.attr, - &boostpulse.attr, - &boostpulse_duration.attr, - &io_is_busy_attr.attr, - NULL, -}; - -static struct attribute_group interactive_attr_group = { - .attrs = interactive_attributes, - .name = "interactive", -}; - -static int cpufreq_interactive_idle_notifier(struct notifier_block *nb, - unsigned long val, - void *data) -{ - switch (val) { - case IDLE_START: - cpufreq_interactive_idle_start(); - break; - case IDLE_END: - cpufreq_interactive_idle_end(); - break; - } - - return 0; -} - -static struct notifier_block cpufreq_interactive_idle_nb = { - .notifier_call = cpufreq_interactive_idle_notifier, -}; - -static int cpufreq_governor_interactive(struct cpufreq_policy *policy, - unsigned int event) -{ - int rc; - unsigned int j; - struct cpufreq_interactive_cpuinfo *pcpu; - struct cpufreq_frequency_table *freq_table; - - switch (event) { - case CPUFREQ_GOV_START: - if (!cpu_online(policy->cpu)) - return -EINVAL; - - mutex_lock(&gov_lock); - - freq_table = - cpufreq_frequency_get_table(policy->cpu); - if (!hispeed_freq) - hispeed_freq = policy->max; - - for_each_cpu(j, policy->cpus) { - pcpu = &per_cpu(cpuinfo, j); - pcpu->policy = policy; - pcpu->target_freq = policy->cur; - pcpu->freq_table = freq_table; - pcpu->floor_freq = 
pcpu->target_freq; - pcpu->floor_validate_time = - ktime_to_us(ktime_get()); - pcpu->hispeed_validate_time = - pcpu->floor_validate_time; - down_write(&pcpu->enable_sem); - cpufreq_interactive_timer_start(j); - pcpu->governor_enabled = 1; - up_write(&pcpu->enable_sem); - } - - /* - * Do not register the idle hook and create sysfs - * entries if we have already done so. - */ - if (++active_count > 1) { - mutex_unlock(&gov_lock); - return 0; - } - - rc = sysfs_create_group(cpufreq_global_kobject, - &interactive_attr_group); - if (rc) { - mutex_unlock(&gov_lock); - return rc; - } - - idle_notifier_register(&cpufreq_interactive_idle_nb); - cpufreq_register_notifier( - &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); - mutex_unlock(&gov_lock); - break; - - case CPUFREQ_GOV_STOP: - mutex_lock(&gov_lock); - for_each_cpu(j, policy->cpus) { - pcpu = &per_cpu(cpuinfo, j); - down_write(&pcpu->enable_sem); - pcpu->governor_enabled = 0; - del_timer_sync(&pcpu->cpu_timer); - del_timer_sync(&pcpu->cpu_slack_timer); - up_write(&pcpu->enable_sem); - } - - if (--active_count > 0) { - mutex_unlock(&gov_lock); - return 0; - } - - cpufreq_unregister_notifier( - &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); - idle_notifier_unregister(&cpufreq_interactive_idle_nb); - sysfs_remove_group(cpufreq_global_kobject, - &interactive_attr_group); - mutex_unlock(&gov_lock); - - break; - - case CPUFREQ_GOV_LIMITS: - if (policy->max < policy->cur) - __cpufreq_driver_target(policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > policy->cur) - __cpufreq_driver_target(policy, - policy->min, CPUFREQ_RELATION_L); - for_each_cpu(j, policy->cpus) { - pcpu = &per_cpu(cpuinfo, j); - - /* hold write semaphore to avoid race */ - down_write(&pcpu->enable_sem); - if (pcpu->governor_enabled == 0) { - up_write(&pcpu->enable_sem); - continue; - } - - /* update target_freq firstly */ - if (policy->max < pcpu->target_freq) - pcpu->target_freq = policy->max; - else if (policy->min > pcpu->target_freq) - pcpu->target_freq = policy->min; - - /* Reschedule timer. - * Delete the timers, else the timer callback may - * return without re-arm the timer when failed - * acquire the semaphore. This race may cause timer - * stopped unexpectedly. 
- */ - del_timer_sync(&pcpu->cpu_timer); - del_timer_sync(&pcpu->cpu_slack_timer); - cpufreq_interactive_timer_start(j); - up_write(&pcpu->enable_sem); - } - break; - } - return 0; -} - -static void cpufreq_interactive_nop_timer(unsigned long data) -{ -} - -static int __init cpufreq_interactive_init(void) -{ - unsigned int i; - struct cpufreq_interactive_cpuinfo *pcpu; - struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; - - /* Initalize per-cpu timers */ - for_each_possible_cpu(i) { - pcpu = &per_cpu(cpuinfo, i); - init_timer_deferrable(&pcpu->cpu_timer); - pcpu->cpu_timer.function = cpufreq_interactive_timer; - pcpu->cpu_timer.data = i; - init_timer(&pcpu->cpu_slack_timer); - pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer; - spin_lock_init(&pcpu->load_lock); - init_rwsem(&pcpu->enable_sem); - } - - spin_lock_init(&target_loads_lock); - spin_lock_init(&speedchange_cpumask_lock); - spin_lock_init(&above_hispeed_delay_lock); - mutex_init(&gov_lock); - speedchange_task = - kthread_create(cpufreq_interactive_speedchange_task, NULL, - "cfinteractive"); - if (IS_ERR(speedchange_task)) - return PTR_ERR(speedchange_task); - - sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, ¶m); - get_task_struct(speedchange_task); - - /* NB: wake up so the thread does not look hung to the freezer */ - wake_up_process(speedchange_task); - - return cpufreq_register_governor(&cpufreq_gov_interactive); -} - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE -fs_initcall(cpufreq_interactive_init); -#else -module_init(cpufreq_interactive_init); -#endif - -static void __exit cpufreq_interactive_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_interactive); - kthread_stop(speedchange_task); - put_task_struct(speedchange_task); -} - -module_exit(cpufreq_interactive_exit); - -MODULE_AUTHOR("Mike Chan "); -MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for " - "Latency sensitive workloads"); -MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cpufreq_ktoonservativeq.c b/drivers/cpufreq/cpufreq_ktoonservativeq.c deleted file mode 100644 index fc081c10..00000000 --- a/drivers/cpufreq/cpufreq_ktoonservativeq.c +++ /dev/null @@ -1,1608 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_ktoonservative.c - * - * Copyright (C) 2001 Russell King - * (C) 2003 Venkatesh Pallipadi . - * Jun Nakajima - * (C) 2009 Alexander Clouter - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ - -#define DEF_CPU_DOWN_BLOCK_CYCLES (11) -#define DEF_BOOST_CPU (1134000) -#define DEF_BOOST_GPU (450) -#define DEF_BOOST_HOLD_CYCLES (22) -#define DEF_DISABLE_HOTPLUGGING (0) -#define CPUS_AVAILABLE num_possible_cpus() -static int hotplug_cpu_enable_up[] = { 0, 58, 68, 78 }; -static int hotplug_cpu_enable_down[] = { 0, 35, 45, 55 }; -static int hotplug_cpu_single_up[] = { 0, 0, 0, 0 }; -static int hotplug_cpu_single_down[] = { 0, 0, 0, 0 }; -static int hotplug_cpu_lockout[] = { 0, 0, 0, 0 }; -static bool hotplug_flag_on = false; -static unsigned int Lcpu_hotplug_block_cycles = 0; -static bool hotplug_flag_off = false; -static bool disable_hotplugging_chrg_override; - -void setExtraCores(unsigned int requested_freq); -unsigned int kt_freq_control[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - -/* - * The polling frequency of this governor depends on the capability of - * the processor. Default polling frequency is 1000 times the transition - * latency of the processor. The governor will work on any processor with - * transition latency <= 10mS, using appropriate sampling - * rate. - * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) - * this governor will not work. - * All times here are in uS. - */ -#define MIN_SAMPLING_RATE_RATIO (2) - -static bool disable_hotplug_bt_active = false; -static unsigned int min_sampling_rate; -static unsigned int stored_sampling_rate = 45000; -static unsigned int Lcpu_down_block_cycles = 0; -static unsigned int Lcpu_up_block_cycles = 0; -static bool boostpulse_relayf = false; -static int boost_hold_cycles_cnt = 0; -static bool screen_is_on = true; - -extern void ktoonservative_is_active(bool val); -extern void ktoonservative_is_activebd(bool val); -extern void boost_the_gpu(int freq, int cycles); - -extern void apenable_auto_hotplug(bool state); -extern bool apget_enable_auto_hotplug(void); -static bool prev_apenable; -static bool hotplugInProgress = false; - -//extern void kt_is_active_benabled_gpio(bool val); -extern void kt_is_active_benabled_touchkey(bool val); -//extern void kt_is_active_benabled_power(bool val); -extern unsigned int get_cable_state(void); -extern void ktoonservative_is_activechrg(bool val); - -#define LATENCY_MULTIPLIER (1000) -#define MIN_LATENCY_MULTIPLIER (100) -#define DEF_SAMPLING_DOWN_FACTOR (1) -#define MAX_SAMPLING_DOWN_FACTOR (10) -#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) - -struct work_struct hotplug_offline_work; -struct work_struct hotplug_online_work; - -static void do_dbs_timer(struct work_struct *work); - -struct cpu_dbs_info_s { - cputime64_t prev_cpu_idle; - cputime64_t prev_cpu_wall; - cputime64_t prev_cpu_nice; - struct cpufreq_policy *cur_policy; - struct delayed_work work; - unsigned int down_skip; - unsigned int requested_freq; - int cpu; - unsigned int enable:1; - /* - * percpu mutex that serializes governor limit change with - * do_dbs_timer invocation. We do not want do_dbs_timer to run - * when user is changing the governor or limits. - */ - struct mutex timer_mutex; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info); - -static unsigned int dbs_enable; /* number of CPUs using this policy */ - -/* - * dbs_mutex protects dbs_enable in governor start/stop. 
- */ -static DEFINE_MUTEX(dbs_mutex); - -static struct workqueue_struct *dbs_wq; - -static struct dbs_tuners { - unsigned int sampling_rate; - unsigned int sampling_rate_screen_off; - unsigned int sampling_down_factor; - unsigned int up_threshold; - unsigned int up_threshold_hotplug_1; - unsigned int up_threshold_hotplug_2; - unsigned int up_threshold_hotplug_3; - unsigned int down_threshold; - unsigned int down_threshold_hotplug_1; - unsigned int down_threshold_hotplug_2; - unsigned int down_threshold_hotplug_3; - unsigned int cpu_down_block_cycles; - unsigned int cpu_hotplug_block_cycles; - unsigned int touch_boost_cpu; - unsigned int touch_boost_cpu_all_cores; - unsigned int touch_boost_2nd_core; - unsigned int touch_boost_3rd_core; - unsigned int touch_boost_4th_core; - unsigned int boost_2nd_core_on_button; - unsigned int boost_3rd_core_on_button; - unsigned int boost_4th_core_on_button; - unsigned int lockout_2nd_core_hotplug; - unsigned int lockout_3rd_core_hotplug; - unsigned int lockout_4th_core_hotplug; - //unsigned int touch_boost_gpu; - unsigned int sync_extra_cores; - unsigned int boost_hold_cycles; - unsigned int disable_hotplugging; - unsigned int disable_hotplugging_chrg; - unsigned int disable_hotplug_bt; - unsigned int no_extra_cores_screen_off; - unsigned int ignore_nice; - unsigned int freq_step; -} dbs_tuners_ins = { - .up_threshold = 57, - .up_threshold_hotplug_1 = 58, - .up_threshold_hotplug_2 = 68, - .up_threshold_hotplug_3 = 78, - .down_threshold = 52, - .down_threshold_hotplug_1 = 35, - .down_threshold_hotplug_2 = 45, - .down_threshold_hotplug_3 = 55, - .cpu_down_block_cycles = DEF_CPU_DOWN_BLOCK_CYCLES, - .cpu_hotplug_block_cycles = DEF_CPU_DOWN_BLOCK_CYCLES, - .touch_boost_cpu = DEF_BOOST_CPU, - .touch_boost_cpu_all_cores = 0, - .touch_boost_2nd_core = 1, - .touch_boost_3rd_core = 0, - .touch_boost_4th_core = 0, - .boost_2nd_core_on_button = 1, - .boost_3rd_core_on_button = 0, - .boost_4th_core_on_button = 0, - .lockout_2nd_core_hotplug = 0, - .lockout_3rd_core_hotplug = 0, - .lockout_4th_core_hotplug = 0, - //.touch_boost_gpu = DEF_BOOST_GPU, - .sync_extra_cores = 0, - .boost_hold_cycles = DEF_BOOST_HOLD_CYCLES, - .disable_hotplugging = DEF_DISABLE_HOTPLUGGING, - .disable_hotplugging_chrg = 0, - .disable_hotplug_bt = 0, - .no_extra_cores_screen_off = 1, - .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, - .sampling_rate_screen_off = 45000, - .ignore_nice = 0, - .freq_step = 5, -}; - -static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, - u64 *wall) -{ - u64 idle_time; - u64 cur_wall_time; - u64 busy_time; - - cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); - - busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; - - idle_time = cur_wall_time - busy_time; - if (wall) - *wall = jiffies_to_usecs(cur_wall_time); - - return jiffies_to_usecs(idle_time); -} - -static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) -{ - u64 idle_time = get_cpu_idle_time_us(cpu, wall); - - if (idle_time == -1ULL) - return get_cpu_idle_time_jiffy(cpu, wall); - - return idle_time; -} - -/* keep track of frequency transitions */ -static int -dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, - void *data) -{ - struct cpufreq_freqs *freq = data; - 
struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info, - freq->cpu); - - struct cpufreq_policy *policy; - - if (!this_dbs_info->enable) - return 0; - - policy = this_dbs_info->cur_policy; - - /* - * we only care if our internally tracked freq moves outside - * the 'valid' ranges of freqency available to us otherwise - * we do not change it - */ - if (this_dbs_info->requested_freq > policy->max - || this_dbs_info->requested_freq < policy->min) - this_dbs_info->requested_freq = freq->new; - - return 0; -} - -static struct notifier_block dbs_cpufreq_notifier_block = { - .notifier_call = dbs_cpufreq_notifier -}; - -void set_bluetooth_state_kt(bool val) -{ - if (val == true && dbs_tuners_ins.disable_hotplug_bt == 1) - { - disable_hotplug_bt_active = true; - if (num_online_cpus() < 2) - { - int cpu; - for (cpu = 1; cpu < CPUS_AVAILABLE; cpu++) - { - if (!cpu_online(cpu)) - hotplug_cpu_single_up[cpu] = 1; - } - if (!hotplugInProgress) - queue_work_on(0, dbs_wq, &hotplug_online_work); - } - } - else - disable_hotplug_bt_active = false; -} - -void send_cable_state_kt(unsigned int state) -{ - int cpu; - if (state && dbs_tuners_ins.disable_hotplugging_chrg) - { - disable_hotplugging_chrg_override = true; - for (cpu = 1; cpu < CPUS_AVAILABLE; cpu++) - hotplug_cpu_single_up[cpu] = 1; - if (!hotplugInProgress) - queue_work_on(0, dbs_wq, &hotplug_online_work); - } - else - { - disable_hotplugging_chrg_override = false; - } -} - -/************************** sysfs interface ************************/ -static ssize_t show_sampling_rate_min(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", min_sampling_rate); -} -define_one_global_ro(sampling_rate_min); - -static ssize_t show_touch_boost_cpu(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", dbs_tuners_ins.touch_boost_cpu / 1000); -} - -static ssize_t show_touch_boost_cpu_all_cores(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", dbs_tuners_ins.touch_boost_cpu_all_cores); -} - -static ssize_t show_sync_extra_cores(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", dbs_tuners_ins.sync_extra_cores); -} - -/* cpufreq_ktoonservative Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct kobject *kobj, struct attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ -} -show_one(sampling_rate, sampling_rate); -show_one(sampling_rate_screen_off, sampling_rate_screen_off); -show_one(sampling_down_factor, sampling_down_factor); -show_one(up_threshold, up_threshold); -show_one(up_threshold_hotplug_1, up_threshold_hotplug_1); -show_one(up_threshold_hotplug_2, up_threshold_hotplug_2); -show_one(up_threshold_hotplug_3, up_threshold_hotplug_3); -show_one(down_threshold, down_threshold); -show_one(down_threshold_hotplug_1, down_threshold_hotplug_1); -show_one(down_threshold_hotplug_2, down_threshold_hotplug_2); -show_one(down_threshold_hotplug_3, down_threshold_hotplug_3); -show_one(cpu_down_block_cycles, cpu_down_block_cycles); -show_one(cpu_hotplug_block_cycles, cpu_hotplug_block_cycles); -show_one(touch_boost_2nd_core, touch_boost_2nd_core); -show_one(touch_boost_3rd_core, touch_boost_3rd_core); -show_one(touch_boost_4th_core, touch_boost_4th_core); -show_one(boost_2nd_core_on_button, boost_2nd_core_on_button); -show_one(boost_3rd_core_on_button, boost_3rd_core_on_button); -show_one(boost_4th_core_on_button, 
boost_4th_core_on_button); -show_one(lockout_2nd_core_hotplug, lockout_2nd_core_hotplug); -show_one(lockout_3rd_core_hotplug, lockout_3rd_core_hotplug); -show_one(lockout_4th_core_hotplug, lockout_4th_core_hotplug); -//show_one(touch_boost_gpu, touch_boost_gpu); -show_one(boost_hold_cycles, boost_hold_cycles); -show_one(disable_hotplugging, disable_hotplugging); -show_one(disable_hotplugging_chrg, disable_hotplugging_chrg); -show_one(disable_hotplug_bt, disable_hotplug_bt); -show_one(no_extra_cores_screen_off, no_extra_cores_screen_off); -show_one(ignore_nice_load, ignore_nice); -show_one(freq_step, freq_step); - -static ssize_t store_sampling_down_factor(struct kobject *a, - struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) - return -EINVAL; - - dbs_tuners_ins.sampling_down_factor = input; - return count; -} - -static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); - stored_sampling_rate = max(input, min_sampling_rate); - return count; -} - -static ssize_t store_sampling_rate_screen_off(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - dbs_tuners_ins.sampling_rate_screen_off = max(input, min_sampling_rate); - return count; -} - -static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > 100 || - input <= dbs_tuners_ins.down_threshold) - return -EINVAL; - - dbs_tuners_ins.up_threshold = input; - return count; -} - -static ssize_t store_up_threshold_hotplug_1(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > 100 || - input <= dbs_tuners_ins.down_threshold_hotplug_1) - return -EINVAL; - - dbs_tuners_ins.up_threshold_hotplug_1 = input; - hotplug_cpu_enable_up[1] = input; - return count; -} - -static ssize_t store_up_threshold_hotplug_2(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > 100 || - input <= dbs_tuners_ins.down_threshold_hotplug_2) - return -EINVAL; - - dbs_tuners_ins.up_threshold_hotplug_2 = input; - hotplug_cpu_enable_up[2] = input; - return count; -} - -static ssize_t store_up_threshold_hotplug_3(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > 100 || - input <= dbs_tuners_ins.down_threshold_hotplug_3) - return -EINVAL; - - dbs_tuners_ins.up_threshold_hotplug_3 = input; - hotplug_cpu_enable_up[3] = input; - return count; -} - -static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - /* cannot be lower than 11 otherwise freq will not fall */ - if (ret != 1 || input < 11 || input > 100 || - input >= dbs_tuners_ins.up_threshold) - return -EINVAL; - - 
dbs_tuners_ins.down_threshold = input; - return count; -} - -static ssize_t store_down_threshold_hotplug_1(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - /* cannot be lower than 11 otherwise freq will not fall */ - if (ret != 1 || input < 11 || input > 100 || - input >= dbs_tuners_ins.up_threshold_hotplug_1) - return -EINVAL; - - dbs_tuners_ins.down_threshold_hotplug_1 = input; - hotplug_cpu_enable_down[1] = input; - return count; -} - -static ssize_t store_down_threshold_hotplug_2(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - /* cannot be lower than 11 otherwise freq will not fall */ - if (ret != 1 || input < 11 || input > 100 || - input >= dbs_tuners_ins.up_threshold_hotplug_2) - return -EINVAL; - - dbs_tuners_ins.down_threshold_hotplug_2 = input; - hotplug_cpu_enable_down[2] = input; - return count; -} - -static ssize_t store_down_threshold_hotplug_3(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - /* cannot be lower than 11 otherwise freq will not fall */ - if (ret != 1 || input < 11 || input > 100 || - input >= dbs_tuners_ins.up_threshold_hotplug_3) - return -EINVAL; - - dbs_tuners_ins.down_threshold_hotplug_3 = input; - hotplug_cpu_enable_down[3] = input; - return count; -} - -static ssize_t store_cpu_down_block_cycles(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - /* cannot be lower than 11 otherwise freq will not fall */ - if (input < 0) - return -EINVAL; - - dbs_tuners_ins.cpu_down_block_cycles = input; - return count; -} - -static ssize_t store_cpu_hotplug_block_cycles(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - /* cannot be lower than 11 otherwise freq will not fall */ - if (input < 0) - return -EINVAL; - - dbs_tuners_ins.cpu_hotplug_block_cycles = input; - return count; -} - -static ssize_t store_touch_boost_cpu(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - if (input * 1000 > GLOBALKT_MAX_FREQ_LIMIT) - input = GLOBALKT_MAX_FREQ_LIMIT; - if (input * 1000 < 0) - input = 0; - dbs_tuners_ins.touch_boost_cpu = input * 1000; - return count; -} - -static ssize_t store_touch_boost_cpu_all_cores(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret, i; - - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - if (input != 0 && input != 1) - input = 1; - dbs_tuners_ins.touch_boost_cpu_all_cores = input; - - if (dbs_tuners_ins.sync_extra_cores == 0 && dbs_tuners_ins.touch_boost_cpu_all_cores == 0) - { - for (i = 0; i < CPUS_AVAILABLE; i++) - kt_freq_control[i] = 0; - } - return count; -} - -static ssize_t store_sync_extra_cores(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret, i; - - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - if (input != 0 && input != 1) - input = 1; - dbs_tuners_ins.sync_extra_cores = input; - - if (dbs_tuners_ins.sync_extra_cores == 0 && dbs_tuners_ins.touch_boost_cpu_all_cores 
== 0) - { - for (i = 0; i < CPUS_AVAILABLE; i++) - kt_freq_control[i] = 0; - } - return count; -} - -static ssize_t store_touch_boost_2nd_core(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (input != 0 && input != 1) - input = 0; - - dbs_tuners_ins.touch_boost_2nd_core = input; - return count; -} - -static ssize_t store_touch_boost_3rd_core(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (input != 0 && input != 1) - input = 0; - - dbs_tuners_ins.touch_boost_3rd_core = input; - return count; -} - -static ssize_t store_touch_boost_4th_core(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (input != 0 && input != 1) - input = 0; - - dbs_tuners_ins.touch_boost_4th_core = input; - return count; -} - -static ssize_t store_lockout_2nd_core_hotplug(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret, cpu; - ret = sscanf(buf, "%u", &input); - - if (input != 0 && input != 1 && input != 2) - input = 0; - - dbs_tuners_ins.lockout_2nd_core_hotplug = input; - hotplug_cpu_lockout[1] = input; - if (input == 1) - { - hotplug_cpu_single_up[1] = 1; - if (!hotplugInProgress) - queue_work_on(0, dbs_wq, &hotplug_online_work); - } - else if (input == 2) - { - hotplug_cpu_single_down[1] = 1; - if (!hotplugInProgress) - queue_work_on(0, dbs_wq, &hotplug_offline_work); - } - return count; -} - -static ssize_t store_lockout_3rd_core_hotplug(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret, cpu; - ret = sscanf(buf, "%u", &input); - - if (input != 0 && input != 1 && input != 2) - input = 0; - - dbs_tuners_ins.lockout_3rd_core_hotplug = input; - hotplug_cpu_lockout[2] = input; - if (input == 1) - { - hotplug_cpu_single_up[2] = 1; - if (!hotplugInProgress) - queue_work_on(0, dbs_wq, &hotplug_online_work); - } - else if (input == 2) - { - hotplug_cpu_single_down[2] = 1; - if (!hotplugInProgress) - queue_work_on(0, dbs_wq, &hotplug_offline_work); - } - return count; -} - -static ssize_t store_lockout_4th_core_hotplug(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret, cpu; - ret = sscanf(buf, "%u", &input); - - if (input != 0 && input != 1 && input != 2) - input = 0; - - dbs_tuners_ins.lockout_4th_core_hotplug = input; - hotplug_cpu_lockout[3] = input; - if (input == 1) - { - hotplug_cpu_single_up[3] = 1; - if (!hotplugInProgress) - queue_work_on(0, dbs_wq, &hotplug_online_work); - } - else if (input == 2) - { - hotplug_cpu_single_down[3] = 1; - if (!hotplugInProgress) - queue_work_on(0, dbs_wq, &hotplug_offline_work); - } - return count; -} - -/*static ssize_t store_touch_boost_gpu(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (input != 100 && input != 160 && input != 266 && input != 350 && input != 400 && input != 450 && input != 533 && input != 612) - input = 0; - - dbs_tuners_ins.touch_boost_gpu = input; - return count; -}*/ - -static ssize_t store_boost_hold_cycles(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (input < 0) - return -EINVAL; 
- - dbs_tuners_ins.boost_hold_cycles = input; - return count; -} - -static ssize_t store_disable_hotplugging(struct kobject *a, struct attribute *b, const char *buf, size_t count) -{ - unsigned int input; - int ret, cpu; - ret = sscanf(buf, "%u", &input); - - if (input != 0 && input != 1) - input = 0; - - dbs_tuners_ins.disable_hotplugging = input; - if (input == 1) - { - for (cpu = 1; cpu < CPUS_AVAILABLE; cpu++) - hotplug_cpu_single_up[cpu] = 1; - if (!hotplugInProgress) - queue_work_on(0, dbs_wq, &hotplug_online_work); - } - return count; -} - -static ssize_t store_disable_hotplugging_chrg(struct kobject *a, struct attribute *b, const char *buf, size_t count) -{ - unsigned int input, c_state; - int ret, cpu; - ret = sscanf(buf, "%u", &input); - - if (input != 0 && input != 1) - input = 0; - - dbs_tuners_ins.disable_hotplugging_chrg = input; - c_state = get_cable_state(); - send_cable_state_kt(c_state); - - return count; -} - -static ssize_t store_no_extra_cores_screen_off(struct kobject *a, struct attribute *b, const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (input != 0 && input != 1) - input = 0; - - dbs_tuners_ins.no_extra_cores_screen_off = input; - return count; -} - -static ssize_t store_boost_2nd_core_on_button(struct kobject *a, struct attribute *b, const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (input != 0 && input != 1) - input = 0; - - dbs_tuners_ins.boost_2nd_core_on_button = input; - if (dbs_tuners_ins.boost_2nd_core_on_button == 1) - { - //kt_is_active_benabled_gpio(true); - kt_is_active_benabled_touchkey(true); - //kt_is_active_benabled_power(true); - } - - return count; -} - -static ssize_t store_boost_3rd_core_on_button(struct kobject *a, struct attribute *b, const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (input != 0 && input != 1) - input = 0; - - dbs_tuners_ins.boost_3rd_core_on_button = input; - if (dbs_tuners_ins.boost_3rd_core_on_button == 1) - { - //kt_is_active_benabled_gpio(true); - kt_is_active_benabled_touchkey(true); - //kt_is_active_benabled_power(true); - } - - return count; -} - -static ssize_t store_boost_4th_core_on_button(struct kobject *a, struct attribute *b, const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (input != 0 && input != 1) - input = 0; - - dbs_tuners_ins.boost_4th_core_on_button = input; - if (dbs_tuners_ins.boost_4th_core_on_button == 1) - { - //kt_is_active_benabled_gpio(true); - kt_is_active_benabled_touchkey(true); - //kt_is_active_benabled_power(true); - } - - return count; -} - -static ssize_t store_disable_hotplug_bt(struct kobject *a, struct attribute *b, const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (input != 0 && input != 1) - input = 0; - - dbs_tuners_ins.disable_hotplug_bt = input; - return count; -} - -static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - unsigned int j; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - if (input > 1) - input = 1; - - if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */ - return count; - - dbs_tuners_ins.ignore_nice = input; - - /* we need to re-evaluate prev_cpu_idle */ - for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; - dbs_info = 
&per_cpu(cs_cpu_dbs_info, j); - dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - } - return count; -} - -static ssize_t store_freq_step(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - if (input > 100) - input = 100; - - /* no need to test here if freq_step is zero as the user might actually - * want this, they would be crazy though :) */ - dbs_tuners_ins.freq_step = input; - return count; -} - -define_one_global_rw(sampling_rate); -define_one_global_rw(sampling_rate_screen_off); -define_one_global_rw(sampling_down_factor); -define_one_global_rw(up_threshold); -define_one_global_rw(up_threshold_hotplug_1); -define_one_global_rw(up_threshold_hotplug_2); -define_one_global_rw(up_threshold_hotplug_3); -define_one_global_rw(down_threshold); -define_one_global_rw(down_threshold_hotplug_1); -define_one_global_rw(down_threshold_hotplug_2); -define_one_global_rw(down_threshold_hotplug_3); -define_one_global_rw(cpu_down_block_cycles); -define_one_global_rw(cpu_hotplug_block_cycles); -define_one_global_rw(touch_boost_cpu); -define_one_global_rw(touch_boost_cpu_all_cores); -define_one_global_rw(touch_boost_2nd_core); -define_one_global_rw(touch_boost_3rd_core); -define_one_global_rw(touch_boost_4th_core); -define_one_global_rw(boost_2nd_core_on_button); -define_one_global_rw(boost_3rd_core_on_button); -define_one_global_rw(boost_4th_core_on_button); -define_one_global_rw(lockout_2nd_core_hotplug); -define_one_global_rw(lockout_3rd_core_hotplug); -define_one_global_rw(lockout_4th_core_hotplug); -//define_one_global_rw(touch_boost_gpu); -define_one_global_rw(sync_extra_cores); -define_one_global_rw(boost_hold_cycles); -define_one_global_rw(disable_hotplugging); -define_one_global_rw(disable_hotplugging_chrg); -define_one_global_rw(disable_hotplug_bt); -define_one_global_rw(no_extra_cores_screen_off); -define_one_global_rw(ignore_nice_load); -define_one_global_rw(freq_step); - -static struct attribute *dbs_attributes[] = { - &sampling_rate_min.attr, - &sampling_rate.attr, - &sampling_rate_screen_off.attr, - &sampling_down_factor.attr, - &up_threshold.attr, - &up_threshold_hotplug_1.attr, - &up_threshold_hotplug_2.attr, - &up_threshold_hotplug_3.attr, - &down_threshold.attr, - &down_threshold_hotplug_1.attr, - &down_threshold_hotplug_2.attr, - &down_threshold_hotplug_3.attr, - &cpu_down_block_cycles.attr, - &cpu_hotplug_block_cycles.attr, - &touch_boost_cpu.attr, - &touch_boost_cpu_all_cores.attr, - &touch_boost_2nd_core.attr, - &touch_boost_3rd_core.attr, - &touch_boost_4th_core.attr, - &boost_2nd_core_on_button.attr, - &boost_3rd_core_on_button.attr, - &boost_4th_core_on_button.attr, - &lockout_2nd_core_hotplug.attr, - &lockout_3rd_core_hotplug.attr, - &lockout_4th_core_hotplug.attr, - //&touch_boost_gpu.attr, - &sync_extra_cores.attr, - &boost_hold_cycles.attr, - &disable_hotplugging.attr, - &disable_hotplugging_chrg.attr, - &disable_hotplug_bt.attr, - &no_extra_cores_screen_off.attr, - &ignore_nice_load.attr, - &freq_step.attr, - NULL -}; - -static struct attribute_group dbs_attr_group = { - .attrs = dbs_attributes, - .name = "ktoonservativeq", -}; - -/************************** sysfs end ************************/ - -static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) -{ - unsigned int load = 0; - unsigned int max_load = 0; - 
unsigned int freq_target; - int cpu; - struct cpufreq_policy *policy; - unsigned int j; - - policy = this_dbs_info->cur_policy; - - if (boostpulse_relayf) - { - if (stored_sampling_rate != 0 && screen_is_on) - dbs_tuners_ins.sampling_rate = stored_sampling_rate; - this_dbs_info->down_skip = 0; - - if (boost_hold_cycles_cnt >= dbs_tuners_ins.boost_hold_cycles) - { - boostpulse_relayf = false; - boost_hold_cycles_cnt = 0; - if (dbs_tuners_ins.sync_extra_cores == 0) - { - for (cpu = 0; cpu < CPUS_AVAILABLE; cpu++) - kt_freq_control[cpu] = 0; - } - goto boostcomplete; - } - boost_hold_cycles_cnt++; - - if (dbs_tuners_ins.touch_boost_cpu_all_cores && policy->cpu == 0) - { - for (cpu = 1; cpu < CPUS_AVAILABLE; cpu++) - { - if (&trmlpolicy[cpu] != NULL) - { - if (cpu_online(cpu)) - { - if (trmlpolicy[cpu].cur < dbs_tuners_ins.touch_boost_cpu) - { - //__cpufreq_driver_target(&trmlpolicy[cpu], dbs_tuners_ins.touch_boost_cpu, - // CPUFREQ_RELATION_H); - kt_freq_control[cpu] = dbs_tuners_ins.touch_boost_cpu; - //pr_alert("BOOST EXTRA CPUs: %d\n", cpu); - } - } - } - } - } - - /* if we are already at full speed then break out early */ - if (this_dbs_info->requested_freq == policy->max || policy->cur > dbs_tuners_ins.touch_boost_cpu || this_dbs_info->requested_freq > dbs_tuners_ins.touch_boost_cpu) - return; - - this_dbs_info->requested_freq = dbs_tuners_ins.touch_boost_cpu; - __cpufreq_driver_target(policy, this_dbs_info->requested_freq, - CPUFREQ_RELATION_H); -boostcomplete: - return; - } - - /* - * Every sampling_rate, we check, if current idle time is less - * than 20% (default), then we try to increase frequency - * Every sampling_rate*sampling_down_factor, we check, if current - * idle time is more than 80%, then we try to decrease frequency - * - * Any frequency increase takes it to the maximum frequency. 
- * Frequency reduction happens at minimum steps of - * 5% (default) of maximum frequency - */ - - /* Get Absolute Load */ - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - cputime64_t cur_wall_time, cur_idle_time; - unsigned int idle_time, wall_time; - - j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); - - cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); - - wall_time = (unsigned int) - (cur_wall_time - j_dbs_info->prev_cpu_wall); - j_dbs_info->prev_cpu_wall = cur_wall_time; - - idle_time = (unsigned int) - (cur_idle_time - j_dbs_info->prev_cpu_idle); - j_dbs_info->prev_cpu_idle = cur_idle_time; - - if (dbs_tuners_ins.ignore_nice) { - u64 cur_nice; - unsigned long cur_nice_jiffies; - - cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - - j_dbs_info->prev_cpu_nice; - /* - * Assumption: nice time between sampling periods will - * be less than 2^32 jiffies for 32 bit sys - */ - cur_nice_jiffies = (unsigned long) - cputime64_to_jiffies64(cur_nice); - - j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - idle_time += jiffies_to_usecs(cur_nice_jiffies); - } - - if (unlikely(!wall_time || wall_time < idle_time)) - continue; - - load = 100 * (wall_time - idle_time) / wall_time; - if (load > max_load) - max_load = load; - //max_load += load; - //pr_alert("LOAD CHECK2: %d-%d", load, max_load); - } - //max_load = max_load / num_online_cpus(); - /* - * break out if we 'cannot' reduce the speed as the user might - * want freq_step to be zero - */ - if (dbs_tuners_ins.freq_step == 0) - return; - - if (policy->cpu == 0) - { - for (cpu = 1; cpu < CPUS_AVAILABLE; cpu++) - { - if (max_load >= hotplug_cpu_enable_up[cpu] && (!cpu_online(cpu)) && hotplug_cpu_lockout[cpu] != 2) - { - if (Lcpu_hotplug_block_cycles > dbs_tuners_ins.cpu_hotplug_block_cycles) - { - hotplug_cpu_single_up[cpu] = 1; - hotplug_flag_on = true; - Lcpu_hotplug_block_cycles = 0; - } - Lcpu_hotplug_block_cycles++; - break; - } - else if (max_load <= hotplug_cpu_enable_down[CPUS_AVAILABLE - cpu] && (cpu_online(CPUS_AVAILABLE - cpu)) && hotplug_cpu_lockout[CPUS_AVAILABLE - cpu] != 1) - { - hotplug_cpu_single_down[CPUS_AVAILABLE - cpu] = 1; - hotplug_flag_off = true; - break; - } - } - //pr_alert("LOAD CHECK: %d-%d-%d-%d-%d-%d-%d\n", max_load, hotplug_cpu_single_up[1], hotplug_cpu_single_up[2], hotplug_cpu_single_up[3], hotplug_cpu_enable_up[1], hotplug_cpu_enable_up[2], hotplug_cpu_enable_up[3]); - - /* Check for frequency increase is greater than hotplug value */ - //CPUS_AVAILABLE - if (hotplug_flag_on) { - if (policy->cur > (policy->min * 2)) - { - if (Lcpu_up_block_cycles > dbs_tuners_ins.cpu_down_block_cycles && (dbs_tuners_ins.no_extra_cores_screen_off == 0 || (dbs_tuners_ins.no_extra_cores_screen_off == 1 && screen_is_on))) - { - hotplug_flag_on = false; - if (!hotplugInProgress && policy->cpu == 0) - queue_work_on(policy->cpu, dbs_wq, &hotplug_online_work); - Lcpu_up_block_cycles = 0; - } - Lcpu_up_block_cycles++; - } - } - } - - /* Check for frequency increase */ - if (max_load > dbs_tuners_ins.up_threshold) { - this_dbs_info->down_skip = 0; - - /* if we are already at full speed then break out early */ - if (this_dbs_info->requested_freq == policy->max) - return; - - freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; - - /* max freq cannot be less than 100. But who knows.... 
*/ - if (unlikely(freq_target == 0)) - freq_target = 5; - - this_dbs_info->requested_freq += freq_target; - if (this_dbs_info->requested_freq > policy->max) - this_dbs_info->requested_freq = policy->max; - - __cpufreq_driver_target(policy, this_dbs_info->requested_freq, CPUFREQ_RELATION_H); - if (dbs_tuners_ins.sync_extra_cores && policy->cpu == 0) - setExtraCores(this_dbs_info->requested_freq); - return; - } - - if (policy->cpu == 0 && hotplug_flag_off && !dbs_tuners_ins.disable_hotplugging && !disable_hotplugging_chrg_override && disable_hotplug_bt_active == false) { - if (num_online_cpus() > 1) - { - if (Lcpu_down_block_cycles > dbs_tuners_ins.cpu_down_block_cycles) - { - hotplug_flag_off = false; - if (!hotplugInProgress && policy->cpu == 0) - queue_work_on(policy->cpu, dbs_wq, &hotplug_offline_work); - Lcpu_down_block_cycles = 0; - } - Lcpu_down_block_cycles++; - } - } - /* - * The optimal frequency is the frequency that is the lowest that - * can support the current CPU usage without triggering the up - * policy. To be safe, we focus 10 points under the threshold. - */ - if (max_load < (dbs_tuners_ins.down_threshold - 10)) { - freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; - - this_dbs_info->requested_freq -= freq_target; - if (this_dbs_info->requested_freq < policy->min) - this_dbs_info->requested_freq = policy->min; - - /* - * if we cannot reduce the frequency anymore, break out early - */ - if (policy->cur == policy->min) - return; - - __cpufreq_driver_target(policy, this_dbs_info->requested_freq, CPUFREQ_RELATION_H); - if (dbs_tuners_ins.sync_extra_cores && policy->cpu == 0) - setExtraCores(this_dbs_info->requested_freq); - return; - } -} - -void setExtraCores(unsigned int requested_freq) -{ - unsigned int cpu; - for (cpu = 1; cpu < CPUS_AVAILABLE; cpu++) - { - if (&trmlpolicy[cpu] != NULL) - { - if (cpu_online(cpu)) - { - //__cpufreq_driver_target(&trmlpolicy[cpu], requested_freq, CPUFREQ_RELATION_H); - kt_freq_control[cpu] = requested_freq; - //pr_alert("BOOST EXTRA CPUs: %d\n", cpu); - } - } - } -} - -void check_boost_cores_up(bool dec1, bool dec2, bool dec3) -{ - bool got_boost_core = false; - - if (!cpu_online(1) && dec1 && hotplug_cpu_lockout[1] != 2) - { - hotplug_cpu_single_up[1] = 1; - got_boost_core = true; - } - if (!cpu_online(2) && dec2 && hotplug_cpu_lockout[2] != 2) - { - hotplug_cpu_single_up[2] = 1; - got_boost_core = true; - } - if (!cpu_online(3) && dec3 && hotplug_cpu_lockout[3] != 2) - { - hotplug_cpu_single_up[3] = 1; - got_boost_core = true; - } - if (got_boost_core) - { - if (!hotplugInProgress) - queue_work_on(0, dbs_wq, &hotplug_online_work); - } -} - -void screen_is_on_relay_kt(bool state) -{ - screen_is_on = state; - if (state == true) - { - if (stored_sampling_rate > 0) - dbs_tuners_ins.sampling_rate = stored_sampling_rate; //max(input, min_sampling_rate); - - check_boost_cores_up(dbs_tuners_ins.boost_2nd_core_on_button, dbs_tuners_ins.boost_3rd_core_on_button, dbs_tuners_ins.boost_4th_core_on_button); - - //pr_alert("SCREEN_IS_ON1: %d-%d\n", dbs_tuners_ins.sampling_rate, stored_sampling_rate); - } - else - { - stored_sampling_rate = dbs_tuners_ins.sampling_rate; - dbs_tuners_ins.sampling_rate = dbs_tuners_ins.sampling_rate_screen_off; - //pr_alert("SCREEN_IS_ON2: %d-%d\n", dbs_tuners_ins.sampling_rate, stored_sampling_rate); - } - -} - -void boostpulse_relay_kt(void) -{ - if (!boostpulse_relayf) - { - bool got_boost_core = false; - - if (dbs_tuners_ins.touch_boost_2nd_core == 0 && dbs_tuners_ins.touch_boost_3rd_core == 0 && 
dbs_tuners_ins.touch_boost_4th_core == 0 && dbs_tuners_ins.touch_boost_cpu == 0) // && dbs_tuners_ins.touch_boost_gpu == 0) - return; - /*if (dbs_tuners_ins.touch_boost_gpu > 0) - { - int bpc = (dbs_tuners_ins.boost_hold_cycles / 2); - if (dbs_tuners_ins.boost_hold_cycles > 0) - boost_the_gpu(dbs_tuners_ins.touch_boost_gpu, bpc); - else - boost_the_gpu(dbs_tuners_ins.touch_boost_gpu, 0); - }*/ - check_boost_cores_up(dbs_tuners_ins.touch_boost_2nd_core, dbs_tuners_ins.touch_boost_3rd_core, dbs_tuners_ins.touch_boost_4th_core); - - boostpulse_relayf = true; - boost_hold_cycles_cnt = 0; - //dbs_tuners_ins.sampling_rate = min_sampling_rate; - //pr_info("BOOSTPULSE RELAY KT"); - } - else - { - /*if (dbs_tuners_ins.touch_boost_gpu > 0) - { - int bpc = (dbs_tuners_ins.boost_hold_cycles / 2); - if (dbs_tuners_ins.boost_hold_cycles > 0) - boost_the_gpu(dbs_tuners_ins.touch_boost_gpu, bpc); - else - boost_the_gpu(dbs_tuners_ins.touch_boost_gpu, 0); - }*/ - boost_hold_cycles_cnt = 0; - } -} - -static void __cpuinit hotplug_offline_work_fn(struct work_struct *work) -{ - int cpu; - //pr_info("ENTER OFFLINE"); - for_each_online_cpu(cpu) { - if (likely(cpu_online(cpu) && (cpu))) { - if (hotplug_cpu_single_down[cpu]) - { - hotplug_cpu_single_down[cpu] = 0; - cpu_down(cpu); - } - //pr_info("auto_hotplug: CPU%d down.\n", cpu); - } - } - hotplugInProgress = false; -} - -static void __cpuinit hotplug_online_work_fn(struct work_struct *work) -{ - int cpu; - //pr_info("ENTER ONLINE"); - for_each_possible_cpu(cpu) { - if (likely(!cpu_online(cpu) && (cpu))) { - if (hotplug_cpu_single_up[cpu]) - { - hotplug_cpu_single_up[cpu] = 0; - cpu_up(cpu); - } - //pr_info("auto_hotplug: CPU%d up.\n", cpu); - } - } - hotplugInProgress = false; -} - -static void do_dbs_timer(struct work_struct *work) -{ - struct cpu_dbs_info_s *dbs_info = - container_of(work, struct cpu_dbs_info_s, work.work); - unsigned int cpu = dbs_info->cpu; - - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - delay -= jiffies % delay; - - mutex_lock(&dbs_info->timer_mutex); - - dbs_check_cpu(dbs_info); - - queue_delayed_work_on(cpu, dbs_wq, &dbs_info->work, delay); - mutex_unlock(&dbs_info->timer_mutex); -} - -static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) -{ - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - delay -= jiffies % delay; - - dbs_info->enable = 1; - INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); - queue_delayed_work_on(dbs_info->cpu, dbs_wq, &dbs_info->work, delay); -} - -static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) -{ - dbs_info->enable = 0; - cancel_delayed_work_sync(&dbs_info->work); - cancel_work_sync(&hotplug_offline_work); - cancel_work_sync(&hotplug_online_work); -} - -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int j; - int rc; - - this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - ktoonservative_is_active(true); - ktoonservative_is_activebd(true); - ktoonservative_is_activechrg(true); - if (dbs_tuners_ins.boost_2nd_core_on_button == 1 || dbs_tuners_ins.boost_3rd_core_on_button == 1 || dbs_tuners_ins.boost_4th_core_on_button == 1) - { - //kt_is_active_benabled_gpio(true); - kt_is_active_benabled_touchkey(true); - //kt_is_active_benabled_power(true); - } - - 
prev_apenable = apget_enable_auto_hotplug(); - apenable_auto_hotplug(false); - - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - mutex_lock(&dbs_mutex); - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) { - j_dbs_info->prev_cpu_nice = - kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - } - } - this_dbs_info->cpu = cpu; - this_dbs_info->down_skip = 0; - this_dbs_info->requested_freq = policy->cur; - - mutex_init(&this_dbs_info->timer_mutex); - dbs_enable++; - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 1) { - unsigned int latency; - /* policy latency is in nS. Convert it to uS first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - - rc = sysfs_create_group(cpufreq_global_kobject, - &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - min_sampling_rate = (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10)) / 20; - /* Bring kernel and HW constraints together */ - min_sampling_rate = max(min_sampling_rate, - MIN_LATENCY_MULTIPLIER * latency); - dbs_tuners_ins.sampling_rate = 45000; - //max((min_sampling_rate * 20), - //latency * LATENCY_MULTIPLIER); - - cpufreq_register_notifier( - &dbs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - } - mutex_unlock(&dbs_mutex); - - dbs_timer_init(this_dbs_info); - - break; - - case CPUFREQ_GOV_STOP: - ktoonservative_is_active(false); - ktoonservative_is_activebd(false); - ktoonservative_is_activechrg(false); - //kt_is_active_benabled_gpio(false); - kt_is_active_benabled_touchkey(false); - //kt_is_active_benabled_power(false); - - apenable_auto_hotplug(prev_apenable); - - dbs_timer_exit(this_dbs_info); - - mutex_lock(&dbs_mutex); - dbs_enable--; - mutex_destroy(&this_dbs_info->timer_mutex); - - /* - * Stop the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 0) - cpufreq_unregister_notifier( - &dbs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - - mutex_unlock(&dbs_mutex); - if (!dbs_enable) - sysfs_remove_group(cpufreq_global_kobject, - &dbs_attr_group); - - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&this_dbs_info->timer_mutex); - if (policy->max < this_dbs_info->cur_policy->cur) - { - __cpufreq_driver_target(this_dbs_info->cur_policy, policy->max, CPUFREQ_RELATION_H); - } - else if (policy->min > this_dbs_info->cur_policy->cur) - { - __cpufreq_driver_target(this_dbs_info->cur_policy, policy->min, CPUFREQ_RELATION_L); - } - dbs_check_cpu(this_dbs_info); - mutex_unlock(&this_dbs_info->timer_mutex); - - break; - } - return 0; -} - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_KTOONSERVATIVEQ -static -#endif -struct cpufreq_governor cpufreq_gov_ktoonservative = { - .name = "ktoonservativeq", - .governor = cpufreq_governor_dbs, - .max_transition_latency = TRANSITION_LATENCY_LIMIT, - .owner = THIS_MODULE, -}; - -static int __init cpufreq_gov_dbs_init(void) -{ - dbs_wq = alloc_workqueue("ktoonservativeq_dbs_wq", WQ_HIGHPRI, 0); - if (!dbs_wq) { - printk(KERN_ERR "Failed to create ktoonservativeq_dbs_wq workqueue\n"); - return -EFAULT; - } - - INIT_WORK(&hotplug_offline_work, hotplug_offline_work_fn); - INIT_WORK(&hotplug_online_work, hotplug_online_work_fn); - - return cpufreq_register_governor(&cpufreq_gov_ktoonservative); -} - -static void __exit 
cpufreq_gov_dbs_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_ktoonservative); - destroy_workqueue(dbs_wq); -} - -MODULE_AUTHOR("Alexander Clouter "); -MODULE_DESCRIPTION("'cpufreq_ktoonservativeq' - A dynamic cpufreq governor for " - "Low Latency Frequency Transition capable processors " - "optimised for use in a battery environment"); -MODULE_LICENSE("GPL"); - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_KTOONSERVATIVEQ -fs_initcall(cpufreq_gov_dbs_init); -#else -module_init(cpufreq_gov_dbs_init); -#endif -module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_smartassv2.c b/drivers/cpufreq/cpufreq_smartassv2.c deleted file mode 100644 index 3b51a788..00000000 --- a/drivers/cpufreq/cpufreq_smartassv2.c +++ /dev/null @@ -1,905 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_smartassv2.c - * - * Copyright (C) 2010 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Author: Erasmux - * - * Based on the interactive governor By Mike Chan (mike@android.com) - * which was adaptated to 2.6.29 kernel by Nadlabak (pavel@doshaska.net) - * - * SMP support based on mod by faux123 - * - * ZTE Skate specific tweaks by H3ROS @ MoDaCo, integrated by C3C0 @ MoDaCo - * - * For a general overview of smartassV2 see the relavent part in - * Documentation/cpu-freq/governors.txt - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -/******************** Tunable parameters: ********************/ - -/* - * The "ideal" frequency to use when awake. The governor will ramp up faster - * towards the ideal frequency and slower after it has passed it. Similarly, - * lowering the frequency towards the ideal frequency is faster than below it. - */ -#define DEFAULT_AWAKE_IDEAL_FREQ 378000 -static unsigned int awake_ideal_freq; - -/* - * The "ideal" frequency to use when suspended. - * When set to 0, the governor will not track the suspended state (meaning - * that practically when sleep_ideal_freq==0 the awake_ideal_freq is used - * also when suspended). - */ -#define DEFAULT_SLEEP_IDEAL_FREQ 378000 -static unsigned int sleep_ideal_freq; - -/* - * Freqeuncy delta when ramping up above the ideal freqeuncy. - * Zero disables and causes to always jump straight to max frequency. - * When below the ideal freqeuncy we always ramp up to the ideal freq. - */ -#define DEFAULT_RAMP_UP_STEP 80000 -static unsigned int ramp_up_step; - -/* - * Freqeuncy delta when ramping down below the ideal freqeuncy. - * Zero disables and will calculate ramp down according to load heuristic. - * When above the ideal freqeuncy we always ramp down to the ideal freq. - */ -#define DEFAULT_RAMP_DOWN_STEP 80000 -static unsigned int ramp_down_step; - -/* - * CPU freq will be increased if measured load > max_cpu_load; - */ -#define DEFAULT_MAX_CPU_LOAD 85 -static unsigned long max_cpu_load; - -/* - * CPU freq will be decreased if measured load < min_cpu_load; - */ -#define DEFAULT_MIN_CPU_LOAD 70 -static unsigned long min_cpu_load; - -/* - * The minimum amount of time to spend at a frequency before we can ramp up. 
- * Notice we ignore this when we are below the ideal frequency. - */ -#define DEFAULT_UP_RATE_US 48000; -static unsigned long up_rate_us; - -/* - * The minimum amount of time to spend at a frequency before we can ramp down. - * Notice we ignore this when we are above the ideal frequency. - */ -#define DEFAULT_DOWN_RATE_US 49000; -static unsigned long down_rate_us; - -/* - * The frequency to set when waking up from sleep. - * When sleep_ideal_freq=0 this will have no effect. - */ -#define DEFAULT_SLEEP_WAKEUP_FREQ 99999999 -static unsigned int sleep_wakeup_freq; - -/* - * Sampling rate, I highly recommend to leave it at 2. - */ -#define DEFAULT_SAMPLE_RATE_JIFFIES 2 -static unsigned int sample_rate_jiffies; - - -/*************** End of tunables ***************/ - - -static void (*pm_idle_old)(void); -static atomic_t active_count = ATOMIC_INIT(0); - -struct smartass_info_s { - struct cpufreq_policy *cur_policy; - struct cpufreq_frequency_table *freq_table; - struct timer_list timer; - u64 time_in_idle; - u64 idle_exit_time; - u64 freq_change_time; - u64 freq_change_time_in_idle; - int cur_cpu_load; - int old_freq; - int ramp_dir; - unsigned int enable; - int ideal_speed; -}; -static DEFINE_PER_CPU(struct smartass_info_s, smartass_info); - -/* Workqueues handle frequency scaling */ -static struct workqueue_struct *up_wq; -static struct workqueue_struct *down_wq; -static struct work_struct freq_scale_work; - -static cpumask_t work_cpumask; -static spinlock_t cpumask_lock; - -static unsigned int suspended; - -#define dprintk(flag,msg...) do { \ - if (debug_mask & flag) printk(KERN_DEBUG msg); \ - } while (0) - -enum { - SMARTASS_DEBUG_JUMPS=1, - SMARTASS_DEBUG_LOAD=2, - SMARTASS_DEBUG_ALG=4 -}; - -/* - * Combination of the above debug flags. - */ -static unsigned long debug_mask; - -static int cpufreq_governor_smartassv2(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASSV2 -static -#endif -struct cpufreq_governor cpufreq_gov_smartassv2 = { - .name = "smartassv2", - .governor = cpufreq_governor_smartassv2, - .max_transition_latency = 9000000, - .owner = THIS_MODULE, -}; - -inline static void smartass_update_min_max(struct smartass_info_s *this_smartass, struct cpufreq_policy *policy, int suspend) { - if (suspend) { - this_smartass->ideal_speed = // sleep_ideal_freq; but make sure it obeys the policy min/max - policy->max > sleep_ideal_freq ? - (sleep_ideal_freq > policy->min ? sleep_ideal_freq : policy->min) : policy->max; - } else { - this_smartass->ideal_speed = // awake_ideal_freq; but make sure it obeys the policy min/max - policy->min < awake_ideal_freq ? - (awake_ideal_freq < policy->max ? 
awake_ideal_freq : policy->max) : policy->min; - } -} - -inline static void smartass_update_min_max_allcpus(void) { - unsigned int i; - for_each_online_cpu(i) { - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, i); - if (this_smartass->enable) - smartass_update_min_max(this_smartass,this_smartass->cur_policy,suspended); - } -} - -inline static unsigned int validate_freq(struct cpufreq_policy *policy, int freq) { - if (freq > (int)policy->max) - return policy->max; - if (freq < (int)policy->min) - return policy->min; - return freq; -} - -inline static void reset_timer(unsigned long cpu, struct smartass_info_s *this_smartass) { - this_smartass->time_in_idle = get_cpu_idle_time_us(cpu, &this_smartass->idle_exit_time); - mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies); -} - -inline static void work_cpumask_set(unsigned long cpu) { - unsigned long flags; - spin_lock_irqsave(&cpumask_lock, flags); - cpumask_set_cpu(cpu, &work_cpumask); - spin_unlock_irqrestore(&cpumask_lock, flags); -} - -inline static int work_cpumask_test_and_clear(unsigned long cpu) { - unsigned long flags; - int res = 0; - spin_lock_irqsave(&cpumask_lock, flags); - res = cpumask_test_and_clear_cpu(cpu, &work_cpumask); - spin_unlock_irqrestore(&cpumask_lock, flags); - return res; -} - -inline static int target_freq(struct cpufreq_policy *policy, struct smartass_info_s *this_smartass, - int new_freq, int old_freq, int prefered_relation) { - int index, target; - struct cpufreq_frequency_table *table = this_smartass->freq_table; - - if (new_freq == old_freq) - return 0; - new_freq = validate_freq(policy,new_freq); - if (new_freq == old_freq) - return 0; - - if (table && - !cpufreq_frequency_table_target(policy,table,new_freq,prefered_relation,&index)) - { - target = table[index].frequency; - if (target == old_freq) { - // if for example we are ramping up to *at most* current + ramp_up_step - // but there is no such frequency higher than the current, try also - // to ramp up to *at least* current + ramp_up_step. - if (new_freq > old_freq && prefered_relation==CPUFREQ_RELATION_H - && !cpufreq_frequency_table_target(policy,table,new_freq, - CPUFREQ_RELATION_L,&index)) - target = table[index].frequency; - // simlarly for ramping down: - else if (new_freq < old_freq && prefered_relation==CPUFREQ_RELATION_L - && !cpufreq_frequency_table_target(policy,table,new_freq, - CPUFREQ_RELATION_H,&index)) - target = table[index].frequency; - } - - if (target == old_freq) { - // We should not get here: - // If we got here we tried to change to a validated new_freq which is different - // from old_freq, so there is no reason for us to remain at same frequency. 
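In target_freq(), a requested frequency is resolved through the cpufreq frequency table: first with the "highest entry at or below" relation (CPUFREQ_RELATION_H), and if that lands back on the current frequency, again with "lowest entry at or above" (CPUFREQ_RELATION_L) so a ramp step can never silently stall. A standalone sketch of that relation logic over a plain array; the table contents and helper names are invented for illustration:

#include <stdio.h>

/* Hypothetical frequency table in kHz, ascending, for illustration only. */
static const int table[] = { 192000, 384000, 768000, 1024000, 1512000 };
static const int table_len = (int)(sizeof(table) / sizeof(table[0]));

enum relation { RELATION_L, RELATION_H };   /* at-least vs. at-most */

/* Return the table entry closest to 'target' under the given relation,
 * mirroring the intent of cpufreq_frequency_table_target(). */
static int pick_freq(int target, enum relation rel)
{
    int best = -1;
    for (int i = 0; i < table_len; i++) {
        if (rel == RELATION_H && table[i] <= target &&
            (best < 0 || table[i] > best))
            best = table[i];
        if (rel == RELATION_L && table[i] >= target &&
            (best < 0 || table[i] < best))
            best = table[i];
    }
    /* Fall back to the table edge if nothing matched. */
    if (best < 0)
        best = (rel == RELATION_H) ? table[0] : table[table_len - 1];
    return best;
}

/* Ramp up by 'step' but never get stuck on old_freq: if the at-most lookup
 * returns the current frequency, retry with at-least, as target_freq() does. */
static int ramp_up(int old_freq, int step)
{
    int target = pick_freq(old_freq + step, RELATION_H);
    if (target == old_freq)
        target = pick_freq(old_freq + step, RELATION_L);
    return target;
}

int main(void)
{
    /* 80 MHz above 384 MHz still rounds down to 384 MHz, so the retry
     * promotes the request to the next higher step instead of stalling. */
    printf("ramp from 384000 by 80000 -> %d\n", ramp_up(384000, 80000));
    printf("ramp from 768000 by 80000 -> %d\n", ramp_up(768000, 80000));
    return 0;
}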
- printk(KERN_WARNING "Smartass: frequency change failed: %d to %d => %d\n", - old_freq,new_freq,target); - return 0; - } - } - else target = new_freq; - - __cpufreq_driver_target(policy, target, prefered_relation); - - dprintk(SMARTASS_DEBUG_JUMPS,"SmartassQ: jumping from %d to %d => %d (%d)\n", - old_freq,new_freq,target,policy->cur); - - return target; -} - -static void cpufreq_smartass_timer(unsigned long cpu) -{ - u64 delta_idle; - u64 delta_time; - int cpu_load; - int old_freq; - u64 update_time; - u64 now_idle; - int queued_work = 0; - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); - struct cpufreq_policy *policy = this_smartass->cur_policy; - - now_idle = get_cpu_idle_time_us(cpu, &update_time); - old_freq = policy->cur; - - if (this_smartass->idle_exit_time == 0 || update_time == this_smartass->idle_exit_time) - return; - - delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle); - delta_time = cputime64_sub(update_time, this_smartass->idle_exit_time); - - // If timer ran less than 1ms after short-term sample started, retry. - if (delta_time < 1000) { - if (!timer_pending(&this_smartass->timer)) - reset_timer(cpu,this_smartass); - return; - } - - if (delta_idle > delta_time) - cpu_load = 0; - else - cpu_load = 100 * (unsigned int)(delta_time - delta_idle) / (unsigned int)delta_time; - - dprintk(SMARTASS_DEBUG_LOAD,"smartassT @ %d: load %d (delta_time %llu)\n", - old_freq,cpu_load,delta_time); - - this_smartass->cur_cpu_load = cpu_load; - this_smartass->old_freq = old_freq; - - // Scale up if load is above max or if there where no idle cycles since coming out of idle, - // additionally, if we are at or above the ideal_speed, verify we have been at this frequency - // for at least up_rate_us: - if (cpu_load > max_cpu_load || delta_idle == 0) - { - if (old_freq < policy->max && - (old_freq < this_smartass->ideal_speed || delta_idle == 0 || - cputime64_sub(update_time, this_smartass->freq_change_time) >= up_rate_us)) - { - dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp up: load %d (delta_idle %llu)\n", - old_freq,cpu_load,delta_idle); - this_smartass->ramp_dir = 1; - work_cpumask_set(cpu); - queue_work(up_wq, &freq_scale_work); - queued_work = 1; - } - else this_smartass->ramp_dir = 0; - } - // Similarly for scale down: load should be below min and if we are at or below ideal - // frequency we require that we have been at this frequency for at least down_rate_us: - else if (cpu_load < min_cpu_load && old_freq > policy->min && - (old_freq > this_smartass->ideal_speed || - cputime64_sub(update_time, this_smartass->freq_change_time) >= down_rate_us)) - { - dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp down: load %d (delta_idle %llu)\n", - old_freq,cpu_load,delta_idle); - this_smartass->ramp_dir = -1; - work_cpumask_set(cpu); - queue_work(down_wq, &freq_scale_work); - queued_work = 1; - } - else this_smartass->ramp_dir = 0; - - // To avoid unnecessary load when the CPU is already at high load, we don't - // reset ourselves if we are at max speed. If and when there are idle cycles, - // the idle loop will activate the timer. - // Additionally, if we queued some work, the work task will reset the timer - // after it has done its adjustments. 
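cpufreq_smartass_timer() above turns the idle/wall deltas into a load percentage, cpu_load = 100 * (delta_time - delta_idle) / delta_time, after guarding against a zero or inconsistent window. A small sketch of that calculation with invented sample values:

#include <stdio.h>
#include <stdint.h>

/* Per-sample load in percent from idle-time and wall-time deltas (in us),
 * following the guard order used in cpufreq_smartass_timer(). */
static int cpu_load_percent(uint64_t delta_idle, uint64_t delta_time)
{
    if (delta_time == 0)
        return 0;               /* nothing elapsed yet, treat as idle */
    if (delta_idle > delta_time)
        return 0;               /* the two counters disagree; be conservative */
    return (int)(100 * (delta_time - delta_idle) / delta_time);
}

int main(void)
{
    /* A 45 ms window with 9 ms idle is 80% load; with the default
     * max_cpu_load of 85, idle would have to drop below ~6.75 ms to
     * trigger a ramp up. */
    printf("load = %d%%\n", cpu_load_percent(9000, 45000));
    printf("load = %d%%\n", cpu_load_percent(2000, 45000));
    return 0;
}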
- if (!queued_work && old_freq < policy->max) - reset_timer(cpu,this_smartass); -} - -static void cpufreq_idle(void) -{ - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); - struct cpufreq_policy *policy = this_smartass->cur_policy; - - if (!this_smartass->enable) { - pm_idle_old(); - return; - } - - if (policy->cur == policy->min && timer_pending(&this_smartass->timer)) - del_timer(&this_smartass->timer); - - pm_idle_old(); - - if (!timer_pending(&this_smartass->timer)) - reset_timer(smp_processor_id(), this_smartass); -} - -static int cpufreq_idle_notifier(struct notifier_block *nb, - unsigned long val, void *data) { - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); - struct cpufreq_policy *policy = this_smartass->cur_policy; - - if (!this_smartass->enable) - return NOTIFY_DONE; - - if (val == IDLE_START) { - if (policy->cur == policy->max && !timer_pending(&this_smartass->timer)) { - reset_timer(smp_processor_id(), this_smartass); - } else if (policy->cur == policy->min) { - if (timer_pending(&this_smartass->timer)) - del_timer(&this_smartass->timer); - } - } else if (val == IDLE_END) { - if (policy->cur == policy->min && !timer_pending(&this_smartass->timer)) - reset_timer(smp_processor_id(), this_smartass); - } - - return NOTIFY_OK; -} -static struct notifier_block cpufreq_idle_nb = { - .notifier_call = cpufreq_idle_notifier, -}; - -/* We use the same work function to sale up and down */ -static void cpufreq_smartass_freq_change_time_work(struct work_struct *work) -{ - unsigned int cpu; - int new_freq; - int old_freq; - int ramp_dir; - struct smartass_info_s *this_smartass; - struct cpufreq_policy *policy; - unsigned int relation = CPUFREQ_RELATION_L; - for_each_possible_cpu(cpu) { - this_smartass = &per_cpu(smartass_info, cpu); - if (!work_cpumask_test_and_clear(cpu)) - continue; - - ramp_dir = this_smartass->ramp_dir; - this_smartass->ramp_dir = 0; - - old_freq = this_smartass->old_freq; - policy = this_smartass->cur_policy; - - if (old_freq != policy->cur) { - // frequency was changed by someone else? - printk(KERN_WARNING "Smartass: frequency changed by 3rd party: %d to %d\n", - old_freq,policy->cur); - new_freq = old_freq; - } - else if (ramp_dir > 0 && nr_running() > 1) { - // ramp up logic: - if (old_freq < this_smartass->ideal_speed) - new_freq = this_smartass->ideal_speed; - else if (ramp_up_step) { - new_freq = old_freq + ramp_up_step; - relation = CPUFREQ_RELATION_H; - } - else { - new_freq = policy->max; - relation = CPUFREQ_RELATION_H; - } - dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp up: ramp_dir=%d ideal=%d\n", - old_freq,ramp_dir,this_smartass->ideal_speed); - } - else if (ramp_dir < 0) { - // ramp down logic: - if (old_freq > this_smartass->ideal_speed) { - new_freq = this_smartass->ideal_speed; - relation = CPUFREQ_RELATION_H; - } - else if (ramp_down_step) - new_freq = old_freq - ramp_down_step; - else { - // Load heuristics: Adjust new_freq such that, assuming a linear - // scaling of load vs. frequency, the load in the new frequency - // will be max_cpu_load: - new_freq = old_freq * this_smartass->cur_cpu_load / max_cpu_load; - if (new_freq > old_freq) // min_cpu_load > max_cpu_load ?! - new_freq = old_freq -1; - } - dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp down: ramp_dir=%d ideal=%d\n", - old_freq,ramp_dir,this_smartass->ideal_speed); - } - else { // ramp_dir==0 ?! Could the timer change its mind about a queued ramp up/down - // before the work task gets to run? 
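When ramp_down_step is zero, the work function above falls back to a load heuristic: scale the current frequency by cur_cpu_load / max_cpu_load so that, under a linear load-versus-frequency model, the new operating point would sit right at max_cpu_load. A worked sketch of that heuristic with illustrative numbers:

#include <stdio.h>

/* Pick a lower frequency such that the projected load at the new frequency
 * equals max_load_pct, assuming load is inversely proportional to frequency.
 * Mirrors the "load heuristics" branch of the smartassv2 work function. */
static long heuristic_ramp_down(long old_freq, int cur_load_pct, int max_load_pct)
{
    long new_freq = old_freq * cur_load_pct / max_load_pct;

    /* Guard kept from the original: inverted tunables could push this up. */
    if (new_freq > old_freq)
        new_freq = old_freq - 1;
    return new_freq;
}

int main(void)
{
    /* At 1,512,000 kHz with 40% load and max_cpu_load = 85, roughly
     * 711,529 kHz would, by the linear model, run at about 85% load. */
    printf("new freq = %ld kHz\n", heuristic_ramp_down(1512000, 40, 85));
    return 0;
}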
- // This may also happen if we refused to ramp up because the nr_running()==1 - new_freq = old_freq; - dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d nothing: ramp_dir=%d nr_running=%lu\n", - old_freq,ramp_dir,nr_running()); - } - - // do actual ramp up (returns 0, if frequency change failed): - new_freq = target_freq(policy,this_smartass,new_freq,old_freq,relation); - if (new_freq) - this_smartass->freq_change_time_in_idle = - get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time); - - // reset timer: - if (new_freq < policy->max) - reset_timer(cpu,this_smartass); - // if we are maxed out, it is pointless to use the timer - // (idle cycles wake up the timer when the timer comes) - else if (timer_pending(&this_smartass->timer)) - del_timer(&this_smartass->timer); - - cpufreq_notify_utilization(policy, - (this_smartass->cur_cpu_load * policy->cur) / policy->max); - } -} - -static ssize_t show_debug_mask(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", debug_mask); -} - -static ssize_t store_debug_mask(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0) - debug_mask = input; - return res; -} - -static ssize_t show_up_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", up_rate_us); -} - -static ssize_t store_up_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0 && input <= 100000000) - up_rate_us = input; - return res; -} - -static ssize_t show_down_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", down_rate_us); -} - -static ssize_t store_down_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0 && input <= 100000000) - down_rate_us = input; - return res; -} - -static ssize_t show_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", sleep_ideal_freq); -} - -static ssize_t store_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) { - sleep_ideal_freq = input; - if (suspended) - smartass_update_min_max_allcpus(); - } - return res; -} - -static ssize_t show_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", sleep_wakeup_freq); -} - -static ssize_t store_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - sleep_wakeup_freq = input; - return res; -} - -static ssize_t show_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", awake_ideal_freq); -} - -static ssize_t store_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) { - awake_ideal_freq = input; - if (!suspended) - smartass_update_min_max_allcpus(); - } - return res; -} - -static ssize_t 
show_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", sample_rate_jiffies); -} - -static ssize_t store_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 0 && input <= 1000) - sample_rate_jiffies = input; - return res; -} - -static ssize_t show_ramp_up_step(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", ramp_up_step); -} - -static ssize_t store_ramp_up_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - ramp_up_step = input; - return res; -} - -static ssize_t show_ramp_down_step(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", ramp_down_step); -} - -static ssize_t store_ramp_down_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - ramp_down_step = input; - return res; -} - -static ssize_t show_max_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", max_cpu_load); -} - -static ssize_t store_max_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 0 && input <= 100) - max_cpu_load = input; - return res; -} - -static ssize_t show_min_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", min_cpu_load); -} - -static ssize_t store_min_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 0 && input < 100) - min_cpu_load = input; - return res; -} - -#define define_global_rw_attr(_name) \ -static struct global_attr _name##_attr = \ - __ATTR(_name, 0644, show_##_name, store_##_name) - -define_global_rw_attr(debug_mask); -define_global_rw_attr(up_rate_us); -define_global_rw_attr(down_rate_us); -define_global_rw_attr(sleep_ideal_freq); -define_global_rw_attr(sleep_wakeup_freq); -define_global_rw_attr(awake_ideal_freq); -define_global_rw_attr(sample_rate_jiffies); -define_global_rw_attr(ramp_up_step); -define_global_rw_attr(ramp_down_step); -define_global_rw_attr(max_cpu_load); -define_global_rw_attr(min_cpu_load); - -static struct attribute * smartass_attributes[] = { - &debug_mask_attr.attr, - &up_rate_us_attr.attr, - &down_rate_us_attr.attr, - &sleep_ideal_freq_attr.attr, - &sleep_wakeup_freq_attr.attr, - &awake_ideal_freq_attr.attr, - &sample_rate_jiffies_attr.attr, - &ramp_up_step_attr.attr, - &ramp_down_step_attr.attr, - &max_cpu_load_attr.attr, - &min_cpu_load_attr.attr, - NULL, -}; - -static struct attribute_group smartass_attr_group = { - .attrs = smartass_attributes, - .name = "smartassv2", -}; - -static int cpufreq_governor_smartassv2(struct cpufreq_policy *new_policy, - unsigned int event) -{ - unsigned int cpu = new_policy->cpu; - int rc; - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!new_policy->cur)) - return -EINVAL; - - this_smartass->cur_policy 
= new_policy; - - this_smartass->enable = 1; - - smartass_update_min_max(this_smartass,new_policy,suspended); - - this_smartass->freq_table = cpufreq_frequency_get_table(cpu); - if (!this_smartass->freq_table) - printk(KERN_WARNING "Smartass: no frequency table for cpu %d?!\n",cpu); - - smp_wmb(); - - // Do not register the idle hook and create sysfs - // entries if we have already done so. - if (atomic_inc_return(&active_count) <= 1) { - rc = sysfs_create_group(cpufreq_global_kobject, - &smartass_attr_group); - if (rc) - return rc; - - pm_idle_old = pm_idle; - pm_idle = cpufreq_idle; - idle_notifier_register(&cpufreq_idle_nb); - } - - if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer)) - reset_timer(cpu,this_smartass); - - break; - - case CPUFREQ_GOV_LIMITS: - smartass_update_min_max(this_smartass,new_policy,suspended); - - if (this_smartass->cur_policy->cur > new_policy->max) { - dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new max freq: %d\n",new_policy->max); - __cpufreq_driver_target(this_smartass->cur_policy, - new_policy->max, CPUFREQ_RELATION_H); - } - else if (this_smartass->cur_policy->cur < new_policy->min) { - dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new min freq: %d\n",new_policy->min); - __cpufreq_driver_target(this_smartass->cur_policy, - new_policy->min, CPUFREQ_RELATION_L); - } - - if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer)) - reset_timer(cpu,this_smartass); - - break; - - case CPUFREQ_GOV_STOP: - this_smartass->enable = 0; - smp_wmb(); - del_timer(&this_smartass->timer); - flush_work(&freq_scale_work); - this_smartass->idle_exit_time = 0; - - if (atomic_dec_return(&active_count) <= 1) { - sysfs_remove_group(cpufreq_global_kobject, - &smartass_attr_group); - pm_idle = pm_idle_old; - idle_notifier_unregister(&cpufreq_idle_nb); - } - break; - } - - return 0; -} - -static void smartass_suspend(int cpu, int suspend) -{ - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); - struct cpufreq_policy *policy = this_smartass->cur_policy; - unsigned int new_freq; - - if (!this_smartass->enable) - return; - - smartass_update_min_max(this_smartass,policy,suspend); - if (!suspend) { // resume at max speed: - new_freq = validate_freq(policy,sleep_wakeup_freq); - - dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: awaking at %d\n",new_freq); - - __cpufreq_driver_target(policy, new_freq, - CPUFREQ_RELATION_L); - } else { - // to avoid wakeup issues with quick sleep/wakeup don't change actual frequency when entering sleep - // to allow some time to settle down. Instead we just reset our statistics (and reset the timer). - // Eventually, the timer will adjust the frequency if necessary. 
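The GOV_START path above gates its one-time setup on atomic_inc_return(&active_count) <= 1, so only the first policy to start creates the sysfs group and installs the idle hook, and stopping decrements the counter and tears the shared state down again. A userspace sketch of that first-in/last-out pattern using C11 atomics; the setup and teardown bodies are placeholders, and the last-user check here is the conventional fetch-and-subtract form rather than a copy of the driver's:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int active_count;

/* Placeholders standing in for sysfs group creation and the idle-hook
 * registration that should happen exactly once for all policies. */
static void create_shared_state(void)  { puts("create sysfs group, hook idle"); }
static void destroy_shared_state(void) { puts("remove sysfs group, unhook idle"); }

/* The first caller performs the one-time setup. */
static void governor_start(int cpu)
{
    if (atomic_fetch_add(&active_count, 1) == 0)
        create_shared_state();
    printf("governor started on cpu%d\n", cpu);
}

/* The last caller tears the shared state down again. */
static void governor_stop(int cpu)
{
    printf("governor stopped on cpu%d\n", cpu);
    if (atomic_fetch_sub(&active_count, 1) == 1)
        destroy_shared_state();
}

int main(void)
{
    governor_start(0);
    governor_start(1);   /* second start skips the shared setup */
    governor_stop(1);
    governor_stop(0);    /* last stop releases the shared state */
    return 0;
}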
- - this_smartass->freq_change_time_in_idle = - get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time); - - dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: suspending at %d\n",policy->cur); - } - - reset_timer(smp_processor_id(),this_smartass); -} - -static void smartass_early_suspend(struct early_suspend *handler) { - int i; - if (suspended || sleep_ideal_freq==0) // disable behavior for sleep_ideal_freq==0 - return; - suspended = 1; - for_each_online_cpu(i) - smartass_suspend(i,1); -} - -static void smartass_late_resume(struct early_suspend *handler) { - int i; - if (!suspended) // already not suspended so nothing to do - return; - suspended = 0; - for_each_online_cpu(i) - smartass_suspend(i,0); -} - -static struct early_suspend smartass_power_suspend = { - .suspend = smartass_early_suspend, - .resume = smartass_late_resume, -#ifdef CONFIG_MACH_HERO - .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, -#endif -}; - -static int __init cpufreq_smartass_init(void) -{ - unsigned int i; - struct smartass_info_s *this_smartass; - debug_mask = 0; - up_rate_us = DEFAULT_UP_RATE_US; - down_rate_us = DEFAULT_DOWN_RATE_US; - sleep_ideal_freq = DEFAULT_SLEEP_IDEAL_FREQ; - sleep_wakeup_freq = DEFAULT_SLEEP_WAKEUP_FREQ; - awake_ideal_freq = DEFAULT_AWAKE_IDEAL_FREQ; - sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES; - ramp_up_step = DEFAULT_RAMP_UP_STEP; - ramp_down_step = DEFAULT_RAMP_DOWN_STEP; - max_cpu_load = DEFAULT_MAX_CPU_LOAD; - min_cpu_load = DEFAULT_MIN_CPU_LOAD; - - spin_lock_init(&cpumask_lock); - - suspended = 0; - - /* Initalize per-cpu data: */ - for_each_possible_cpu(i) { - this_smartass = &per_cpu(smartass_info, i); - this_smartass->enable = 0; - this_smartass->cur_policy = 0; - this_smartass->ramp_dir = 0; - this_smartass->time_in_idle = 0; - this_smartass->idle_exit_time = 0; - this_smartass->freq_change_time = 0; - this_smartass->freq_change_time_in_idle = 0; - this_smartass->cur_cpu_load = 0; - // intialize timer: - init_timer_deferrable(&this_smartass->timer); - this_smartass->timer.function = cpufreq_smartass_timer; - this_smartass->timer.data = i; - work_cpumask_test_and_clear(i); - } - - // Scale up is high priority - up_wq = create_workqueue("ksmartass_up"); - down_wq = create_workqueue("ksmartass_down"); - if (!up_wq || !down_wq) - return -ENOMEM; - - INIT_WORK(&freq_scale_work, cpufreq_smartass_freq_change_time_work); - - register_early_suspend(&smartass_power_suspend); - - return cpufreq_register_governor(&cpufreq_gov_smartassv2); -} - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASSV2 -fs_initcall(cpufreq_smartass_init); -#else -module_init(cpufreq_smartass_init); -#endif - -static void __exit cpufreq_smartass_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_smartassv2); - destroy_workqueue(up_wq); - destroy_workqueue(down_wq); -} - -module_exit(cpufreq_smartass_exit); - -MODULE_AUTHOR ("Erasmux, moded by H3ROS & C3C0"); -MODULE_DESCRIPTION ("'cpufreq_smartassv2' - A smart cpufreq governor"); -MODULE_LICENSE ("GPL"); - - diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 5315db0a..049f880b 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -107,8 +107,8 @@ xt_socket_get4_sk(const struct sk_buff *skb, struct xt_action_param *par) struct udphdr _hdr, *hp = NULL; struct sock *sk; __be32 daddr, saddr; - __be16 dport, sport; - u8 protocol; + __be16 dport, sport = 0; + u8 protocol = 0; #ifdef XT_SOCKET_HAVE_CONNTRACK struct nf_conn const *ct; enum ip_conntrack_info ctinfo; @@ -265,8 +265,8 @@ xt_socket_get6_sk(const struct sk_buff *skb, 
struct xt_action_param *par) struct ipv6hdr *iph = ipv6_hdr(skb); struct udphdr _hdr, *hp = NULL; struct sock *sk; - struct in6_addr *daddr, *saddr; - __be16 dport, sport; + struct in6_addr *daddr = NULL, *saddr = NULL; + __be16 dport = 0, sport = 0; int thoff, tproto; tproto = ipv6_find_hdr(skb, &thoff, -1, NULL); From 66f65652c57ddfd216bd07bdaac771ddfb05956c Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 07:50:48 -0400 Subject: [PATCH 04/35] Drivers: Fix warnings from IOWAIT_TIME --- drivers/cpufreq/Kconfig | 98 -- drivers/cpufreq/Makefile | 5 +- drivers/cpufreq/cpufreq_abyssplug.c | 817 ------------- drivers/cpufreq/cpufreq_adaptive.c | 952 --------------- drivers/cpufreq/cpufreq_nightmare.c | 1656 -------------------------- drivers/cpufreq/cpufreq_pegasusq.c | 1636 ------------------------- drivers/cpufreq/cpufreq_smartassH3.c | 904 -------------- 7 files changed, 3 insertions(+), 6065 deletions(-) delete mode 100644 drivers/cpufreq/cpufreq_abyssplug.c delete mode 100644 drivers/cpufreq/cpufreq_adaptive.c delete mode 100644 drivers/cpufreq/cpufreq_nightmare.c delete mode 100644 drivers/cpufreq/cpufreq_pegasusq.c delete mode 100644 drivers/cpufreq/cpufreq_smartassH3.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 6c5d815d..be30f8aa 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -50,29 +50,6 @@ choice This option sets which CPUFreq governor shall be loaded at startup. If in doubt, select 'performance'. -config CPU_FREQ_DEFAULT_GOV_ABYSSPLUG - bool "abyssplug" - select CPU_FREQ_GOV_ABYSSPLUG - select CPU_FREQ_GOV_PERFORMANCE - ---help--- - Use the CPUFreq governor 'abyssplug' as default. This allows you - to get a full dynamic frequency capable system with CPU - hotplug support by simply loading your cpufreq low-level - hardware driver. Be aware that not all cpufreq drivers - support the hotplug governor. If unsure have a look at - the help section of the driver. Fallback governor will be the - performance governor. - -config CPU_FREQ_DEFAULT_GOV_ADAPTIVE - bool "adaptive" - select CPU_FREQ_GOV_ADAPTIVE - help - Use the CPUFreq governor 'adaptive' as default. This allows - you to get a full dynamic cpu frequency capable system by simply - loading your cpufreq low-level hardware driver, using the - 'adaptive' governor for latency-sensitive workloads and demanding - performance. - config CPU_FREQ_DEFAULT_GOV_ASSWAX bool "asswax" select CPU_FREQ_GOV_ASSWAX @@ -135,24 +112,6 @@ config CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND Use the CPUFreq governor 'intellidemand' as default. This is based on Ondemand with browsing detection based on GPU loading -config CPU_FREQ_DEFAULT_GOV_KTOONSERVATIVEQ - bool "ktoonservativeq" - select CPU_FREQ_GOV_KTOONSERVATIVEQ - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'ktoonservativeq' as default. This allows - you to get a full dynamic frequency capable system by simply - loading your cpufreq low-level hardware driver. - Be aware that not all cpufreq drivers support the ktoonservativeq - governor. If unsure have a look at the help section of the - driver. Fallback governor will be the performance governor. This - governor adds the capability of hotpluging. - -config CPU_FREQ_DEFAULT_GOV_NIGHTMARE - bool "nightmare" - select CPU_FREQ_GOV_NIGHTMARE - help - config CPU_FREQ_DEFAULT_GOV_ONDEMAND bool "ondemand" select CPU_FREQ_GOV_ONDEMAND @@ -165,12 +124,6 @@ config CPU_FREQ_DEFAULT_GOV_ONDEMAND governor. If unsure have a look at the help section of the driver. 
Fallback governor will be the performance governor. -config CPU_FREQ_DEFAULT_GOV_PEGASUSQ - bool "pegasusq" - select CPU_FREQ_GOV_PEGASUSQ - help - Use the CPUFreq governor 'pegasusq' as default. - config CPU_FREQ_DEFAULT_GOV_SLP bool "slp" select CPU_FREQ_GOV_SLP @@ -194,12 +147,6 @@ config CPU_FREQ_DEFAULT_GOV_POWERSAVE the frequency statically to the lowest frequency supported by the CPU. -config CPU_FREQ_DEFAULT_GOV_SMARTASSH3 - bool "smartassH3" - select CPU_FREQ_GOV_SMARTASSH3 - help - Use the CPUFreq governor 'slp' as default. - config CPU_FREQ_DEFAULT_GOV_USERSPACE bool "userspace" select CPU_FREQ_GOV_USERSPACE @@ -224,38 +171,6 @@ config CPU_FREQ_GOV_LIONHEART help Use the CPUFreq governor 'lionheart' as default. -config CPU_FREQ_GOV_ABYSSPLUG - tristate "'abyssplug' cpufreq governor" - depends on CPU_FREQ && NO_HZ && HOTPLUG_CPU - ---help--- - 'abyssplug' - this driver mimics the frequency scaling behavior - in 'ondemand', but with several key differences. First is - that frequency transitions use the CPUFreq table directly, - instead of incrementing in a percentage of the maximum - available frequency. Second 'abyssplug' will offline auxillary - CPUs when the system is idle, and online those CPUs once the - system becomes busy again. This last feature is needed for - architectures which transition to low power states when only - the "master" CPU is online, or for thermally constrained - devices. - If you don't have one of these architectures or devices, use - 'ondemand' instead. - If in doubt, say N. - -config CPU_FREQ_GOV_ADAPTIVE - tristate "'adaptive' cpufreq policy governor" - help - 'adaptive' - This driver adds a dynamic cpufreq policy governor - designed for latency-sensitive workloads and also for demanding - performance. - - This governor attempts to reduce the latency of clock - increases so that the system is more responsive to - interactive workloads in loweset steady-state but to - to reduce power consumption in middle operation level level up - will be done in step by step to prohibit system from going to - max operation level. - config CPU_FREQ_GOV_USERSPACE tristate "'userspace' governor for userspace frequency scaling" help @@ -335,10 +250,6 @@ config CPU_FREQ_GOV_INTELLIDEMAND If in doubt, say N. -config CPU_FREQ_GOV_NIGHTMARE - tristate "'nightmare' cpufreq governor" - depends on CPU_FREQ - config CPU_FREQ_GOV_ONDEMAND tristate "'ondemand' cpufreq policy governor" select CPU_FREQ_TABLE @@ -368,9 +279,6 @@ config CPU_FREQ_GOV_PERFORMANCE If in doubt, say Y. 
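The xt_socket_get4_sk()/xt_socket_get6_sk() hunks earlier in the series initialize sport, protocol, daddr, saddr and dport at their declarations; the logic is unchanged, but the compiler can no longer warn that they might be read before being assigned on early-return paths. A minimal reproduction of that warning shape and the same fix; the function and values below are made up:

#include <stdio.h>

/* 'proto' is only assigned on some branches, so -Wmaybe-uninitialized can
 * fire at the final use. Initializing it at the declaration, as the hunks
 * above do for sport/protocol/daddr/saddr, keeps the behaviour identical
 * while silencing the warning. */
static int classify(int have_header)
{
    int proto = 0;          /* was: int proto;  -- flagged by the compiler */

    if (have_header)
        proto = 6;          /* pretend a TCP header was parsed */

    /* ... other early-exit paths would leave 'proto' untouched ... */

    return proto;
}

int main(void)
{
    printf("with header: %d, without: %d\n", classify(1), classify(0));
    return 0;
}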
-config CPU_FREQ_GOV_PEGASUSQ - tristate "'pegasusq' cpufreq policy governor" - config CPU_FREQ_GOV_POWERSAVE tristate "'powersave' governor" help @@ -385,12 +293,6 @@ config CPU_FREQ_GOV_POWERSAVE config CPU_FREQ_GOV_SLP tristate "'slp' cpufreq policy governor" -config CPU_FREQ_GOV_SMARTASSH3 - tristate "'smartassH3' cpufreq governor" - depends on CPU_FREQ - help - 'smartassH3' - a "smart" governor - config CPU_FREQ_GOV_USERSPACE tristate "'userspace' governor for userspace frequency scaling" help diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index cc2230ee..09c5ea20 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -9,11 +9,12 @@ obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o -obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o obj-$(CONFIG_CPU_FREQ_GOV_BADASS) += cpufreq_badass.o obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o -obj-$(CONFIG_CPU_FREQ_GOV_SMARTASSV2) += cpufreq_smartassv2.o +obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o +obj-$(CONFIG_CPU_FREQ_GOV_ASSWAX) += cpufreq_asswax.o +obj-$(CONFIG_CPU_FREQ_GOV_DANCEDANCE) += cpufreq_dancedance.o diff --git a/drivers/cpufreq/cpufreq_abyssplug.c b/drivers/cpufreq/cpufreq_abyssplug.c deleted file mode 100644 index 37df4463..00000000 --- a/drivers/cpufreq/cpufreq_abyssplug.c +++ /dev/null @@ -1,817 +0,0 @@ -/* - * CPUFreq AbyssPlug governor - * - * - * Based on hotplug governor - * Copyright (C) 2010 Texas Instruments, Inc. - * Mike Turquette - * Santosh Shilimkar - * - * Based on ondemand governor - * Copyright (C) 2001 Russell King - * (C) 2003 Venkatesh Pallipadi , - * Jun Nakajima - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* greater than 95% avg load across online CPUs increases frequency */ -#define DEFAULT_UP_FREQ_MIN_LOAD (95) - -/* Keep 10% of idle under the up threshold when decreasing the frequency */ -#define DEFAULT_FREQ_DOWN_DIFFERENTIAL (1) - -/* less than 40% avg load across online CPUs decreases frequency */ -#define DEFAULT_DOWN_FREQ_MAX_LOAD (40) - -/* default sampling period (uSec) is bogus; 10x ondemand's default for x86 */ -#define DEFAULT_SAMPLING_PERIOD (50000) - -/* default number of sampling periods to average before hotplug-in decision */ -#define DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS (5) - -/* default number of sampling periods to average before hotplug-out decision */ -#define DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS (20) - -static void do_dbs_timer(struct work_struct *work); -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event); -//static int hotplug_boost(struct cpufreq_policy *policy); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ABYSSPLUG -static -#endif -struct cpufreq_governor cpufreq_gov_abyssplug = { - .name = "abyssplug", - .governor = cpufreq_governor_dbs, - .owner = THIS_MODULE, -}; - -struct cpu_dbs_info_s { - cputime64_t prev_cpu_idle; - cputime64_t prev_cpu_wall; - cputime64_t prev_cpu_nice; - struct cpufreq_policy *cur_policy; - struct delayed_work work; - struct work_struct cpu_up_work; - struct work_struct cpu_down_work; - struct cpufreq_frequency_table *freq_table; - int cpu; - unsigned int boost_applied; - /* - * percpu mutex that serializes governor limit change with - * do_dbs_timer invocation. We do not want do_dbs_timer to run - * when user is changing the governor or limits. - */ - struct mutex timer_mutex; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, hp_cpu_dbs_info); - -static unsigned int dbs_enable; /* number of CPUs using this policy */ - -/* - * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on - * different CPUs. It protects dbs_enable in governor start/stop. - */ -static DEFINE_MUTEX(dbs_mutex); - -static struct workqueue_struct *khotplug_wq; - -static struct dbs_tuners { - unsigned int sampling_rate; - unsigned int up_threshold; - unsigned int down_differential; - unsigned int down_threshold; - unsigned int hotplug_in_sampling_periods; - unsigned int hotplug_out_sampling_periods; - unsigned int hotplug_load_index; - unsigned int *hotplug_load_history; - unsigned int ignore_nice; - unsigned int io_is_busy; - unsigned int boost_timeout; -} dbs_tuners_ins = { - .sampling_rate = DEFAULT_SAMPLING_PERIOD, - .up_threshold = DEFAULT_UP_FREQ_MIN_LOAD, - .down_differential = DEFAULT_FREQ_DOWN_DIFFERENTIAL, - .down_threshold = DEFAULT_DOWN_FREQ_MAX_LOAD, - .hotplug_in_sampling_periods = DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS, - .hotplug_out_sampling_periods = DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS, - .hotplug_load_index = 0, - .ignore_nice = 0, - .io_is_busy = 0, - .boost_timeout = 0, -}; - -/* - * A corner case exists when switching io_is_busy at run-time: comparing idle - * times from a non-io_is_busy period to an io_is_busy period (or vice-versa) - * will misrepresent the actual change in system idleness. We ignore this - * corner case: enabling io_is_busy might cause freq increase and disabling - * might cause freq decrease, which probably matches the original intent. 
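The io_is_busy note above refers to the idle-time helper that follows: when the tunable is set, iowait is subtracted from idle time, so cycles spent waiting on I/O count as load and tend to hold the frequency up. A small sketch of that accounting with invented numbers:

#include <stdio.h>
#include <stdint.h>

/* Effective idle time for load purposes: with io_is_busy, iowait is moved
 * out of the idle bucket, in the spirit of the governor's get_cpu_idle_time(). */
static uint64_t effective_idle_us(uint64_t idle_us, uint64_t iowait_us, int io_is_busy)
{
    if (io_is_busy && iowait_us <= idle_us)
        return idle_us - iowait_us;
    return idle_us;
}

int main(void)
{
    uint64_t idle = 30000, iowait = 20000, wall = 50000;   /* microseconds */

    /* The same window reads 40% load with io_is_busy off and 80% with it on. */
    for (int busy = 0; busy <= 1; busy++) {
        uint64_t eff = effective_idle_us(idle, iowait, busy);
        printf("io_is_busy=%d: idle=%llu us -> load=%llu%%\n",
               busy, (unsigned long long)eff,
               (unsigned long long)(100 * (wall - eff) / wall));
    }
    return 0;
}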
- */ -static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) -{ - u64 idle_time; - u64 iowait_time; - - /* cpufreq-abyssplug always assumes CONFIG_NO_HZ */ - idle_time = get_cpu_idle_time_us(cpu, wall); - - /* add time spent doing I/O to idle time */ - if (dbs_tuners_ins.io_is_busy) { - iowait_time = get_cpu_iowait_time_us(cpu, wall); - /* cpufreq-abyssplug always assumes CONFIG_NO_HZ */ - if (iowait_time != -1ULL && idle_time >= iowait_time) - idle_time -= iowait_time; - } - - return idle_time; -} - -/************************** sysfs interface ************************/ - -/* XXX look at global sysfs macros in cpufreq.h, can those be used here? */ - -/* cpufreq_abyssplug Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct kobject *kobj, struct attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ -} -show_one(sampling_rate, sampling_rate); -show_one(up_threshold, up_threshold); -show_one(down_differential, down_differential); -show_one(down_threshold, down_threshold); -show_one(hotplug_in_sampling_periods, hotplug_in_sampling_periods); -show_one(hotplug_out_sampling_periods, hotplug_out_sampling_periods); -show_one(ignore_nice_load, ignore_nice); -show_one(io_is_busy, io_is_busy); -show_one(boost_timeout, boost_timeout); - -static ssize_t store_boost_timeout(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.boost_timeout = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.sampling_rate = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input <= dbs_tuners_ins.down_threshold) { - return -EINVAL; - } - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.up_threshold = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_down_differential(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input >= dbs_tuners_ins.up_threshold) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.down_differential = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input >= dbs_tuners_ins.up_threshold) { - return -EINVAL; - } - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.down_threshold = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_hotplug_in_sampling_periods(struct kobject *a, - struct attribute *b, const char *buf, size_t count) -{ - unsigned int input; - unsigned int *temp; - unsigned int max_windows; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - /* already using this value, bail out */ - if (input == dbs_tuners_ins.hotplug_in_sampling_periods) 
- return count; - - mutex_lock(&dbs_mutex); - ret = count; - max_windows = max(dbs_tuners_ins.hotplug_in_sampling_periods, - dbs_tuners_ins.hotplug_out_sampling_periods); - - /* no need to resize array */ - if (input <= max_windows) { - dbs_tuners_ins.hotplug_in_sampling_periods = input; - goto out; - } - - /* resize array */ - temp = kmalloc((sizeof(unsigned int) * input), GFP_KERNEL); - - if (!temp || IS_ERR(temp)) { - ret = -ENOMEM; - goto out; - } - - memcpy(temp, dbs_tuners_ins.hotplug_load_history, - (max_windows * sizeof(unsigned int))); - kfree(dbs_tuners_ins.hotplug_load_history); - - /* replace old buffer, old number of sampling periods & old index */ - dbs_tuners_ins.hotplug_load_history = temp; - dbs_tuners_ins.hotplug_in_sampling_periods = input; - dbs_tuners_ins.hotplug_load_index = max_windows; -out: - mutex_unlock(&dbs_mutex); - - return ret; -} - -static ssize_t store_hotplug_out_sampling_periods(struct kobject *a, - struct attribute *b, const char *buf, size_t count) -{ - unsigned int input; - unsigned int *temp; - unsigned int max_windows; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - /* already using this value, bail out */ - if (input == dbs_tuners_ins.hotplug_out_sampling_periods) - return count; - - mutex_lock(&dbs_mutex); - ret = count; - max_windows = max(dbs_tuners_ins.hotplug_in_sampling_periods, - dbs_tuners_ins.hotplug_out_sampling_periods); - - /* no need to resize array */ - if (input <= max_windows) { - dbs_tuners_ins.hotplug_out_sampling_periods = input; - goto out; - } - - /* resize array */ - temp = kmalloc((sizeof(unsigned int) * input), GFP_KERNEL); - - if (!temp || IS_ERR(temp)) { - ret = -ENOMEM; - goto out; - } - - memcpy(temp, dbs_tuners_ins.hotplug_load_history, - (max_windows * sizeof(unsigned int))); - kfree(dbs_tuners_ins.hotplug_load_history); - - /* replace old buffer, old number of sampling periods & old index */ - dbs_tuners_ins.hotplug_load_history = temp; - dbs_tuners_ins.hotplug_out_sampling_periods = input; - dbs_tuners_ins.hotplug_load_index = max_windows; -out: - mutex_unlock(&dbs_mutex); - - return ret; -} - -static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - unsigned int j; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - if (input > 1) - input = 1; - - mutex_lock(&dbs_mutex); - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - mutex_unlock(&dbs_mutex); - return count; - } - dbs_tuners_ins.ignore_nice = input; - - /* we need to re-evaluate prev_cpu_idle */ - for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; - dbs_info = &per_cpu(hp_cpu_dbs_info, j); - dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - - } - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.io_is_busy = !!input; - mutex_unlock(&dbs_mutex); - - return count; -} - -define_one_global_rw(sampling_rate); -define_one_global_rw(up_threshold); -define_one_global_rw(down_differential); -define_one_global_rw(down_threshold); -define_one_global_rw(hotplug_in_sampling_periods); 
-define_one_global_rw(hotplug_out_sampling_periods); -define_one_global_rw(ignore_nice_load); -define_one_global_rw(io_is_busy); -define_one_global_rw(boost_timeout); - -static struct attribute *dbs_attributes[] = { - &sampling_rate.attr, - &up_threshold.attr, - &down_differential.attr, - &down_threshold.attr, - &hotplug_in_sampling_periods.attr, - &hotplug_out_sampling_periods.attr, - &ignore_nice_load.attr, - &io_is_busy.attr, - &boost_timeout.attr, - NULL -}; - -static struct attribute_group dbs_attr_group = { - .attrs = dbs_attributes, - .name = "abyssplug", -}; - -/************************** sysfs end ************************/ - -static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) -{ - /* combined load of all enabled CPUs */ - unsigned int total_load = 0; - /* single largest CPU load percentage*/ - unsigned int max_load = 0; - /* largest CPU load in terms of frequency */ - unsigned int max_load_freq = 0; - /* average load across all enabled CPUs */ - unsigned int avg_load = 0; - /* average load across multiple sampling periods for hotplug events */ - unsigned int hotplug_in_avg_load = 0; - unsigned int hotplug_out_avg_load = 0; - /* number of sampling periods averaged for hotplug decisions */ - unsigned int periods; - - struct cpufreq_policy *policy; - unsigned int i, j; - - policy = this_dbs_info->cur_policy; - - /* - * cpu load accounting - * get highest load, total load and average load across all CPUs - */ - for_each_cpu(j, policy->cpus) { - unsigned int load; - unsigned int idle_time, wall_time; - cputime64_t cur_wall_time, cur_idle_time; - struct cpu_dbs_info_s *j_dbs_info; - - j_dbs_info = &per_cpu(hp_cpu_dbs_info, j); - - /* update both cur_idle_time and cur_wall_time */ - cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); - - /* how much wall time has passed since last iteration? */ - wall_time = (unsigned int) cputime64_sub(cur_wall_time, - j_dbs_info->prev_cpu_wall); - j_dbs_info->prev_cpu_wall = cur_wall_time; - - /* how much idle time has passed since last iteration? */ - idle_time = (unsigned int) cputime64_sub(cur_idle_time, - j_dbs_info->prev_cpu_idle); - j_dbs_info->prev_cpu_idle = cur_idle_time; - - if (unlikely(!wall_time || wall_time < idle_time)) - continue; - - /* load is the percentage of time not spent in idle */ - load = 100 * (wall_time - idle_time) / wall_time; - - /* keep track of combined load across all CPUs */ - total_load += load; - - /* keep track of highest single load across all CPUs */ - if (load > max_load) - max_load = load; - } - - /* use the max load in the OPP freq change policy */ - max_load_freq = max_load * policy->cur; - - /* calculate the average load across all related CPUs */ - avg_load = total_load / num_online_cpus(); - - mutex_lock(&dbs_mutex); - - /* - * hotplug load accounting - * average load over multiple sampling periods - */ - - /* how many sampling periods do we use for hotplug decisions? 
*/ - periods = max(dbs_tuners_ins.hotplug_in_sampling_periods, - dbs_tuners_ins.hotplug_out_sampling_periods); - - /* store avg_load in the circular buffer */ - dbs_tuners_ins.hotplug_load_history[dbs_tuners_ins.hotplug_load_index] - = avg_load; - - /* compute average load across in & out sampling periods */ - for (i = 0, j = dbs_tuners_ins.hotplug_load_index; - i < periods; i++, j--) { - if (i < dbs_tuners_ins.hotplug_in_sampling_periods) - hotplug_in_avg_load += - dbs_tuners_ins.hotplug_load_history[j]; - if (i < dbs_tuners_ins.hotplug_out_sampling_periods) - hotplug_out_avg_load += - dbs_tuners_ins.hotplug_load_history[j]; - - if (j == 0) - j = periods; - } - - hotplug_in_avg_load = hotplug_in_avg_load / - dbs_tuners_ins.hotplug_in_sampling_periods; - - hotplug_out_avg_load = hotplug_out_avg_load / - dbs_tuners_ins.hotplug_out_sampling_periods; - - /* return to first element if we're at the circular buffer's end */ - if (++dbs_tuners_ins.hotplug_load_index == periods) - dbs_tuners_ins.hotplug_load_index = 0; - - /* check if auxiliary CPU is needed based on avg_load */ - if (avg_load > dbs_tuners_ins.up_threshold) { - /* should we enable auxillary CPUs? */ - if (num_online_cpus() < 2 && hotplug_in_avg_load > - dbs_tuners_ins.up_threshold) { - queue_work_on(this_dbs_info->cpu, khotplug_wq, - &this_dbs_info->cpu_up_work); - goto out; - } - } - - /* check for frequency increase based on max_load */ - if (max_load > dbs_tuners_ins.up_threshold) { - /* increase to highest frequency supported */ - if (policy->cur < policy->max) - __cpufreq_driver_target(policy, policy->max, - CPUFREQ_RELATION_H); - - goto out; - } - - /* check for frequency decrease */ - if (avg_load < dbs_tuners_ins.down_threshold) { - /* are we at the minimum frequency already? */ - if (policy->cur <= policy->min) { - /* should we disable auxillary CPUs? 
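dbs_check_cpu() above keeps each sample's average load in a circular buffer and averages it over two window lengths, a short one before onlining the second CPU and a longer one before offlining it, so plug-in reacts quickly while plug-out waits for sustained idleness. A standalone sketch of that windowed averaging; the window lengths match the defaults named earlier and the load values are invented:

#include <stdio.h>

#define IN_PERIODS   5    /* samples averaged before onlining a CPU  */
#define OUT_PERIODS  20   /* samples averaged before offlining a CPU */
#define HISTORY      (OUT_PERIODS > IN_PERIODS ? OUT_PERIODS : IN_PERIODS)

static unsigned int history[HISTORY];
static unsigned int index_;

/* Record one average-load sample and report both windowed averages,
 * walking backwards through the circular buffer from the newest entry. */
static void record_load(unsigned int avg_load,
                        unsigned int *in_avg, unsigned int *out_avg)
{
    unsigned int in_sum = 0, out_sum = 0;
    unsigned int j;

    history[index_] = avg_load;

    j = index_;
    for (unsigned int i = 0; i < HISTORY; i++) {
        if (i < IN_PERIODS)
            in_sum += history[j];
        if (i < OUT_PERIODS)
            out_sum += history[j];
        j = (j == 0) ? HISTORY - 1 : j - 1;
    }
    *in_avg = in_sum / IN_PERIODS;
    *out_avg = out_sum / OUT_PERIODS;

    index_ = (index_ + 1) % HISTORY;
}

int main(void)
{
    unsigned int in_avg, out_avg;

    /* A short burst of high load moves the short plug-in window well
     * before it moves the long plug-out window. */
    for (int i = 0; i < HISTORY; i++)
        record_load(30, &in_avg, &out_avg);     /* warm up at 30% load */
    for (int i = 0; i < 5; i++)
        record_load(98, &in_avg, &out_avg);
    printf("in_avg=%u out_avg=%u\n", in_avg, out_avg);
    return 0;
}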
*/ - if (num_online_cpus() > 1 && hotplug_out_avg_load < - dbs_tuners_ins.down_threshold) { - queue_work_on(this_dbs_info->cpu, khotplug_wq, - &this_dbs_info->cpu_down_work); - } - goto out; - } - } - - /* - * go down to the lowest frequency which can sustain the load by - * keeping 30% of idle in order to not cross the up_threshold - */ - if ((max_load_freq < - (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * - policy->cur) && (policy->cur > policy->min)) { - unsigned int freq_next; - freq_next = max_load_freq / - (dbs_tuners_ins.up_threshold - - dbs_tuners_ins.down_differential); - - if (freq_next < policy->min) - freq_next = policy->min; - - __cpufreq_driver_target(policy, freq_next, - CPUFREQ_RELATION_L); - } -out: - mutex_unlock(&dbs_mutex); - return; -} - -static void __cpuinit do_cpu_up(struct work_struct *work) -{ - cpu_up(1); -} - -static void __cpuinit do_cpu_down(struct work_struct *work) -{ - cpu_down(1); -} - -static void do_dbs_timer(struct work_struct *work) -{ - struct cpu_dbs_info_s *dbs_info = - container_of(work, struct cpu_dbs_info_s, work.work); - unsigned int cpu = dbs_info->cpu; - int delay = 0; - - mutex_lock(&dbs_info->timer_mutex); - if (!dbs_info->boost_applied) { - dbs_check_cpu(dbs_info); - /* We want all related CPUs to do sampling nearly on same jiffy */ - delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - } else { - delay = usecs_to_jiffies(dbs_tuners_ins.boost_timeout); - dbs_info->boost_applied = 0; - if (num_online_cpus() < 2) - queue_work_on(cpu, khotplug_wq, - &dbs_info->cpu_up_work); - } - queue_delayed_work_on(cpu, khotplug_wq, &dbs_info->work, delay); - mutex_unlock(&dbs_info->timer_mutex); -} - -static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) -{ - /* We want all related CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - delay -= jiffies % delay; - - INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); - if (!dbs_info->boost_applied) - delay = usecs_to_jiffies(dbs_tuners_ins.boost_timeout); - queue_delayed_work_on(dbs_info->cpu, khotplug_wq, &dbs_info->work, - delay); -} - -static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) -{ - cancel_delayed_work_sync(&dbs_info->work); -} - -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int i, j, max_periods; - int rc; - - this_dbs_info = &per_cpu(hp_cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_enable++; - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(hp_cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) { - j_dbs_info->prev_cpu_nice = - kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - } - - max_periods = max(DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS, - DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS); - dbs_tuners_ins.hotplug_load_history = kmalloc( - (sizeof(unsigned int) * max_periods), - GFP_KERNEL); - if (!dbs_tuners_ins.hotplug_load_history) { - WARN_ON(1); - return -ENOMEM; - } - for (i = 0; i < max_periods; i++) - dbs_tuners_ins.hotplug_load_history[i] = 50; - } - this_dbs_info->cpu = cpu; - this_dbs_info->freq_table = cpufreq_frequency_get_table(cpu); - /* - * Start the timerschedule work, when this 
governor - * is used for first time - */ - if (dbs_enable == 1) { - rc = sysfs_create_group(cpufreq_global_kobject, - &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - } - if (!dbs_tuners_ins.boost_timeout) - dbs_tuners_ins.boost_timeout = dbs_tuners_ins.sampling_rate * 30; - mutex_unlock(&dbs_mutex); - - mutex_init(&this_dbs_info->timer_mutex); - dbs_timer_init(this_dbs_info); - break; - - case CPUFREQ_GOV_STOP: - dbs_timer_exit(this_dbs_info); - - mutex_lock(&dbs_mutex); - mutex_destroy(&this_dbs_info->timer_mutex); - dbs_enable--; - mutex_unlock(&dbs_mutex); - if (!dbs_enable) - sysfs_remove_group(cpufreq_global_kobject, - &dbs_attr_group); - kfree(dbs_tuners_ins.hotplug_load_history); - /* - * XXX BIG CAVEAT: Stopping the governor with CPU1 offline - * will result in it remaining offline until the user onlines - * it again. It is up to the user to do this (for now). - */ - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&this_dbs_info->timer_mutex); - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); - mutex_unlock(&this_dbs_info->timer_mutex); - break; - } - return 0; -} - -#if 0 -static int hotplug_boost(struct cpufreq_policy *policy) -{ - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - - this_dbs_info = &per_cpu(hp_cpu_dbs_info, cpu); - -#if 0 - /* Already at max? */ - if (policy->cur == policy->max) - return; -#endif - - mutex_lock(&this_dbs_info->timer_mutex); - this_dbs_info->boost_applied = 1; - __cpufreq_driver_target(policy, policy->max, - CPUFREQ_RELATION_H); - mutex_unlock(&this_dbs_info->timer_mutex); - - return 0; -} -#endif - -static int __init cpufreq_gov_dbs_init(void) -{ - int err; - cputime64_t wall; - u64 idle_time; - int cpu = get_cpu(); - struct cpu_dbs_info_s *dbs_info = &per_cpu(hp_cpu_dbs_info, 0); - - INIT_WORK(&dbs_info->cpu_up_work, do_cpu_up); - INIT_WORK(&dbs_info->cpu_down_work, do_cpu_down); - - idle_time = get_cpu_idle_time_us(cpu, &wall); - put_cpu(); - if (idle_time != -1ULL) { - dbs_tuners_ins.up_threshold = DEFAULT_UP_FREQ_MIN_LOAD; - } else { - pr_err("cpufreq-abyssplug: %s: assumes CONFIG_NO_HZ\n", - __func__); - return -EINVAL; - } - - khotplug_wq = create_workqueue("khotplug"); - if (!khotplug_wq) { - pr_err("Creation of khotplug failed\n"); - return -EFAULT; - } - err = cpufreq_register_governor(&cpufreq_gov_abyssplug); - if (err) - destroy_workqueue(khotplug_wq); - - return err; -} - -static void __exit cpufreq_gov_dbs_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_abyssplug); - destroy_workqueue(khotplug_wq); -} - -MODULE_DESCRIPTION("'cpufreq_abyssplug' - cpufreq governor for dynamic frequency scaling and CPU hotplug"); -MODULE_LICENSE("GPL"); - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ABYSSPLUG -fs_initcall(cpufreq_gov_dbs_init); -#else -module_init(cpufreq_gov_dbs_init); -#endif -module_exit(cpufreq_gov_dbs_exit); - diff --git a/drivers/cpufreq/cpufreq_adaptive.c b/drivers/cpufreq/cpufreq_adaptive.c deleted file mode 100644 index 2eff3e28..00000000 --- a/drivers/cpufreq/cpufreq_adaptive.c +++ /dev/null @@ -1,952 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_adaptive.c - * - * Copyright (C) 2001 Russell King - * (C) 2003 Venkatesh Pallipadi . 
- * Jun Nakajima - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ - -#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) -#define DEF_FREQUENCY_UP_THRESHOLD (80) -#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) -#define MICRO_FREQUENCY_UP_THRESHOLD (95) -#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) -#define MIN_FREQUENCY_UP_THRESHOLD (11) -#define MAX_FREQUENCY_UP_THRESHOLD (100) -#define MIN_ONDEMAND_THRESHOLD (4) -/* - * The polling frequency of this governor depends on the capability of - * the processor. Default polling frequency is 1000 times the transition - * latency of the processor. The governor will work on any processor with - * transition latency <= 10mS, using appropriate sampling - * rate. - * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) - * this governor will not work. - * All times here are in uS. - */ -#define MIN_SAMPLING_RATE_RATIO (2) - -static unsigned int min_sampling_rate; - -#define LATENCY_MULTIPLIER (1000) -#define MIN_LATENCY_MULTIPLIER (100) -#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) - -static void (*pm_idle_old)(void); -static void do_dbs_timer(struct work_struct *work); -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ADAPTIVE -static -#endif -struct cpufreq_governor cpufreq_gov_adaptive = { - .name = "adaptive", - .governor = cpufreq_governor_dbs, - .max_transition_latency = TRANSITION_LATENCY_LIMIT, - .owner = THIS_MODULE, -}; - -/* Sampling types */ -enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; - -struct cpu_dbs_info_s { - cputime64_t prev_cpu_idle; - cputime64_t prev_cpu_iowait; - cputime64_t prev_cpu_wall; - cputime64_t prev_cpu_nice; - struct cpufreq_policy *cur_policy; - struct delayed_work work; - struct cpufreq_frequency_table *freq_table; - unsigned int freq_hi_jiffies; - int cpu; - unsigned int sample_type:1; - bool ondemand; - /* - * percpu mutex that serializes governor limit change with - * do_dbs_timer invocation. We do not want do_dbs_timer to run - * when user is changing the governor or limits. - */ - struct mutex timer_mutex; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); - -static unsigned int dbs_enable; /* number of CPUs using this policy */ - -/* - * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on - * different CPUs. It protects dbs_enable in governor start/stop. - */ -static DEFINE_MUTEX(dbs_mutex); -static struct task_struct *up_task; -static struct workqueue_struct *down_wq; -static struct work_struct freq_scale_down_work; -static cpumask_t up_cpumask; -static spinlock_t up_cpumask_lock; -static cpumask_t down_cpumask; -static spinlock_t down_cpumask_lock; - -static DEFINE_PER_CPU(cputime64_t, idle_in_idle); -static DEFINE_PER_CPU(cputime64_t, idle_exit_wall); - -static struct timer_list cpu_timer; -static unsigned int target_freq; -static DEFINE_MUTEX(short_timer_mutex); - -/* Go to max speed when CPU load at or above this value. 
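The polling-rate comment above ties the adaptive governor's sampling period to the hardware transition latency: the default period is LATENCY_MULTIPLIER (1000) times the latency, with MIN_LATENCY_MULTIPLIER (100) times the latency as a floor, and the same arithmetic is applied later in this file at governor start. A minimal user-space sketch of that derivation, with a made-up 50 us transition latency:

/* Standalone sketch of the sampling-rate derivation described above.
 * All times in microseconds; the transition latency is an example value. */
#include <stdio.h>

#define LATENCY_MULTIPLIER      1000   /* default period = 1000 x latency */
#define MIN_LATENCY_MULTIPLIER  100    /* hard floor     =  100 x latency */

static unsigned int pick_sampling_rate(unsigned int transition_latency_ns,
                                       unsigned int min_sampling_rate_us)
{
    unsigned int latency_us = transition_latency_ns / 1000; /* nS -> uS */

    if (latency_us == 0)
        latency_us = 1;

    /* bring kernel and hardware constraints together */
    if (min_sampling_rate_us < MIN_LATENCY_MULTIPLIER * latency_us)
        min_sampling_rate_us = MIN_LATENCY_MULTIPLIER * latency_us;

    return (min_sampling_rate_us > latency_us * LATENCY_MULTIPLIER)
            ? min_sampling_rate_us
            : latency_us * LATENCY_MULTIPLIER;
}

int main(void)
{
    /* e.g. a 50 us transition latency -> a 50 ms default sampling period */
    printf("%u us\n", pick_sampling_rate(50000, 10000));
    return 0;
}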
*/ -#define DEFAULT_GO_MAXSPEED_LOAD 60 -static unsigned long go_maxspeed_load; - -#define DEFAULT_KEEP_MINSPEED_LOAD 30 -static unsigned long keep_minspeed_load; - -#define DEFAULT_STEPUP_LOAD 10 -static unsigned long step_up_load; - -static struct dbs_tuners { - unsigned int sampling_rate; - unsigned int up_threshold; - unsigned int down_differential; - unsigned int ignore_nice; - unsigned int io_is_busy; -} dbs_tuners_ins = { - .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, - .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, - .ignore_nice = 0, -}; - -static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) -{ - u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); - - if (iowait_time == -1ULL) - return 0; - - return iowait_time; -} - -static void adaptive_init_cpu(int cpu) -{ - struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - dbs_info->freq_table = cpufreq_frequency_get_table(cpu); -} - -/************************** sysfs interface ************************/ - -static ssize_t show_sampling_rate_max(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - printk_once(KERN_INFO "CPUFREQ: adaptive sampling_rate_max " - "sysfs file is deprecated - used by: %s\n", current->comm); - return sprintf(buf, "%u\n", -1U); -} - -static ssize_t show_sampling_rate_min(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", min_sampling_rate); -} - -define_one_global_ro(sampling_rate_max); -define_one_global_ro(sampling_rate_min); - -/* cpufreq_adaptive Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct kobject *kobj, struct attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ -} -show_one(sampling_rate, sampling_rate); -show_one(io_is_busy, io_is_busy); -show_one(up_threshold, up_threshold); -show_one(ignore_nice_load, ignore_nice); - -/*** delete after deprecation time ***/ - -#define DEPRECATION_MSG(file_name) \ - printk_once(KERN_INFO "CPUFREQ: Per core adaptive sysfs " \ - "interface is deprecated - " #file_name "\n"); - -#define show_one_old(file_name) \ -static ssize_t show_##file_name##_old \ -(struct cpufreq_policy *unused, char *buf) \ -{ \ - printk_once(KERN_INFO "CPUFREQ: Per core adaptive sysfs " \ - "interface is deprecated - " #file_name "\n"); \ - return show_##file_name(NULL, NULL, buf); \ -} - -/*** delete after deprecation time ***/ - -static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.io_is_busy = !!input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || - input < MIN_FREQUENCY_UP_THRESHOLD) { - return -EINVAL; - } - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.up_threshold = input; - mutex_unlock(&dbs_mutex); - - return 
count; -} - -static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - unsigned int j; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - if (input > 1) - input = 1; - - mutex_lock(&dbs_mutex); - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - mutex_unlock(&dbs_mutex); - return count; - } - dbs_tuners_ins.ignore_nice = input; - - /* we need to re-evaluate prev_cpu_idle */ - for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; - dbs_info = &per_cpu(od_cpu_dbs_info, j); - dbs_info->prev_cpu_idle = get_cpu_idle_time_us(j, - &dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - - } - mutex_unlock(&dbs_mutex); - - return count; -} - -define_one_global_rw(sampling_rate); -define_one_global_rw(io_is_busy); -define_one_global_rw(up_threshold); -define_one_global_rw(ignore_nice_load); - -static struct attribute *dbs_attributes[] = { - &sampling_rate_max.attr, - &sampling_rate_min.attr, - &sampling_rate.attr, - &up_threshold.attr, - &ignore_nice_load.attr, - &io_is_busy.attr, - NULL -}; - -static struct attribute_group dbs_attr_group = { - .attrs = dbs_attributes, - .name = "adaptive", -}; - -/*** delete after deprecation time ***/ - -#define write_one_old(file_name) \ -static ssize_t store_##file_name##_old \ -(struct cpufreq_policy *unused, const char *buf, size_t count) \ -{ \ - printk_once(KERN_INFO "CPUFREQ: Per core adaptive sysfs " \ - "interface is deprecated - " #file_name "\n"); \ - return store_##file_name(NULL, NULL, buf, count); \ -} - -static void cpufreq_adaptive_timer(unsigned long data) -{ - cputime64_t cur_idle; - cputime64_t cur_wall; - unsigned int delta_idle; - unsigned int delta_time; - int short_load; - unsigned int new_freq; - unsigned long flags; - struct cpu_dbs_info_s *this_dbs_info; - struct cpufreq_policy *policy; - unsigned int j; - unsigned int index; - unsigned int max_load = 0; - - this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); - - policy = this_dbs_info->cur_policy; - - for_each_online_cpu(j) { - cur_idle = get_cpu_idle_time_us(j, &cur_wall); - - delta_idle = (unsigned int) cputime64_sub(cur_idle, - per_cpu(idle_in_idle, j)); - delta_time = (unsigned int) cputime64_sub(cur_wall, - per_cpu(idle_exit_wall, j)); - - /* - * If timer ran less than 1ms after short-term sample started, retry. 
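The short-term sampling path above reduces each online CPU to one number: the busy share of the wall time elapsed since the two-jiffy window was armed, with a retry when the window is shorter than 1 ms. A standalone sketch of that calculation with invented deltas:

/* Standalone sketch of the short-term load calculation used by the
 * adaptive governor's 2-jiffy timer above.  Inputs are microsecond
 * deltas since the short sample window was armed; values are made up. */
#include <stdio.h>

static int short_term_load(unsigned int delta_time_us, unsigned int delta_idle_us)
{
    if (delta_time_us < 1000)
        return -1;                      /* window too short: caller retries */
    if (delta_idle_us > delta_time_us)
        return 0;                       /* clock skew: treat as fully idle  */
    return 100 * (delta_time_us - delta_idle_us) / delta_time_us;
}

int main(void)
{
    printf("%d%%\n", short_term_load(4000, 1000));  /* 75% busy  */
    printf("%d%%\n", short_term_load(500,  100));   /* -1: retry */
    return 0;
}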
- */ - if (delta_time < 1000) - goto do_nothing; - - if (delta_idle > delta_time) - short_load = 0; - else - short_load = 100 * (delta_time - delta_idle) / delta_time; - - if (short_load > max_load) - max_load = short_load; - } - - if (this_dbs_info->ondemand) - goto do_nothing; - - if (max_load >= go_maxspeed_load) - new_freq = policy->max; - else - new_freq = policy->max * max_load / 100; - - if ((max_load <= keep_minspeed_load) && - (policy->cur == policy->min)) - new_freq = policy->cur; - - if (cpufreq_frequency_table_target(policy, this_dbs_info->freq_table, - new_freq, CPUFREQ_RELATION_L, - &index)) { - goto do_nothing; - } - - new_freq = this_dbs_info->freq_table[index].frequency; - - target_freq = new_freq; - - if (new_freq < this_dbs_info->cur_policy->cur) { - spin_lock_irqsave(&down_cpumask_lock, flags); - cpumask_set_cpu(0, &down_cpumask); - spin_unlock_irqrestore(&down_cpumask_lock, flags); - queue_work(down_wq, &freq_scale_down_work); - } else { - spin_lock_irqsave(&up_cpumask_lock, flags); - cpumask_set_cpu(0, &up_cpumask); - spin_unlock_irqrestore(&up_cpumask_lock, flags); - wake_up_process(up_task); - } - - return; - -do_nothing: - for_each_online_cpu(j) { - per_cpu(idle_in_idle, j) = - get_cpu_idle_time_us(j, - &per_cpu(idle_exit_wall, j)); - } - mod_timer(&cpu_timer, jiffies + 2); - schedule_delayed_work_on(0, &this_dbs_info->work, 10); - - if (mutex_is_locked(&short_timer_mutex)) - mutex_unlock(&short_timer_mutex); - return; -} - -/*** delete after deprecation time ***/ - -/************************** sysfs end ************************/ - -static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) -{ -#ifndef CONFIG_ARCH_EXYNOS4 - if (p->cur == p->max) - return; -#endif - __cpufreq_driver_target(p, freq, CPUFREQ_RELATION_H); -} - -static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) -{ - unsigned int max_load_freq; - - struct cpufreq_policy *policy; - unsigned int j; - - unsigned int index, new_freq; - unsigned int longterm_load = 0; - - policy = this_dbs_info->cur_policy; - - /* - * Every sampling_rate, we check, if current idle time is less - * than 20% (default), then we try to increase frequency - * Every sampling_rate, we look for a the lowest - * frequency which can sustain the load while keeping idle time over - * 30%. If such a frequency exist, we try to decrease to this frequency. - * - * Any frequency increase takes it to the maximum frequency. 
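The highest of those short-term loads is then mapped straight to a target frequency: at or above go_maxspeed_load the policy maximum is used, otherwise the maximum is scaled by the load, and a quiet CPU already sitting at the minimum is left alone. The sketch below repeats that selection using the defaults from the removed file (60 and 30) and invented policy limits; the real code additionally rounds the result through the frequency table.

/* Standalone sketch of the short-term target selection above (kHz). */
#include <stdio.h>

#define GO_MAXSPEED_LOAD   60   /* at or above this load, jump to fmax */
#define KEEP_MINSPEED_LOAD 30   /* at or below this load, stay at fmin */

static unsigned int pick_target(unsigned int max_load,
                                unsigned int cur, unsigned int fmin,
                                unsigned int fmax)
{
    unsigned int new_freq;

    if (max_load >= GO_MAXSPEED_LOAD)
        new_freq = fmax;
    else
        new_freq = fmax * max_load / 100;   /* scale fmax by the load */

    if (max_load <= KEEP_MINSPEED_LOAD && cur == fmin)
        new_freq = cur;                     /* don't bounce off the floor */

    return new_freq;   /* the governor then rounds via the freq table */
}

int main(void)
{
    printf("%u kHz\n", pick_target(45, 486000, 200000, 1400000)); /* 630000  */
    printf("%u kHz\n", pick_target(75, 486000, 200000, 1400000)); /* 1400000 */
    return 0;
}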
- * Frequency reduction happens at minimum steps of - * 5% (default) of current frequency - */ - - /* Get Absolute Load - in terms of freq */ - max_load_freq = 0; - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; - unsigned int idle_time, wall_time, iowait_time; - unsigned int load, load_freq; - int freq_avg; - - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - - cur_idle_time = get_cpu_idle_time_us(j, &cur_wall_time); - cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); - - wall_time = (unsigned int) cputime64_sub(cur_wall_time, - j_dbs_info->prev_cpu_wall); - j_dbs_info->prev_cpu_wall = cur_wall_time; - - idle_time = (unsigned int) cputime64_sub(cur_idle_time, - j_dbs_info->prev_cpu_idle); - j_dbs_info->prev_cpu_idle = cur_idle_time; - - iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, - j_dbs_info->prev_cpu_iowait); - j_dbs_info->prev_cpu_iowait = cur_iowait_time; - - if (dbs_tuners_ins.ignore_nice) { - cputime64_t cur_nice; - unsigned long cur_nice_jiffies; - - cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - - j_dbs_info->prev_cpu_nice; - /* - * Assumption: nice time between sampling periods will - * be less than 2^32 jiffies for 32 bit sys - */ - cur_nice_jiffies = (unsigned long) - cputime64_to_jiffies64(cur_nice); - - j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - idle_time += jiffies_to_usecs(cur_nice_jiffies); - } - - /* - * For the purpose of adaptive, waiting for disk IO is an - * indication that you're performance critical, and not that - * the system is actually idle. So subtract the iowait time - * from the cpu idle time. - */ - - if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) - idle_time -= iowait_time; - - if (unlikely(!wall_time || wall_time < idle_time)) - continue; - - load = 100 * (wall_time - idle_time) / wall_time; - - if (load > longterm_load) - longterm_load = load; - - freq_avg = __cpufreq_driver_getavg(policy, j); - if (freq_avg <= 0) - freq_avg = policy->cur; - - load_freq = load * freq_avg; - - if (load_freq > max_load_freq) - max_load_freq = load_freq; - } - - if (longterm_load >= MIN_ONDEMAND_THRESHOLD) - this_dbs_info->ondemand = true; - else - this_dbs_info->ondemand = false; - - /* Check for frequency increase */ - if (max_load_freq > (dbs_tuners_ins.up_threshold * policy->cur)) { - cpufreq_frequency_table_target(policy, - this_dbs_info->freq_table, - (policy->cur + step_up_load), - CPUFREQ_RELATION_L, &index); - - new_freq = this_dbs_info->freq_table[index].frequency; - dbs_freq_increase(policy, new_freq); - return; - } - - /* Check for frequency decrease */ - /* if we cannot reduce the frequency anymore, break out early */ -#ifndef CONFIG_ARCH_EXYNOS4 - if (policy->cur == policy->min) - return; -#endif - /* - * The optimal frequency is the frequency that is the lowest that - * can support the current CPU usage without triggering the up - * policy. To be safe, we focus 10 points under the threshold. 
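In the periodic path above the up decision is made in load-times-frequency units: the busy percentage is weighted by the average running frequency before being compared against up_threshold times the current frequency, so the same percentage counts for less when the core spent the window at a lower clock. A standalone sketch with invented figures:

/* Standalone sketch of the "load x frequency" comparison above.
 * Frequencies in kHz, load and threshold in percent. */
#include <stdio.h>

static int should_step_up(unsigned int load_pct, unsigned int freq_avg_khz,
                          unsigned int cur_khz, unsigned int up_threshold)
{
    unsigned long load_freq = (unsigned long)load_pct * freq_avg_khz;

    return load_freq > (unsigned long)up_threshold * cur_khz;
}

int main(void)
{
    /* 85% load while averaging 1.0 GHz on a 1.0 GHz clock, 80% threshold:
     * step up */
    printf("%d\n", should_step_up(85, 1000000, 1000000, 80));
    /* same 85% load, but the core averaged only 500 MHz of a 1 GHz clock:
     * effective demand is about 42.5%, stay put */
    printf("%d\n", should_step_up(85, 500000, 1000000, 80));
    return 0;
}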
- */ - if (max_load_freq < - (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * - policy->cur) { - unsigned int freq_next; - freq_next = max_load_freq / - (dbs_tuners_ins.up_threshold - - dbs_tuners_ins.down_differential); - - if (freq_next < policy->min) - freq_next = policy->min; - - __cpufreq_driver_target(policy, freq_next, - CPUFREQ_RELATION_L); - } -} - -static void do_dbs_timer(struct work_struct *work) -{ - struct cpu_dbs_info_s *dbs_info = - container_of(work, struct cpu_dbs_info_s, work.work); - unsigned int cpu = dbs_info->cpu; - - int delay; - - mutex_lock(&dbs_info->timer_mutex); - - /* Common NORMAL_SAMPLE setup */ - dbs_info->sample_type = DBS_NORMAL_SAMPLE; - dbs_check_cpu(dbs_info); - - /* We want all CPUs to do sampling nearly on - * same jiffy - */ - delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - schedule_delayed_work_on(cpu, &dbs_info->work, delay); - - mutex_unlock(&dbs_info->timer_mutex); -} - -static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) -{ - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - dbs_info->sample_type = DBS_NORMAL_SAMPLE; - INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); - schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); -} - -static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) -{ - cancel_delayed_work_sync(&dbs_info->work); -} - -/* - * Not all CPUs want IO time to be accounted as busy; this dependson how - * efficient idling at a higher frequency/voltage is. - * Pavel Machek says this is not so for various generations of AMD and old - * Intel systems. - * Mike Chan (androidlcom) calis this is also not true for ARM. - * Because of this, whitelist specific known (series) of CPUs by default, and - * leave all others up to the user. - */ -static int should_io_be_busy(void) -{ -#if defined(CONFIG_X86) - /* - * For Intel, Core 2 (model 15) andl later have an efficient idle. 
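The decrease step above, and the equivalent one in the abyssplug hunk earlier, picks the lowest frequency that keeps the measured demand down_differential points under the up threshold. A user-space sketch of the arithmetic with example numbers (80/10 thresholds, 30% load averaged at 1.4 GHz):

/* Standalone sketch of the frequency-decrease step above: choose the
 * lowest frequency that keeps the demand a safety margin under the up
 * threshold.  Frequencies in kHz; sample values are invented. */
#include <stdio.h>

static unsigned int pick_freq_next(unsigned long max_load_freq,
                                   unsigned int cur_khz, unsigned int fmin_khz,
                                   unsigned int up_threshold,
                                   unsigned int down_differential)
{
    unsigned int margin = up_threshold - down_differential;
    unsigned int freq_next;

    if (max_load_freq >= (unsigned long)margin * cur_khz)
        return cur_khz;                  /* not enough headroom to drop */

    freq_next = max_load_freq / margin;  /* f such that load*f_avg ~ margin*f */
    if (freq_next < fmin_khz)
        freq_next = fmin_khz;
    return freq_next;
}

int main(void)
{
    /* 30% load at an average of 1.4 GHz, 80/10 thresholds, 200 MHz floor */
    printf("%u kHz\n", pick_freq_next(30UL * 1400000, 1400000, 200000, 80, 10));
    return 0;
}

At the 600 MHz result the same absolute demand corresponds to a 70% load, exactly the 80 minus 10 margin, so the next sample should land just below the up trigger.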
- */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - boot_cpu_data.x86 == 6 && - boot_cpu_data.x86_model >= 15) - return 1; -#endif - return 0; -} - -static void cpufreq_adaptive_idle(void) -{ - int i; - struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, 0); - struct cpufreq_policy *policy; - - policy = dbs_info->cur_policy; - - pm_idle_old(); - - if ((policy->cur == policy->min) || - (policy->cur == policy->max)) { - - if (timer_pending(&cpu_timer)) - return; - - if (mutex_trylock(&short_timer_mutex)) { - for_each_online_cpu(i) { - per_cpu(idle_in_idle, i) = - get_cpu_idle_time_us(i, - &per_cpu(idle_exit_wall, i)); - } - - mod_timer(&cpu_timer, jiffies + 2); - cancel_delayed_work(&dbs_info->work); - } - } else { - if (timer_pending(&cpu_timer)) - del_timer(&cpu_timer); - - } -} - -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int j; - int rc; - - this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - mutex_lock(&dbs_mutex); - - rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - dbs_enable++; - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle = get_cpu_idle_time_us(j, - &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) { - j_dbs_info->prev_cpu_nice = - kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - } - } - this_dbs_info->cpu = cpu; - adaptive_init_cpu(cpu); - - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 1) { - unsigned int latency; - - rc = sysfs_create_group(cpufreq_global_kobject, - &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - /* policy latency is in nS. 
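cpufreq_adaptive_idle above chains into pm_idle and only arms the short two-jiffy timer when the governor is parked at either end of the frequency range, cancelling it again once the CPU runs somewhere in between; while the short timer is armed, the regular deferrable work is cancelled. The sketch below models just that gating, with the timer and locking machinery left out and invented frequencies:

/* Standalone sketch of the idle-hook gating in the removed adaptive
 * governor: a quick re-sample is only armed at either frequency extreme,
 * where transitions away from the extreme should be noticed between
 * regular samples. */
#include <stdio.h>

enum idle_action { NO_CHANGE, ARM_SHORT_TIMER, CANCEL_SHORT_TIMER };

static enum idle_action on_idle(unsigned int cur, unsigned int fmin,
                                unsigned int fmax, int short_timer_pending)
{
    if (cur == fmin || cur == fmax)
        return short_timer_pending ? NO_CHANGE : ARM_SHORT_TIMER;
    return short_timer_pending ? CANCEL_SHORT_TIMER : NO_CHANGE;
}

int main(void)
{
    printf("%d\n", on_idle(200000, 200000, 1400000, 0)); /* at fmin: arm      */
    printf("%d\n", on_idle(800000, 200000, 1400000, 1)); /* mid-range: cancel */
    return 0;
}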
Convert it to uS first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - /* Bring kernel and HW constraints together */ - min_sampling_rate = max(min_sampling_rate, - MIN_LATENCY_MULTIPLIER * latency); - dbs_tuners_ins.sampling_rate = - max(min_sampling_rate, - latency * LATENCY_MULTIPLIER); - dbs_tuners_ins.io_is_busy = should_io_be_busy(); - } - mutex_unlock(&dbs_mutex); - - mutex_init(&this_dbs_info->timer_mutex); - dbs_timer_init(this_dbs_info); - - pm_idle_old = pm_idle; - pm_idle = cpufreq_adaptive_idle; - break; - - case CPUFREQ_GOV_STOP: - dbs_timer_exit(this_dbs_info); - - mutex_lock(&dbs_mutex); - sysfs_remove_group(&policy->kobj, &dbs_attr_group); - mutex_destroy(&this_dbs_info->timer_mutex); - dbs_enable--; - mutex_unlock(&dbs_mutex); - if (!dbs_enable) - sysfs_remove_group(cpufreq_global_kobject, - &dbs_attr_group); - - pm_idle = pm_idle_old; - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&this_dbs_info->timer_mutex); - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); - mutex_unlock(&this_dbs_info->timer_mutex); - break; - } - return 0; -} - -static inline void cpufreq_adaptive_update_time(void) -{ - struct cpu_dbs_info_s *this_dbs_info; - struct cpufreq_policy *policy; - int j; - - this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); - policy = this_dbs_info->cur_policy; - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; - - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - - cur_idle_time = get_cpu_idle_time_us(j, &cur_wall_time); - cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); - - j_dbs_info->prev_cpu_wall = cur_wall_time; - - j_dbs_info->prev_cpu_idle = cur_idle_time; - - j_dbs_info->prev_cpu_iowait = cur_iowait_time; - - if (dbs_tuners_ins.ignore_nice) - j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - - } - -} - -static int cpufreq_adaptive_up_task(void *data) -{ - unsigned long flags; - struct cpu_dbs_info_s *this_dbs_info; - struct cpufreq_policy *policy; - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); - policy = this_dbs_info->cur_policy; - - while (1) { - set_current_state(TASK_INTERRUPTIBLE); - spin_lock_irqsave(&up_cpumask_lock, flags); - - if (cpumask_empty(&up_cpumask)) { - spin_unlock_irqrestore(&up_cpumask_lock, flags); - schedule(); - - if (kthread_should_stop()) - break; - - spin_lock_irqsave(&up_cpumask_lock, flags); - } - - set_current_state(TASK_RUNNING); - - cpumask_clear(&up_cpumask); - spin_unlock_irqrestore(&up_cpumask_lock, flags); - - __cpufreq_driver_target(this_dbs_info->cur_policy, - target_freq, - CPUFREQ_RELATION_H); - if (policy->cur != policy->max) { - mutex_lock(&this_dbs_info->timer_mutex); - - schedule_delayed_work_on(0, &this_dbs_info->work, delay); - mutex_unlock(&this_dbs_info->timer_mutex); - cpufreq_adaptive_update_time(); - } - if (mutex_is_locked(&short_timer_mutex)) - mutex_unlock(&short_timer_mutex); - } - - return 0; -} - -static void cpufreq_adaptive_freq_down(struct work_struct *work) -{ - unsigned long flags; - struct cpu_dbs_info_s *this_dbs_info; - struct cpufreq_policy *policy; - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - spin_lock_irqsave(&down_cpumask_lock, 
flags); - cpumask_clear(&down_cpumask); - spin_unlock_irqrestore(&down_cpumask_lock, flags); - - this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); - policy = this_dbs_info->cur_policy; - - __cpufreq_driver_target(this_dbs_info->cur_policy, - target_freq, - CPUFREQ_RELATION_H); - - if (policy->cur != policy->min) { - mutex_lock(&this_dbs_info->timer_mutex); - - schedule_delayed_work_on(0, &this_dbs_info->work, delay); - mutex_unlock(&this_dbs_info->timer_mutex); - cpufreq_adaptive_update_time(); - } - - if (mutex_is_locked(&short_timer_mutex)) - mutex_unlock(&short_timer_mutex); -} - -static int __init cpufreq_gov_dbs_init(void) -{ - cputime64_t wall; - u64 idle_time; - int cpu = get_cpu(); - - struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; - go_maxspeed_load = DEFAULT_GO_MAXSPEED_LOAD; - keep_minspeed_load = DEFAULT_KEEP_MINSPEED_LOAD; - step_up_load = DEFAULT_STEPUP_LOAD; - - idle_time = get_cpu_idle_time_us(cpu, &wall); - put_cpu(); - if (idle_time != -1ULL) { - /* Idle micro accounting is supported. Use finer thresholds */ - dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; - dbs_tuners_ins.down_differential = - MICRO_FREQUENCY_DOWN_DIFFERENTIAL; - /* - * In no_hz/micro accounting case we set the minimum frequency - * not depending on HZ, but fixed (very low). The deferred - * timer might skip some samples if idle/sleeping as needed. - */ - min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; - } else { - /* For correct statistics, we need 10 ticks for each measure */ - min_sampling_rate = - MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); - } - - init_timer(&cpu_timer); - cpu_timer.function = cpufreq_adaptive_timer; - - up_task = kthread_create(cpufreq_adaptive_up_task, NULL, - "kadaptiveup"); - - if (IS_ERR(up_task)) - return PTR_ERR(up_task); - - sched_setscheduler_nocheck(up_task, SCHED_FIFO, ¶m); - get_task_struct(up_task); - - /* No rescuer thread, bind to CPU queuing the work for possibly - warm cache (probably doesn't matter much). */ - down_wq = alloc_workqueue("kadaptive_down", 0, 1); - - if (!down_wq) - goto err_freeuptask; - - INIT_WORK(&freq_scale_down_work, cpufreq_adaptive_freq_down); - - - return cpufreq_register_governor(&cpufreq_gov_adaptive); -err_freeuptask: - put_task_struct(up_task); - return -ENOMEM; -} - -static void __exit cpufreq_gov_dbs_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_adaptive); -} - - -MODULE_AUTHOR("Venkatesh Pallipadi "); -MODULE_AUTHOR("Alexey Starikovskiy "); -MODULE_DESCRIPTION("'cpufreq_adaptive' - A dynamic cpufreq governor for " - "Low Latency Frequency Transition capable processors"); -MODULE_LICENSE("GPL"); - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ADAPTIVE -fs_initcall(cpufreq_gov_dbs_init); -#else -module_init(cpufreq_gov_dbs_init); -#endif -module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_nightmare.c b/drivers/cpufreq/cpufreq_nightmare.c deleted file mode 100644 index ece971ca..00000000 --- a/drivers/cpufreq/cpufreq_nightmare.c +++ /dev/null @@ -1,1656 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_nightmare.c - * - * Copyright (C) 2011 Samsung Electronics co. ltd - * ByungChang Cha - * - * Based on ondemand governor - * Copyright (C) 2001 Russell King - * (C) 2003 Venkatesh Pallipadi . - * Jun Nakajima - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * Created by Alucard_24@xda - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef CONFIG_HAS_EARLYSUSPEND -#include -#endif -#define EARLYSUSPEND_HOTPLUGLOCK 1 - -/* - * runqueue average - */ - -#define RQ_AVG_TIMER_RATE 10 - -struct runqueue_data { - unsigned int nr_run_avg; - unsigned int update_rate; - int64_t last_time; - int64_t total_time; - struct delayed_work work; - struct workqueue_struct *nr_run_wq; - spinlock_t lock; -}; - -static struct runqueue_data *rq_data; -static void rq_work_fn(struct work_struct *work); - -static void start_rq_work(void) -{ - rq_data->nr_run_avg = 0; - rq_data->last_time = 0; - rq_data->total_time = 0; - if (rq_data->nr_run_wq == NULL) - rq_data->nr_run_wq = - create_singlethread_workqueue("nr_run_avg"); - - queue_delayed_work(rq_data->nr_run_wq, &rq_data->work, - msecs_to_jiffies(rq_data->update_rate)); - return; -} - -static void stop_rq_work(void) -{ - if (rq_data->nr_run_wq) - cancel_delayed_work(&rq_data->work); - return; -} - -static int __init init_rq_avg(void) -{ - rq_data = kzalloc(sizeof(struct runqueue_data), GFP_KERNEL); - if (rq_data == NULL) { - pr_err("%s cannot allocate memory\n", __func__); - return -ENOMEM; - } - spin_lock_init(&rq_data->lock); - rq_data->update_rate = RQ_AVG_TIMER_RATE; - INIT_DELAYED_WORK_DEFERRABLE(&rq_data->work, rq_work_fn); - - return 0; -} - -static void rq_work_fn(struct work_struct *work) -{ - int64_t time_diff = 0; - int64_t nr_run = 0; - unsigned long flags = 0; - int64_t cur_time = ktime_to_ns(ktime_get()); - - spin_lock_irqsave(&rq_data->lock, flags); - - if (rq_data->last_time == 0) - rq_data->last_time = cur_time; - if (rq_data->nr_run_avg == 0) - rq_data->total_time = 0; - - nr_run = nr_running() * 100; - time_diff = cur_time - rq_data->last_time; - do_div(time_diff, 1000 * 1000); - - if (time_diff != 0 && rq_data->total_time != 0) { - nr_run = (nr_run * time_diff) + - (rq_data->nr_run_avg * rq_data->total_time); - do_div(nr_run, rq_data->total_time + time_diff); - } - rq_data->nr_run_avg = nr_run; - rq_data->total_time += time_diff; - rq_data->last_time = cur_time; - - if (rq_data->update_rate != 0) - queue_delayed_work(rq_data->nr_run_wq, &rq_data->work, - msecs_to_jiffies(rq_data->update_rate)); - - spin_unlock_irqrestore(&rq_data->lock, flags); -} - -static unsigned int get_nr_run_avg(void) -{ - unsigned int nr_run_avg; - unsigned long flags = 0; - - spin_lock_irqsave(&rq_data->lock, flags); - nr_run_avg = rq_data->nr_run_avg; - rq_data->nr_run_avg = 0; - spin_unlock_irqrestore(&rq_data->lock, flags); - - return nr_run_avg; -} - - -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ - -#define DEF_SAMPLING_UP_FACTOR (1) -#define MAX_SAMPLING_UP_FACTOR (100000) -#define DEF_SAMPLING_DOWN_FACTOR (2) -#define MAX_SAMPLING_DOWN_FACTOR (100000) -#define DEF_FREQ_STEP_DEC (5) - -#define DEF_SAMPLING_RATE (60000) -#define MIN_SAMPLING_RATE (10000) -#define MAX_HOTPLUG_RATE (40u) - -#define DEF_MAX_CPU_LOCK (0) -#define DEF_MIN_CPU_LOCK (0) -#define DEF_UP_NR_CPUS (1) -#define DEF_CPU_UP_RATE (10) -#define DEF_CPU_DOWN_RATE (20) -#define DEF_FREQ_STEP (30) - -#define DEF_START_DELAY (0) - -#define FREQ_FOR_RESPONSIVENESS (918000) - -#define HOTPLUG_DOWN_INDEX (0) -#define HOTPLUG_UP_INDEX (1) - -/* CPU freq will be increased if measured load > inc_cpu_load;*/ -#define DEF_INC_CPU_LOAD (80) -#define 
INC_CPU_LOAD_AT_MIN_FREQ (40) -#define UP_AVG_LOAD (65u) -/* CPU freq will be decreased if measured load < dec_cpu_load;*/ -#define DEF_DEC_CPU_LOAD (60) -#define DOWN_AVG_LOAD (30u) -#define DEF_FREQ_UP_BRAKE (5u) -#define DEF_HOTPLUG_COMPARE_LEVEL (0u) - -#ifdef CONFIG_MACH_MIDAS -static int hotplug_rq[4][2] = { - {0, 100}, {100, 200}, {200, 300}, {300, 0} -}; - -static int hotplug_freq[4][2] = { - {0, 540000}, - {378000, 540000}, - {378000, 540000}, - {378000, 0} -}; -#else -static int hotplug_rq[4][2] = { - {0, 100}, {100, 200}, {200, 300}, {300, 0} -}; - -static int hotplug_freq[4][2] = { - {0, 540000}, - {378000, 540000}, - {378000, 540000}, - {378000, 0} -}; -#endif - -static unsigned int min_sampling_rate; - -static void do_dbs_timer(struct work_struct *work); -static int cpufreq_governor_nightmare(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_NIGHTMARE -static -#endif -struct cpufreq_governor cpufreq_gov_nightmare = { - .name = "nightmare", - .governor = cpufreq_governor_nightmare, - .owner = THIS_MODULE, -}; - -/* Sampling types */ -enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; - -struct cpufreq_nightmare_cpuinfo { - cputime64_t prev_cpu_idle; - cputime64_t prev_cpu_iowait; - cputime64_t prev_cpu_wall; - cputime64_t prev_cpu_nice; - struct cpufreq_policy *cur_policy; - struct delayed_work work; - struct work_struct up_work; - struct work_struct down_work; - struct cpufreq_frequency_table *freq_table; - unsigned int freq_table_maxsize; - unsigned int avg_rate_mult; - int cpu; - /* - * percpu mutex that serializes governor limit change with - * do_dbs_timer invocation. We do not want do_dbs_timer to run - * when user is changing the governor or limits. - */ - struct mutex timer_mutex; -}; -static DEFINE_PER_CPU(struct cpufreq_nightmare_cpuinfo, od_cpu_dbs_info); - -struct workqueue_struct *dvfs_workqueues; - -static unsigned int dbs_enable; /* number of CPUs using this policy */ - - -/* - * dbs_mutex protects dbs_enable in governor start/stop. 
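The runqueue-average worker earlier in this file folds nr_running() times 100 into a time-weighted mean roughly every 10 ms, and the sampling path later reads and resets it to judge hotplug pressure. A standalone sketch of the folding step with an invented sample stream (it leaves out the reset-on-read handling):

/* Standalone sketch of the runqueue averaging removed with
 * cpufreq_nightmare.c.  nr_run_avg holds runnable tasks x 100. */
#include <stdio.h>

struct rq_avg {
    unsigned long long total_time_ms;
    unsigned int nr_run_avg;
};

static void rq_fold_sample(struct rq_avg *a, unsigned int nr_running,
                           unsigned long long time_diff_ms)
{
    unsigned long long nr_run = (unsigned long long)nr_running * 100;

    if (time_diff_ms != 0 && a->total_time_ms != 0)
        nr_run = (nr_run * time_diff_ms +
                  (unsigned long long)a->nr_run_avg * a->total_time_ms)
                 / (a->total_time_ms + time_diff_ms);

    a->nr_run_avg = (unsigned int)nr_run;
    a->total_time_ms += time_diff_ms;
}

int main(void)
{
    struct rq_avg a = { 0, 0 };
    rq_fold_sample(&a, 2, 10);   /* 2 runnable tasks for 10 ms */
    rq_fold_sample(&a, 4, 10);   /* then 4 runnable tasks      */
    printf("avg = %u (i.e. %u.%02u tasks)\n",
           a.nr_run_avg, a.nr_run_avg / 100, a.nr_run_avg % 100);
    return 0;
}

Two equal windows of 2 and then 4 runnable tasks average to 300, i.e. 3.00 tasks, which is what the hotplug checks compare against the hotplug_rq thresholds.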
- */ -static DEFINE_MUTEX(dbs_mutex); - -static struct dbs_tuners { - unsigned int sampling_rate; - unsigned int freq_step_dec; - unsigned int ignore_nice; - unsigned int sampling_down_factor; - unsigned int io_is_busy; - /* nightmare tuners */ - unsigned int freq_step; - unsigned int cpu_up_rate; - unsigned int cpu_down_rate; - unsigned int up_nr_cpus; - unsigned int max_cpu_lock; - unsigned int min_cpu_lock; - atomic_t hotplug_lock; - unsigned int dvfs_debug; - unsigned int max_freq; - unsigned int min_freq; -#ifdef CONFIG_HAS_EARLYSUSPEND - int early_suspend; -#endif - unsigned int inc_cpu_load_at_min_freq; - unsigned int freq_for_responsiveness; - unsigned int inc_cpu_load; - unsigned int dec_cpu_load; - unsigned int up_avg_load; - unsigned int down_avg_load; - unsigned int sampling_up_factor; - unsigned int freq_up_brake; - unsigned int hotplug_compare_level; -} dbs_tuners_ins = { - .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, - .freq_step_dec = DEF_FREQ_STEP_DEC, - .ignore_nice = 0, - .freq_step = DEF_FREQ_STEP, - .cpu_up_rate = DEF_CPU_UP_RATE, - .cpu_down_rate = DEF_CPU_DOWN_RATE, - .up_nr_cpus = DEF_UP_NR_CPUS, - .max_cpu_lock = DEF_MAX_CPU_LOCK, - .min_cpu_lock = DEF_MIN_CPU_LOCK, - .hotplug_lock = ATOMIC_INIT(0), - .dvfs_debug = 0, -#ifdef CONFIG_HAS_EARLYSUSPEND - .early_suspend = -1, -#endif - .inc_cpu_load_at_min_freq = INC_CPU_LOAD_AT_MIN_FREQ, - .freq_for_responsiveness = FREQ_FOR_RESPONSIVENESS, - .inc_cpu_load = DEF_INC_CPU_LOAD, - .dec_cpu_load = DEF_DEC_CPU_LOAD, - .up_avg_load = UP_AVG_LOAD, - .down_avg_load = DOWN_AVG_LOAD, - .sampling_up_factor = DEF_SAMPLING_UP_FACTOR, - .freq_up_brake = DEF_FREQ_UP_BRAKE, - .hotplug_compare_level = DEF_HOTPLUG_COMPARE_LEVEL, -}; - - -/* - * CPU hotplug lock interface - */ - -static atomic_t g_hotplug_count = ATOMIC_INIT(0); -static atomic_t g_hotplug_lock = ATOMIC_INIT(0); - -static void apply_hotplug_lock(void) -{ - int online, possible, lock, flag; - struct work_struct *work; - struct cpufreq_nightmare_cpuinfo *dbs_info; - - /* do turn_on/off cpus */ - dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ - online = num_online_cpus(); - possible = num_possible_cpus(); - lock = atomic_read(&g_hotplug_lock); - flag = lock - online; - - if (lock == 0 || flag == 0) - return; - - work = flag > 0 ? 
&dbs_info->up_work : &dbs_info->down_work; - - pr_debug("%s online %d possible %d lock %d flag %d %d\n", - __func__, online, possible, lock, flag, (int)abs(flag)); - - queue_work_on(dbs_info->cpu, dvfs_workqueues, work); -} - -int cpufreq_nightmare_cpu_lock(int num_core) -{ - int prev_lock; - - if (num_core < 1 || num_core > num_possible_cpus()) - return -EINVAL; - - prev_lock = atomic_read(&g_hotplug_lock); - - if (prev_lock != 0 && prev_lock < num_core) - return -EINVAL; - else if (prev_lock == num_core) - atomic_inc(&g_hotplug_count); - - atomic_set(&g_hotplug_lock, num_core); - atomic_set(&g_hotplug_count, 1); - apply_hotplug_lock(); - - return 0; -} - -int cpufreq_nightmare_cpu_unlock(int num_core) -{ - int prev_lock = atomic_read(&g_hotplug_lock); - - if (prev_lock < num_core) - return 0; - else if (prev_lock == num_core) - atomic_dec(&g_hotplug_count); - - if (atomic_read(&g_hotplug_count) == 0) - atomic_set(&g_hotplug_lock, 0); - - return 0; -} - -void cpufreq_nightmare_min_cpu_lock(unsigned int num_core) -{ - int online, flag; - struct cpufreq_nightmare_cpuinfo *dbs_info; - - dbs_tuners_ins.min_cpu_lock = min(num_core, num_possible_cpus()); - - dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ - online = num_online_cpus(); - flag = (int)num_core - online; - if (flag <= 0) - return; - queue_work_on(dbs_info->cpu, dvfs_workqueues, &dbs_info->up_work); -} - -void cpufreq_nightmare_min_cpu_unlock(void) -{ - int online, lock, flag; - struct cpufreq_nightmare_cpuinfo *dbs_info; - - dbs_tuners_ins.min_cpu_lock = 0; - - dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ - online = num_online_cpus(); - lock = atomic_read(&g_hotplug_lock); - if (lock == 0) - return; - flag = lock - online; - if (flag >= 0) - return; - queue_work_on(dbs_info->cpu, dvfs_workqueues, &dbs_info->down_work); -} - -/* - * History of CPU usage - */ -struct cpu_usage { - unsigned int freq; - int load[NR_CPUS]; - unsigned int rq_avg; - unsigned int avg_load; -}; - -struct cpu_usage_history { - struct cpu_usage usage[MAX_HOTPLUG_RATE]; - unsigned int num_hist; -}; - -struct cpu_usage_history *hotplug_histories; - -static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) -{ - u64 idle_time; - u64 cur_wall_time; - u64 busy_time; - - cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); - - busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; - - idle_time = cur_wall_time - busy_time; - if (wall) - *wall = jiffies_to_usecs(cur_wall_time); - - return jiffies_to_usecs(idle_time); -} - -static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) -{ - u64 idle_time = get_cpu_idle_time_us(cpu, NULL); - - if (idle_time == -1ULL) - return get_cpu_idle_time_jiffy(cpu, wall); - else - idle_time += get_cpu_iowait_time_us(cpu, wall); - - return idle_time; -} - -static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, - cputime64_t *wall) -{ - u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); - - if (iowait_time == -1ULL) - return 0; - - return iowait_time; -} - -/************************** sysfs interface ************************/ - -static ssize_t show_sampling_rate_min(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", min_sampling_rate); -} - 
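The hotplug-lock helpers above reduce to a small piece of arithmetic: a non-zero g_hotplug_lock pins the target number of online cores, and the sign of lock minus online decides whether the up or the down worker is queued. A sketch of that routing with invented counts:

/* Standalone sketch of apply_hotplug_lock's routing above.
 * Returns +1 to queue the up work, -1 for the down work, 0 to do nothing. */
#include <stdio.h>

static int apply_lock(int lock, int online)
{
    int flag = lock - online;

    if (lock == 0 || flag == 0)
        return 0;
    return flag > 0 ? 1 : -1;
}

int main(void)
{
    printf("%d\n", apply_lock(2, 1)); /* lock=2 cores, 1 online: bring one up   */
    printf("%d\n", apply_lock(1, 4)); /* lock=1 core,  4 online: take cores down */
    printf("%d\n", apply_lock(0, 4)); /* no lock: leave it to the load policy   */
    return 0;
}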
-define_one_global_ro(sampling_rate_min); - -/* cpufreq_nightmare Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct kobject *kobj, struct attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ -} -show_one(sampling_rate, sampling_rate); -show_one(io_is_busy, io_is_busy); -show_one(sampling_down_factor, sampling_down_factor); -show_one(ignore_nice_load, ignore_nice); -show_one(freq_step_dec, freq_step_dec); -show_one(freq_step, freq_step); -show_one(cpu_up_rate, cpu_up_rate); -show_one(cpu_down_rate, cpu_down_rate); -show_one(up_nr_cpus, up_nr_cpus); -show_one(max_cpu_lock, max_cpu_lock); -show_one(min_cpu_lock, min_cpu_lock); -show_one(dvfs_debug, dvfs_debug); -show_one(inc_cpu_load_at_min_freq, inc_cpu_load_at_min_freq); -show_one(freq_for_responsiveness, freq_for_responsiveness); -show_one(inc_cpu_load, inc_cpu_load); -show_one(dec_cpu_load, dec_cpu_load); -show_one(up_avg_load, up_avg_load); -show_one(down_avg_load, down_avg_load); -show_one(sampling_up_factor, sampling_up_factor); -show_one(freq_up_brake, freq_up_brake); -show_one(hotplug_compare_level,hotplug_compare_level); - -static ssize_t show_hotplug_lock(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", atomic_read(&g_hotplug_lock)); -} - -#define show_hotplug_param(file_name, num_core, up_down) \ -static ssize_t show_##file_name##_##num_core##_##up_down \ -(struct kobject *kobj, struct attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%u\n", file_name[num_core - 1][up_down]); \ -} - -#define store_hotplug_param(file_name, num_core, up_down) \ -static ssize_t store_##file_name##_##num_core##_##up_down \ -(struct kobject *kobj, struct attribute *attr, \ - const char *buf, size_t count) \ -{ \ - unsigned int input; \ - int ret; \ - ret = sscanf(buf, "%u", &input); \ - if (ret != 1) \ - return -EINVAL; \ - file_name[num_core - 1][up_down] = input; \ - return count; \ -} - -show_hotplug_param(hotplug_freq, 1, 1); -show_hotplug_param(hotplug_freq, 2, 0); -#ifndef CONFIG_CPU_EXYNOS4210 -show_hotplug_param(hotplug_freq, 2, 1); -show_hotplug_param(hotplug_freq, 3, 0); -show_hotplug_param(hotplug_freq, 3, 1); -show_hotplug_param(hotplug_freq, 4, 0); -#endif - -show_hotplug_param(hotplug_rq, 1, 1); -show_hotplug_param(hotplug_rq, 2, 0); -#ifndef CONFIG_CPU_EXYNOS4210 -show_hotplug_param(hotplug_rq, 2, 1); -show_hotplug_param(hotplug_rq, 3, 0); -show_hotplug_param(hotplug_rq, 3, 1); -show_hotplug_param(hotplug_rq, 4, 0); -#endif - -store_hotplug_param(hotplug_freq, 1, 1); -store_hotplug_param(hotplug_freq, 2, 0); -#ifndef CONFIG_CPU_EXYNOS4210 -store_hotplug_param(hotplug_freq, 2, 1); -store_hotplug_param(hotplug_freq, 3, 0); -store_hotplug_param(hotplug_freq, 3, 1); -store_hotplug_param(hotplug_freq, 4, 0); -#endif - -store_hotplug_param(hotplug_rq, 1, 1); -store_hotplug_param(hotplug_rq, 2, 0); -#ifndef CONFIG_CPU_EXYNOS4210 -store_hotplug_param(hotplug_rq, 2, 1); -store_hotplug_param(hotplug_rq, 3, 0); -store_hotplug_param(hotplug_rq, 3, 1); -store_hotplug_param(hotplug_rq, 4, 0); -#endif - -define_one_global_rw(hotplug_freq_1_1); -define_one_global_rw(hotplug_freq_2_0); -#ifndef CONFIG_CPU_EXYNOS4210 -define_one_global_rw(hotplug_freq_2_1); -define_one_global_rw(hotplug_freq_3_0); -define_one_global_rw(hotplug_freq_3_1); -define_one_global_rw(hotplug_freq_4_0); -#endif - -define_one_global_rw(hotplug_rq_1_1); -define_one_global_rw(hotplug_rq_2_0); -#ifndef CONFIG_CPU_EXYNOS4210 
-define_one_global_rw(hotplug_rq_2_1); -define_one_global_rw(hotplug_rq_3_0); -define_one_global_rw(hotplug_rq_3_1); -define_one_global_rw(hotplug_rq_4_0); -#endif - -static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); - return count; -} - -static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - dbs_tuners_ins.io_is_busy = !!input; - return count; -} - -static ssize_t store_sampling_down_factor(struct kobject *a, - struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) - return -EINVAL; - dbs_tuners_ins.sampling_down_factor = input; - - return count; -} - -static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - unsigned int j; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - if (input > 1) - input = 1; - - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - return count; - } - dbs_tuners_ins.ignore_nice = input; - - /* we need to re-evaluate prev_cpu_idle */ - for_each_online_cpu(j) { - struct cpufreq_nightmare_cpuinfo *dbs_info; - dbs_info = &per_cpu(od_cpu_dbs_info, j); - dbs_info->prev_cpu_idle = - get_cpu_idle_time(j, &dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - } - return count; -} - -static ssize_t store_freq_step_dec(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.freq_step_dec = min(input, 100u); - return count; -} - -static ssize_t store_freq_step(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.freq_step = min(input, 100u); - return count; -} - -static ssize_t store_cpu_up_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.cpu_up_rate = min(input, MAX_HOTPLUG_RATE); - return count; -} - -static ssize_t store_cpu_down_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.cpu_down_rate = min(input, MAX_HOTPLUG_RATE); - return count; -} - -static ssize_t store_up_nr_cpus(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.up_nr_cpus = min(input, num_possible_cpus()); - return count; -} - -static ssize_t store_max_cpu_lock(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.max_cpu_lock = min(input, 
num_possible_cpus()); - return count; -} - -static ssize_t store_min_cpu_lock(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - if (input == 0) - cpufreq_nightmare_min_cpu_unlock(); - else - cpufreq_nightmare_min_cpu_lock(input); - return count; -} - -static ssize_t store_hotplug_lock(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - int prev_lock; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - input = min(input, num_possible_cpus()); - prev_lock = atomic_read(&dbs_tuners_ins.hotplug_lock); - - if (prev_lock) - cpufreq_nightmare_cpu_unlock(prev_lock); - - if (input == 0) { - atomic_set(&dbs_tuners_ins.hotplug_lock, 0); - return count; - } - - ret = cpufreq_nightmare_cpu_lock(input); - if (ret) { - printk(KERN_ERR "[HOTPLUG] already locked with smaller value %d < %d\n", - atomic_read(&g_hotplug_lock), input); - return ret; - } - - atomic_set(&dbs_tuners_ins.hotplug_lock, input); - - return count; -} - -static ssize_t store_dvfs_debug(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.dvfs_debug = input > 0; - return count; -} - -static ssize_t store_inc_cpu_load_at_min_freq(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > 100) { - return -EINVAL; - } - dbs_tuners_ins.inc_cpu_load_at_min_freq = min(input,dbs_tuners_ins.inc_cpu_load); - return count; -} - -static ssize_t store_freq_for_responsiveness(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.freq_for_responsiveness = input; - return count; -} - -/* inc_cpu_load */ -static ssize_t store_inc_cpu_load(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.inc_cpu_load = max(min(input,100u),10u); - return count; -} - -/* dec_cpu_load */ -static ssize_t store_dec_cpu_load(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.dec_cpu_load = max(min(input,95u),5u); - return count; -} - -/* up_avg_load */ -static ssize_t store_up_avg_load(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.up_avg_load = max(min(input,100u),10u); - return count; -} - -/* down_avg_load */ -static ssize_t store_down_avg_load(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.down_avg_load = max(min(input,95u),5u); - return count; -} - -/* sampling_up_factor */ -static ssize_t store_sampling_up_factor(struct kobject *a, - struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > 
MAX_SAMPLING_UP_FACTOR || input < 1) - return -EINVAL; - dbs_tuners_ins.sampling_up_factor = input; - - return count; -} - -/* freq_up_brake */ -static ssize_t store_freq_up_brake(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - ret = sscanf(buf, "%u", &input); - if (ret != 1 || input < 0 || input > 100) - return -EINVAL; - - if (input == dbs_tuners_ins.freq_up_brake) { /* nothing to do */ - return count; - } - - dbs_tuners_ins.freq_up_brake = input; - - return count; -} - -/* hotplug_compare_level */ -static ssize_t store_hotplug_compare_level(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - ret = sscanf(buf, "%u", &input); - if (ret != 1 || input < 0 || input > 1) - return -EINVAL; - - if (input == dbs_tuners_ins.hotplug_compare_level) { /* nothing to do */ - return count; - } - - dbs_tuners_ins.hotplug_compare_level = input; - - return count; -} - -define_one_global_rw(sampling_rate); -define_one_global_rw(io_is_busy); -define_one_global_rw(sampling_down_factor); -define_one_global_rw(ignore_nice_load); -define_one_global_rw(freq_step_dec); -define_one_global_rw(freq_step); -define_one_global_rw(cpu_up_rate); -define_one_global_rw(cpu_down_rate); -define_one_global_rw(up_nr_cpus); -define_one_global_rw(max_cpu_lock); -define_one_global_rw(min_cpu_lock); -define_one_global_rw(hotplug_lock); -define_one_global_rw(dvfs_debug); -define_one_global_rw(inc_cpu_load_at_min_freq); -define_one_global_rw(freq_for_responsiveness); -define_one_global_rw(inc_cpu_load); -define_one_global_rw(dec_cpu_load); -define_one_global_rw(up_avg_load); -define_one_global_rw(down_avg_load); -define_one_global_rw(sampling_up_factor); -define_one_global_rw(freq_up_brake); -define_one_global_rw(hotplug_compare_level); - -static struct attribute *dbs_attributes[] = { - &sampling_rate_min.attr, - &sampling_rate.attr, - &sampling_down_factor.attr, - &ignore_nice_load.attr, - &io_is_busy.attr, - &freq_step_dec.attr, - &freq_step.attr, - &cpu_up_rate.attr, - &cpu_down_rate.attr, - &up_nr_cpus.attr, - /* priority: hotplug_lock > max_cpu_lock > min_cpu_lock - Exception: hotplug_lock on early_suspend uses min_cpu_lock */ - &max_cpu_lock.attr, - &min_cpu_lock.attr, - &hotplug_lock.attr, - &dvfs_debug.attr, - &hotplug_freq_1_1.attr, - &hotplug_freq_2_0.attr, -#ifndef CONFIG_CPU_EXYNOS4210 - &hotplug_freq_2_1.attr, - &hotplug_freq_3_0.attr, - &hotplug_freq_3_1.attr, - &hotplug_freq_4_0.attr, -#endif - &hotplug_rq_1_1.attr, - &hotplug_rq_2_0.attr, -#ifndef CONFIG_CPU_EXYNOS4210 - &hotplug_rq_2_1.attr, - &hotplug_rq_3_0.attr, - &hotplug_rq_3_1.attr, - &hotplug_rq_4_0.attr, -#endif - &inc_cpu_load_at_min_freq.attr, - &freq_for_responsiveness.attr, - &inc_cpu_load.attr, - &dec_cpu_load.attr, - &up_avg_load.attr, - &down_avg_load.attr, - &sampling_up_factor.attr, - &freq_up_brake.attr, - &hotplug_compare_level.attr, - NULL -}; - -static struct attribute_group dbs_attr_group = { - .attrs = dbs_attributes, - .name = "nightmare", -}; - -/************************** sysfs end ************************/ - -static void __ref cpu_up_work(struct work_struct *work) -{ - int cpu; - int online = num_online_cpus(); - int nr_up = dbs_tuners_ins.up_nr_cpus; - int min_cpu_lock = dbs_tuners_ins.min_cpu_lock; - int hotplug_lock = atomic_read(&g_hotplug_lock); - - if (hotplug_lock && min_cpu_lock) - nr_up = max(hotplug_lock, min_cpu_lock) - online; - else if (hotplug_lock) - nr_up = hotplug_lock - online; - else if 
(min_cpu_lock) - nr_up = max(nr_up, min_cpu_lock - online); - - if (online == 1) { - printk(KERN_ERR "CPU_UP 3\n"); - cpu_up(num_possible_cpus() - 1); - nr_up -= 1; - } - - for_each_cpu_not(cpu, cpu_online_mask) { - if (nr_up-- == 0) - break; - if (cpu == 0) - continue; - printk(KERN_ERR "CPU_UP %d\n", cpu); - cpu_up(cpu); - } -} - -static void cpu_down_work(struct work_struct *work) -{ - int cpu; - int online = num_online_cpus(); - int nr_down = 1; - int hotplug_lock = atomic_read(&g_hotplug_lock); - - if (hotplug_lock) - nr_down = online - hotplug_lock; - - for_each_online_cpu(cpu) { - if (cpu == 0) - continue; - printk(KERN_ERR "CPU_DOWN %d\n", cpu); - cpu_down(cpu); - if (--nr_down == 0) - break; - } -} - -static void debug_hotplug_check(int which, int rq_avg, int freq, - struct cpu_usage *usage) -{ - int cpu; - printk(KERN_ERR "CHECK %s rq %d.%02d freq %d [", which ? "up" : "down", - rq_avg / 100, rq_avg % 100, freq); - for_each_online_cpu(cpu) { - printk(KERN_ERR "(%d, %d), ", cpu, usage->load[cpu]); - } - printk(KERN_ERR "]\n"); -} - -static int check_up(void) -{ - int num_hist = hotplug_histories->num_hist; - struct cpu_usage *usage; - int freq, rq_avg; - int avg_load; - int i; - int up_rate = dbs_tuners_ins.cpu_up_rate; - unsigned int up_avg_load = dbs_tuners_ins.up_avg_load; - unsigned int hotplug_compare_level = dbs_tuners_ins.hotplug_compare_level; - int up_freq, up_rq; - int min_freq = INT_MAX; - int min_rq_avg = INT_MAX; - int min_avg_load = INT_MAX; - int online; - int hotplug_lock = atomic_read(&g_hotplug_lock); - - if (hotplug_lock > 0) - return 0; - - online = num_online_cpus(); - up_freq = hotplug_freq[online - 1][HOTPLUG_UP_INDEX]; - up_rq = hotplug_rq[online - 1][HOTPLUG_UP_INDEX]; - - if (online == num_possible_cpus()) - return 0; - - if (dbs_tuners_ins.max_cpu_lock != 0 - && online >= dbs_tuners_ins.max_cpu_lock) - return 0; - - if (dbs_tuners_ins.min_cpu_lock != 0 - && online < dbs_tuners_ins.min_cpu_lock) - return 1; - - if (num_hist == 0 || num_hist % up_rate) - return 0; - - if (hotplug_compare_level == 0) { - for (i = num_hist - 1; i >= num_hist - up_rate; --i) { - usage = &hotplug_histories->usage[i]; - - freq = usage->freq; - rq_avg = usage->rq_avg; - avg_load = usage->avg_load; - - min_freq = min(min_freq, freq); - min_rq_avg = min(min_rq_avg, rq_avg); - min_avg_load = min(min_avg_load, avg_load); - - if (dbs_tuners_ins.dvfs_debug) - debug_hotplug_check(1, rq_avg, freq, usage); - } - } else { - usage = &hotplug_histories->usage[num_hist - 1]; - min_freq = usage->freq; - min_rq_avg = usage->rq_avg; - min_avg_load = usage->avg_load; - if (dbs_tuners_ins.dvfs_debug) - debug_hotplug_check(1, min_rq_avg, min_freq, usage); - } - - if (min_freq >= up_freq && min_rq_avg > up_rq) { - if (online >= 1) { - if (min_avg_load < up_avg_load) - return 0; - } - printk(KERN_ERR "[HOTPLUG IN] %s %d>=%d && %d>%d\n", - __func__, min_freq, up_freq, min_rq_avg, up_rq); - hotplug_histories->num_hist = 0; - return 1; - } - return 0; -} - -static int check_down(void) -{ - int num_hist = hotplug_histories->num_hist; - struct cpu_usage *usage; - int freq, rq_avg; - int avg_load; - int i; - int down_rate = dbs_tuners_ins.cpu_down_rate; - unsigned int down_avg_load = dbs_tuners_ins.down_avg_load; - unsigned int hotplug_compare_level = dbs_tuners_ins.hotplug_compare_level; - int down_freq, down_rq; - int max_freq = 0; - int max_rq_avg = 0; - int max_avg_load = 0; - int online; - int hotplug_lock = atomic_read(&g_hotplug_lock); - - if (hotplug_lock > 0) - return 0; - - online = 
num_online_cpus(); - down_freq = hotplug_freq[online - 1][HOTPLUG_DOWN_INDEX]; - down_rq = hotplug_rq[online - 1][HOTPLUG_DOWN_INDEX]; - - if (online == 1) - return 0; - - if (dbs_tuners_ins.max_cpu_lock != 0 - && online > dbs_tuners_ins.max_cpu_lock) - return 1; - - if (dbs_tuners_ins.min_cpu_lock != 0 - && online <= dbs_tuners_ins.min_cpu_lock) - return 0; - - if (num_hist == 0 || num_hist % down_rate) - return 0; - - if (hotplug_compare_level == 0) { - for (i = num_hist - 1; i >= num_hist - down_rate; --i) { - usage = &hotplug_histories->usage[i]; - - freq = usage->freq; - rq_avg = usage->rq_avg; - avg_load = usage->avg_load; - - max_freq = max(max_freq, freq); - max_rq_avg = max(max_rq_avg, rq_avg); - max_avg_load = max(max_avg_load, avg_load); - - if (dbs_tuners_ins.dvfs_debug) - debug_hotplug_check(0, rq_avg, freq, usage); - } - } else { - usage = &hotplug_histories->usage[num_hist - 1]; - max_freq = usage->freq; - max_rq_avg = usage->rq_avg; - max_avg_load = usage->avg_load; - if (dbs_tuners_ins.dvfs_debug) - debug_hotplug_check(0, max_rq_avg, max_freq, usage); - } - - if ((max_freq <= down_freq && max_rq_avg <= down_rq) || (online >= 2 && max_avg_load < down_avg_load)) { - printk(KERN_ERR "[HOTPLUG OUT] %s %d<=%d && %d<%d\n", - __func__, max_freq, down_freq, max_rq_avg, down_rq); - hotplug_histories->num_hist = 0; - return 1; - } - - return 0; -} - -static void dbs_check_cpu(struct cpufreq_nightmare_cpuinfo *this_dbs_info) -{ - struct cpufreq_policy *policy; - unsigned int j; - int num_hist = hotplug_histories->num_hist; - int max_hotplug_rate = max(dbs_tuners_ins.cpu_up_rate,dbs_tuners_ins.cpu_down_rate); - int inc_cpu_load = dbs_tuners_ins.inc_cpu_load; - int dec_cpu_load = dbs_tuners_ins.dec_cpu_load; - unsigned int avg_rate_mult = 0; - - /* add total_load, avg_load to get average load */ - unsigned int total_load = 0; - unsigned int avg_load = 0; - int rq_avg = 0; - policy = this_dbs_info->cur_policy; - - hotplug_histories->usage[num_hist].freq = policy->cur; - hotplug_histories->usage[num_hist].rq_avg = get_nr_run_avg(); - - /* add total_load, avg_load to get average load */ - rq_avg = hotplug_histories->usage[num_hist].rq_avg; - - ++hotplug_histories->num_hist; - - for_each_cpu(j, policy->cpus) { - struct cpufreq_nightmare_cpuinfo *j_dbs_info; - cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; - cputime64_t prev_wall_time, prev_idle_time, prev_iowait_time; - unsigned int idle_time, wall_time, iowait_time; - int load; - //int freq_avg; - - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - - prev_wall_time = j_dbs_info->prev_cpu_wall; - prev_idle_time = j_dbs_info->prev_cpu_idle; - prev_iowait_time = j_dbs_info->prev_cpu_iowait; - - cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); - cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); - - wall_time = (unsigned int) cputime64_sub(cur_wall_time, - prev_wall_time); - j_dbs_info->prev_cpu_wall = cur_wall_time; - - idle_time = (unsigned int) cputime64_sub(cur_idle_time, - prev_idle_time); - j_dbs_info->prev_cpu_idle = cur_idle_time; - - iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, - prev_iowait_time); - j_dbs_info->prev_cpu_iowait = cur_iowait_time; - - if (dbs_tuners_ins.ignore_nice) { - u64 cur_nice; - unsigned long cur_nice_jiffies; - - cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - - j_dbs_info->prev_cpu_nice; - /* - * Assumption: nice time between sampling periods will - * be less than 2^32 jiffies for 32 bit sys - */ - cur_nice_jiffies = (unsigned long) - cputime64_to_jiffies64(cur_nice); - - 
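check_up and check_down above make the hotplug decision over a window of recent samples: a core is brought in only when every one of the last cpu_up_rate samples sat at or above the per-core-count frequency threshold and above the runqueue threshold from the hotplug_freq and hotplug_rq tables, while check_down mirrors this with window maxima and the opposite comparisons. The sketch below covers just the plug-in side and omits the average-load, compare-level and lock checks; the thresholds echo the 540 MHz and 1.00-task defaults in the removed tables, and the sample history is invented.

/* Standalone sketch of the windowed hotplug-in test above. */
#include <stdio.h>

struct sample { int freq; int rq_avg; };   /* kHz, runnable tasks x 100 */

static int check_up(const struct sample *hist, int num_hist, int up_rate,
                    int up_freq, int up_rq)
{
    int min_freq, min_rq, i;

    if (num_hist < up_rate || num_hist % up_rate)
        return 0;                          /* only decide once per window */

    min_freq = hist[num_hist - 1].freq;
    min_rq   = hist[num_hist - 1].rq_avg;
    for (i = num_hist - up_rate; i < num_hist; i++) {
        if (hist[i].freq < min_freq)  min_freq = hist[i].freq;
        if (hist[i].rq_avg < min_rq)  min_rq   = hist[i].rq_avg;
    }
    return min_freq >= up_freq && min_rq > up_rq;
}

int main(void)
{
    struct sample hist[] = {
        { 1026000, 250 }, { 1134000, 300 }, { 1242000, 280 },
        { 1350000, 320 }, { 1458000, 310 },
    };
    /* thresholds for the 1 -> 2 core step: 540 MHz and 1.00 runnable task */
    printf("%d\n", check_up(hist, 5, 5, 540000, 100));   /* -> 1: plug in */
    return 0;
}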
j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - idle_time += jiffies_to_usecs(cur_nice_jiffies); - } - - if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) - idle_time -= iowait_time; - - if (unlikely(!wall_time || wall_time < idle_time)) - continue; - - load = 100 * (wall_time - idle_time) / wall_time; - - if (cpu_online(j)) { - total_load += load; - hotplug_histories->usage[num_hist].load[j] = load; - } else { - hotplug_histories->usage[num_hist].load[j] = -1; - } - - } - /* calculate the average load across all related CPUs */ - avg_load = total_load / num_online_cpus(); - hotplug_histories->usage[num_hist].avg_load = avg_load; - - /* Check for CPU hotplug */ - if (check_up()) { - queue_work_on(this_dbs_info->cpu, dvfs_workqueues,&this_dbs_info->up_work); - } - else if (check_down()) { - queue_work_on(this_dbs_info->cpu, dvfs_workqueues,&this_dbs_info->down_work); - } - if (hotplug_histories->num_hist == max_hotplug_rate) - hotplug_histories->num_hist = 0; - - /* CPUs Online Scale Frequency*/ - for_each_cpu(j, policy->cpus) { - struct cpufreq_nightmare_cpuinfo *j_dbs_info; - int load; - int index; - - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - - if (cpu_online(j)) { - index = 0; - load = hotplug_histories->usage[num_hist].load[j]; - // just a tips to scale up the frequency fastly - if (j_dbs_info->cur_policy->cur < dbs_tuners_ins.freq_for_responsiveness) - inc_cpu_load = dbs_tuners_ins.inc_cpu_load_at_min_freq; - else - inc_cpu_load = dbs_tuners_ins.inc_cpu_load; - - // Check for frequency increase or for frequency decrease - if (load >= inc_cpu_load) { - unsigned int inc_load = (load * j_dbs_info->cur_policy->min) / 100; - unsigned int inc_step = (dbs_tuners_ins.freq_step * j_dbs_info->cur_policy->min) / 100; - unsigned int inc; - unsigned int freq_up = 0; - - avg_rate_mult += dbs_tuners_ins.sampling_up_factor; - - // if we cannot increment the frequency anymore, break out early - if (j_dbs_info->cur_policy->cur == j_dbs_info->cur_policy->max) { - continue; - } - - inc = inc_load + inc_step; - inc -= (dbs_tuners_ins.freq_up_brake * j_dbs_info->cur_policy->min) / 100; - - freq_up = min(j_dbs_info->cur_policy->max,j_dbs_info->cur_policy->cur + inc); - - if (freq_up != j_dbs_info->cur_policy->cur) { - __cpufreq_driver_target(j_dbs_info->cur_policy, freq_up, CPUFREQ_RELATION_L); - } - - } - else if (load < dec_cpu_load && load > -1) { - unsigned int dec_load = ((100 - load) * (j_dbs_info->cur_policy->min)) / 100; - unsigned int dec_step = (dbs_tuners_ins.freq_step_dec * (j_dbs_info->cur_policy->min)) / 100; - unsigned int dec; - unsigned int freq_down = 0; - - avg_rate_mult += dbs_tuners_ins.sampling_down_factor; - - // if we cannot reduce the frequency anymore, break out early - if (j_dbs_info->cur_policy->cur == j_dbs_info->cur_policy->min) { - continue; - } - - dec = dec_load + dec_step; - - freq_down = max(j_dbs_info->cur_policy->min,j_dbs_info->cur_policy->cur - dec); - - if (freq_down != j_dbs_info->cur_policy->cur) { - __cpufreq_driver_target(j_dbs_info->cur_policy, freq_down, CPUFREQ_RELATION_L); - } - } - } - } - /* We want all CPUs to do sampling nearly on - * same jiffy - */ - if (avg_rate_mult > 0) - this_dbs_info->avg_rate_mult = (avg_rate_mult * 10) / num_online_cpus(); - else - this_dbs_info->avg_rate_mult = 10; - - return; -} - -static void do_dbs_timer(struct work_struct *work) -{ - struct cpufreq_nightmare_cpuinfo *dbs_info = - container_of(work, struct cpufreq_nightmare_cpuinfo, work.work); - unsigned int cpu = dbs_info->cpu; - int delay; - 
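/*
 * Editorial aside, not part of the original patch: a minimal userspace model
 * of the per-CPU scaling step in the nightmare dbs_check_cpu() loop above.
 * The function name next_freq() and the frequencies in main() are
 * illustrative assumptions; only the arithmetic mirrors the deleted code
 * (both step sizes are scaled by policy->min, with freq_up_brake subtracted
 * on the way up and freq_step_dec added on the way down).
 */
#include <stdio.h>

static unsigned int next_freq(unsigned int cur, unsigned int min, unsigned int max,
                              int load, int inc_cpu_load, int dec_cpu_load,
                              int freq_step, int freq_step_dec, int freq_up_brake)
{
    if (load >= inc_cpu_load && cur < max) {
        /* ramp up: load-proportional step plus freq_step, minus the brake */
        unsigned int inc = (load * min) / 100 + (freq_step * min) / 100
                         - (freq_up_brake * min) / 100;
        unsigned int up = cur + inc;
        return up > max ? max : up;
    }
    if (load >= 0 && load < dec_cpu_load && cur > min) {
        /* ramp down: idle-proportional step plus freq_step_dec */
        unsigned int dec = ((100 - load) * min) / 100 + (freq_step_dec * min) / 100;
        return (cur > min + dec) ? cur - dec : min;
    }
    return cur;
}

int main(void)
{
    /* hypothetical 200 MHz..1.5 GHz policy at 800 MHz under 80% load */
    printf("%u\n", next_freq(800000, 200000, 1512000, 80, 70, 30, 30, 13, 5));
    return 0;
}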
- mutex_lock(&dbs_info->timer_mutex); - - dbs_check_cpu(dbs_info); - /* We want all CPUs to do sampling nearly on - * same jiffy - */ - delay = usecs_to_jiffies((dbs_tuners_ins.sampling_rate * (dbs_info->avg_rate_mult < 10 ? 10 : dbs_info->avg_rate_mult)) / 10); - - if (num_online_cpus() > 1) - delay -= jiffies % delay; - - queue_delayed_work_on(cpu, dvfs_workqueues, &dbs_info->work, delay); - mutex_unlock(&dbs_info->timer_mutex); -} - -static inline void dbs_timer_init(struct cpufreq_nightmare_cpuinfo *dbs_info) -{ - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(DEF_START_DELAY * 1000 * 1000 - + dbs_tuners_ins.sampling_rate); - if (num_online_cpus() > 1) - delay -= jiffies % delay; - - INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); - INIT_WORK(&dbs_info->up_work, cpu_up_work); - INIT_WORK(&dbs_info->down_work, cpu_down_work); - - queue_delayed_work_on(dbs_info->cpu, dvfs_workqueues, - &dbs_info->work, delay + 2 * HZ); -} - -static inline void dbs_timer_exit(struct cpufreq_nightmare_cpuinfo *dbs_info) -{ - cancel_delayed_work_sync(&dbs_info->work); - cancel_work_sync(&dbs_info->up_work); - cancel_work_sync(&dbs_info->down_work); -} - -static int reboot_notifier_call(struct notifier_block *this, - unsigned long code, void *_cmd) -{ - atomic_set(&g_hotplug_lock, 1); - return NOTIFY_DONE; -} - -static struct notifier_block reboot_notifier = { - .notifier_call = reboot_notifier_call, -}; - -#ifdef CONFIG_HAS_EARLYSUSPEND -static struct early_suspend early_suspend; -unsigned int previous_freq_step; -unsigned int previous_sampling_rate; -static void cpufreq_nightmare_early_suspend(struct early_suspend *h) -{ -#if EARLYSUSPEND_HOTPLUGLOCK - dbs_tuners_ins.early_suspend = - atomic_read(&g_hotplug_lock); -#endif - previous_freq_step = dbs_tuners_ins.freq_step; - previous_sampling_rate = dbs_tuners_ins.sampling_rate; - dbs_tuners_ins.freq_step = 10; - dbs_tuners_ins.sampling_rate = 200000; -#if EARLYSUSPEND_HOTPLUGLOCK - atomic_set(&g_hotplug_lock, - (dbs_tuners_ins.min_cpu_lock) ? 
dbs_tuners_ins.min_cpu_lock : 1); - apply_hotplug_lock(); - stop_rq_work(); -#endif -} -static void cpufreq_nightmare_late_resume(struct early_suspend *h) -{ -#if EARLYSUSPEND_HOTPLUGLOCK - atomic_set(&g_hotplug_lock, dbs_tuners_ins.early_suspend); -#endif - dbs_tuners_ins.early_suspend = -1; - dbs_tuners_ins.freq_step = previous_freq_step; - dbs_tuners_ins.sampling_rate = previous_sampling_rate; -#if EARLYSUSPEND_HOTPLUGLOCK - apply_hotplug_lock(); - start_rq_work(); -#endif -} -#endif - -static int cpufreq_governor_nightmare(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpufreq_nightmare_cpuinfo *this_dbs_info; - unsigned int j; - int rc; - - this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - dbs_tuners_ins.max_freq = policy->max; - dbs_tuners_ins.min_freq = policy->min; - hotplug_histories->num_hist = 0; - start_rq_work(); - - mutex_lock(&dbs_mutex); - - dbs_enable++; - for_each_cpu(j, policy->cpus) { - struct cpufreq_nightmare_cpuinfo *j_dbs_info; - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - j_dbs_info->prev_cpu_nice = - kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - } - this_dbs_info->cpu = cpu; - this_dbs_info->avg_rate_mult = 20; - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 1) { - rc = sysfs_create_group(cpufreq_global_kobject, - &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - min_sampling_rate = MIN_SAMPLING_RATE; - dbs_tuners_ins.sampling_rate = DEF_SAMPLING_RATE; - dbs_tuners_ins.io_is_busy = 0; - } - mutex_unlock(&dbs_mutex); - - register_reboot_notifier(&reboot_notifier); - - mutex_init(&this_dbs_info->timer_mutex); - dbs_timer_init(this_dbs_info); - -#if !EARLYSUSPEND_HOTPLUGLOCK - register_pm_notifier(&pm_notifier); -#endif -#ifdef CONFIG_HAS_EARLYSUSPEND - register_early_suspend(&early_suspend); -#endif - break; - - case CPUFREQ_GOV_STOP: -#ifdef CONFIG_HAS_EARLYSUSPEND - unregister_early_suspend(&early_suspend); -#endif -#if !EARLYSUSPEND_HOTPLUGLOCK - unregister_pm_notifier(&pm_notifier); -#endif - - dbs_timer_exit(this_dbs_info); - - mutex_lock(&dbs_mutex); - mutex_destroy(&this_dbs_info->timer_mutex); - - unregister_reboot_notifier(&reboot_notifier); - - dbs_enable--; - mutex_unlock(&dbs_mutex); - - stop_rq_work(); - - if (!dbs_enable) - sysfs_remove_group(cpufreq_global_kobject, - &dbs_attr_group); - - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&this_dbs_info->timer_mutex); - - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->max, - CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->min, - CPUFREQ_RELATION_L); - - mutex_unlock(&this_dbs_info->timer_mutex); - break; - } - return 0; -} - -static int __init cpufreq_gov_nightmare_init(void) -{ - int ret; - - ret = init_rq_avg(); - if (ret) - return ret; - - hotplug_histories = kzalloc(sizeof(struct cpu_usage_history), GFP_KERNEL); - if (!hotplug_histories) { - pr_err("%s cannot create hotplug history array\n", __func__); - ret = -ENOMEM; - goto err_hist; - } - - dvfs_workqueues = create_workqueue("knightmare"); - if (!dvfs_workqueues) { - pr_err("%s cannot create 
workqueue\n", __func__); - ret = -ENOMEM; - goto err_queue; - } - - ret = cpufreq_register_governor(&cpufreq_gov_nightmare); - if (ret) - goto err_reg; - -#ifdef CONFIG_HAS_EARLYSUSPEND - early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; - early_suspend.suspend = cpufreq_nightmare_early_suspend; - early_suspend.resume = cpufreq_nightmare_late_resume; -#endif - - return ret; - -err_reg: - destroy_workqueue(dvfs_workqueues); -err_queue: - kfree(hotplug_histories); -err_hist: - kfree(rq_data); - return ret; -} - -static void __exit cpufreq_gov_nightmare_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_nightmare); - destroy_workqueue(dvfs_workqueues); - kfree(hotplug_histories); - kfree(rq_data); -} - -MODULE_AUTHOR("ByungChang Cha "); -MODULE_DESCRIPTION("'cpufreq_nightmare' - A dynamic cpufreq/cpuhotplug governor"); -MODULE_LICENSE("GPL"); - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_NIGHTMARE -fs_initcall(cpufreq_gov_nightmare_init); -#else -module_init(cpufreq_gov_nightmare_init); -#endif -module_exit(cpufreq_gov_nightmare_exit); diff --git a/drivers/cpufreq/cpufreq_pegasusq.c b/drivers/cpufreq/cpufreq_pegasusq.c deleted file mode 100644 index 230abf81..00000000 --- a/drivers/cpufreq/cpufreq_pegasusq.c +++ /dev/null @@ -1,1636 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_pegasusq.c - * - * Copyright (C) 2011 Samsung Electronics co. ltd - * ByungChang Cha - * - * Based on ondemand governor - * Copyright (C) 2001 Russell King - * (C) 2003 Venkatesh Pallipadi . - * Jun Nakajima - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef CONFIG_HAS_EARLYSUSPEND -#include -#endif -#define EARLYSUSPEND_HOTPLUGLOCK 1 - -/* - * runqueue average - */ - -#define RQ_AVG_TIMER_RATE 10 - -static bool boostpulse_relayf = false; -static unsigned int boostpulse_relay_sr = 0; -static unsigned int Lboostpulse_value = 1134000; - -extern void apenable_auto_hotplug(bool state); -extern bool apget_enable_auto_hotplug(void); -static bool prev_apenable; - -struct runqueue_data { - unsigned int nr_run_avg; - unsigned int update_rate; - int64_t last_time; - int64_t total_time; - struct delayed_work work; - struct workqueue_struct *nr_run_wq; - spinlock_t lock; -}; - -static struct runqueue_data *rq_data; -static void rq_work_fn(struct work_struct *work); - -static void start_rq_work(void) -{ - rq_data->nr_run_avg = 0; - rq_data->last_time = 0; - rq_data->total_time = 0; - if (rq_data->nr_run_wq == NULL) - rq_data->nr_run_wq = - create_singlethread_workqueue("nr_run_avg"); - - queue_delayed_work(rq_data->nr_run_wq, &rq_data->work, - msecs_to_jiffies(rq_data->update_rate)); - return; -} - -static void stop_rq_work(void) -{ - if (rq_data->nr_run_wq) - cancel_delayed_work(&rq_data->work); - return; -} - -static int __init init_rq_avg(void) -{ - rq_data = kzalloc(sizeof(struct runqueue_data), GFP_KERNEL); - if (rq_data == NULL) { - pr_err("%s cannot allocate memory\n", __func__); - return -ENOMEM; - } - spin_lock_init(&rq_data->lock); - rq_data->update_rate = RQ_AVG_TIMER_RATE; - INIT_DELAYED_WORK_DEFERRABLE(&rq_data->work, rq_work_fn); - - return 0; -} - -static void rq_work_fn(struct work_struct *work) -{ - int64_t time_diff = 0; - int64_t nr_run = 0; - unsigned long flags = 0; - int64_t cur_time = 
ktime_to_ns(ktime_get()); - - spin_lock_irqsave(&rq_data->lock, flags); - - if (rq_data->last_time == 0) - rq_data->last_time = cur_time; - if (rq_data->nr_run_avg == 0) - rq_data->total_time = 0; - - nr_run = nr_running() * 100; - time_diff = cur_time - rq_data->last_time; - do_div(time_diff, 1000 * 1000); - - if (time_diff != 0 && rq_data->total_time != 0) { - nr_run = (nr_run * time_diff) + - (rq_data->nr_run_avg * rq_data->total_time); - do_div(nr_run, rq_data->total_time + time_diff); - } - rq_data->nr_run_avg = nr_run; - rq_data->total_time += time_diff; - rq_data->last_time = cur_time; - - if (rq_data->update_rate != 0) - queue_delayed_work(rq_data->nr_run_wq, &rq_data->work, - msecs_to_jiffies(rq_data->update_rate)); - - spin_unlock_irqrestore(&rq_data->lock, flags); -} - -static unsigned int get_nr_run_avg(void) -{ - unsigned int nr_run_avg; - unsigned long flags = 0; - - spin_lock_irqsave(&rq_data->lock, flags); - nr_run_avg = rq_data->nr_run_avg; - rq_data->nr_run_avg = 0; - spin_unlock_irqrestore(&rq_data->lock, flags); - - return nr_run_avg; -} - - -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ - -#define DEF_SAMPLING_DOWN_FACTOR (3) -#define MAX_SAMPLING_DOWN_FACTOR (100000) -#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (14) -#define DEF_FREQUENCY_UP_THRESHOLD (95) - -/* for multiple freq_step */ -#define DEF_UP_THRESHOLD_DIFF (5) - -#define DEF_FREQUENCY_MIN_SAMPLE_RATE (10000) -#define MIN_FREQUENCY_UP_THRESHOLD (11) -#define MAX_FREQUENCY_UP_THRESHOLD (100) -#define DEF_SAMPLING_RATE (40000) -#define MIN_SAMPLING_RATE (10000) -#define MAX_HOTPLUG_RATE (40u) - -#define DEF_MAX_CPU_LOCK (0) -#define DEF_MIN_CPU_LOCK (0) -#define DEF_CPU_UP_FREQ (500000) -#define DEF_CPU_DOWN_FREQ (200000) -#define DEF_UP_NR_CPUS (1) -#define DEF_CPU_UP_RATE (9) -#define DEF_CPU_DOWN_RATE (3) -#define DEF_FREQ_STEP (30) -/* for multiple freq_step */ -#define DEF_FREQ_STEP_DEC (13) - -#define DEF_START_DELAY (0) - -#define UP_THRESHOLD_AT_MIN_FREQ (55) -#define FREQ_FOR_RESPONSIVENESS (400000) -/* for fast decrease */ -#define FREQ_FOR_FAST_DOWN (1200000) -#define UP_THRESHOLD_AT_FAST_DOWN (95) - -#define HOTPLUG_DOWN_INDEX (0) -#define HOTPLUG_UP_INDEX (1) - -#ifdef CONFIG_MACH_MIDAS -static int hotplug_rq[4][2] = { - {0, 100}, {100, 200}, {200, 300}, {300, 0} -}; - -static int hotplug_freq[4][2] = { - {0, 500000}, - {200000, 600000}, - {500000, 800000}, - {500000, 0} -}; -#else -static int hotplug_rq[4][2] = { - {0, 200}, {200, 200}, {200, 300}, {300, 0} -}; - -static int hotplug_freq[4][2] = { - {0, 800000}, - {500000, 500000}, - {200000, 500000}, - {200000, 0} -}; -#endif - -static unsigned int min_sampling_rate; - -static void do_dbs_timer(struct work_struct *work); -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_PEGASUSQ -static -#endif -struct cpufreq_governor cpufreq_gov_pegasusq = { - .name = "pegasusq", - .governor = cpufreq_governor_dbs, - .owner = THIS_MODULE, -}; - -/* Sampling types */ -enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; - -struct cpu_dbs_info_s { - cputime64_t prev_cpu_idle; - cputime64_t prev_cpu_iowait; - cputime64_t prev_cpu_wall; - cputime64_t prev_cpu_nice; - struct cpufreq_policy *cur_policy; - struct delayed_work work; - struct work_struct up_work; - struct work_struct down_work; - struct cpufreq_frequency_table *freq_table; - unsigned int rate_mult; - int cpu; - /* - * percpu mutex that serializes governor 
limit change with - * do_dbs_timer invocation. We do not want do_dbs_timer to run - * when user is changing the governor or limits. - */ - struct mutex timer_mutex; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); - -struct workqueue_struct *dvfs_workqueue; - -static unsigned int dbs_enable; /* number of CPUs using this policy */ - -/* - * dbs_mutex protects dbs_enable in governor start/stop. - */ -static DEFINE_MUTEX(dbs_mutex); - -static struct dbs_tuners { - unsigned int sampling_rate; - unsigned int up_threshold; - unsigned int down_differential; - unsigned int ignore_nice; - unsigned int sampling_down_factor; - unsigned int io_is_busy; - /* pegasusq tuners */ - unsigned int freq_step; - unsigned int cpu_up_rate; - unsigned int cpu_down_rate; - unsigned int cpu_up_freq; - unsigned int cpu_down_freq; - unsigned int up_nr_cpus; - unsigned int max_cpu_lock; - unsigned int min_cpu_lock; - atomic_t hotplug_lock; - unsigned int dvfs_debug; - unsigned int max_freq; - unsigned int min_freq; -#ifdef CONFIG_HAS_EARLYSUSPEND - int early_suspend; -#endif - unsigned int up_threshold_at_min_freq; - unsigned int freq_for_responsiveness; -} dbs_tuners_ins = { - .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, - .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, - .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, - .ignore_nice = 1, - .freq_step = DEF_FREQ_STEP, - .cpu_up_rate = DEF_CPU_UP_RATE, - .cpu_down_rate = DEF_CPU_DOWN_RATE, - .cpu_up_freq = DEF_CPU_UP_FREQ, - .cpu_down_freq = DEF_CPU_DOWN_FREQ, - .up_nr_cpus = DEF_UP_NR_CPUS, - .max_cpu_lock = DEF_MAX_CPU_LOCK, - .min_cpu_lock = DEF_MIN_CPU_LOCK, - .hotplug_lock = ATOMIC_INIT(0), - .dvfs_debug = 0, -#ifdef CONFIG_HAS_EARLYSUSPEND - .early_suspend = -1, -#endif - .up_threshold_at_min_freq = UP_THRESHOLD_AT_MIN_FREQ, - .freq_for_responsiveness = FREQ_FOR_RESPONSIVENESS, -}; - - -/* - * CPU hotplug lock interface - */ - -static atomic_t g_hotplug_count = ATOMIC_INIT(0); -static atomic_t g_hotplug_lock = ATOMIC_INIT(0); - -static void apply_hotplug_lock(void) -{ - int online, possible, lock, flag; - struct work_struct *work; - struct cpu_dbs_info_s *dbs_info; - - /* do turn_on/off cpus */ - dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ - online = num_online_cpus(); - possible = num_possible_cpus(); - lock = atomic_read(&g_hotplug_lock); - flag = lock - online; - - if (flag == 0) - return; - - work = flag > 0 ? 
&dbs_info->up_work : &dbs_info->down_work; - - pr_debug("%s online %d possible %d lock %d flag %d %d\n", - __func__, online, possible, lock, flag, (int)abs(flag)); - - queue_work_on(dbs_info->cpu, dvfs_workqueue, work); -} - -int cpufreq_pegasusq_cpu_lock(int num_core) -{ - int prev_lock; - - if (num_core < 1 || num_core > num_possible_cpus()) - return -EINVAL; - - prev_lock = atomic_read(&g_hotplug_lock); - - if (prev_lock != 0 && prev_lock < num_core) - return -EINVAL; - else if (prev_lock == num_core) - atomic_inc(&g_hotplug_count); - - atomic_set(&g_hotplug_lock, num_core); - atomic_set(&g_hotplug_count, 1); - apply_hotplug_lock(); - - return 0; -} - -int cpufreq_pegasusq_cpu_unlock(int num_core) -{ - int prev_lock = atomic_read(&g_hotplug_lock); - - if (prev_lock < num_core) - return 0; - else if (prev_lock == num_core) - atomic_dec(&g_hotplug_count); - - if (atomic_read(&g_hotplug_count) == 0) - atomic_set(&g_hotplug_lock, 0); - - return 0; -} - -void cpufreq_pegasusq_min_cpu_lock(unsigned int num_core) -{ - int online, flag; - struct cpu_dbs_info_s *dbs_info; - - dbs_tuners_ins.min_cpu_lock = min(num_core, num_possible_cpus()); - - dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ - online = num_online_cpus(); - flag = (int)num_core - online; - if (flag <= 0) - return; - queue_work_on(dbs_info->cpu, dvfs_workqueue, &dbs_info->up_work); -} - -void cpufreq_pegasusq_min_cpu_unlock(void) -{ - int online, lock, flag; - struct cpu_dbs_info_s *dbs_info; - - dbs_tuners_ins.min_cpu_lock = 0; - - dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ - online = num_online_cpus(); - lock = atomic_read(&g_hotplug_lock); - if (lock == 0) - return; - flag = lock - online; - if (flag >= 0) - return; - queue_work_on(dbs_info->cpu, dvfs_workqueue, &dbs_info->down_work); -} - -/* - * History of CPU usage - */ -struct cpu_usage { - unsigned int freq; - unsigned int load[NR_CPUS]; - unsigned int rq_avg; - unsigned int avg_load; -}; - -struct cpu_usage_history { - struct cpu_usage usage[MAX_HOTPLUG_RATE]; - unsigned int num_hist; -}; - -struct cpu_usage_history *hotplug_history; - -static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, - u64 *wall) -{ - u64 idle_time; - u64 cur_wall_time; - u64 busy_time; - - cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); - - busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; - - idle_time = cur_wall_time - busy_time; - if (wall) - *wall = jiffies_to_usecs(cur_wall_time); - - return jiffies_to_usecs(idle_time); -} - -static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) -{ - u64 idle_time = get_cpu_idle_time_us(cpu, wall); - - if (idle_time == -1ULL) - return get_cpu_idle_time_jiffy(cpu, wall); - - return idle_time; -} - -static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, - cputime64_t *wall) -{ - u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); - - if (iowait_time == -1ULL) - return 0; - - return iowait_time; -} - -/************************** sysfs interface ************************/ - -static ssize_t show_sampling_rate_min(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", min_sampling_rate); -} - -static ssize_t show_boostpulse_value(struct kobject *kobj, - struct attribute 
*attr, char *buf) -{ - return sprintf(buf, "%u\n", Lboostpulse_value / 1000); -} - -define_one_global_ro(sampling_rate_min); - -/* cpufreq_pegasusq Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct kobject *kobj, struct attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ -} -show_one(sampling_rate, sampling_rate); -show_one(io_is_busy, io_is_busy); -show_one(up_threshold, up_threshold); -show_one(sampling_down_factor, sampling_down_factor); -show_one(ignore_nice_load, ignore_nice); -show_one(down_differential, down_differential); -show_one(freq_step, freq_step); -show_one(cpu_up_rate, cpu_up_rate); -show_one(cpu_down_rate, cpu_down_rate); -show_one(cpu_up_freq, cpu_up_freq); -show_one(cpu_down_freq, cpu_down_freq); -show_one(up_nr_cpus, up_nr_cpus); -show_one(max_cpu_lock, max_cpu_lock); -show_one(min_cpu_lock, min_cpu_lock); -show_one(dvfs_debug, dvfs_debug); -show_one(up_threshold_at_min_freq, up_threshold_at_min_freq); -show_one(freq_for_responsiveness, freq_for_responsiveness); -static ssize_t show_hotplug_lock(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", atomic_read(&g_hotplug_lock)); -} - -#define show_hotplug_param(file_name, num_core, up_down) \ -static ssize_t show_##file_name##_##num_core##_##up_down \ -(struct kobject *kobj, struct attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%u\n", file_name[num_core - 1][up_down]); \ -} - -#define store_hotplug_param(file_name, num_core, up_down) \ -static ssize_t store_##file_name##_##num_core##_##up_down \ -(struct kobject *kobj, struct attribute *attr, \ - const char *buf, size_t count) \ -{ \ - unsigned int input; \ - int ret; \ - ret = sscanf(buf, "%u", &input); \ - if (ret != 1) \ - return -EINVAL; \ - file_name[num_core - 1][up_down] = input; \ - return count; \ -} - -show_hotplug_param(hotplug_freq, 1, 1); -show_hotplug_param(hotplug_freq, 2, 0); -show_hotplug_param(hotplug_freq, 2, 1); -show_hotplug_param(hotplug_freq, 3, 0); -show_hotplug_param(hotplug_freq, 3, 1); -show_hotplug_param(hotplug_freq, 4, 0); - -show_hotplug_param(hotplug_rq, 1, 1); -show_hotplug_param(hotplug_rq, 2, 0); -show_hotplug_param(hotplug_rq, 2, 1); -show_hotplug_param(hotplug_rq, 3, 0); -show_hotplug_param(hotplug_rq, 3, 1); -show_hotplug_param(hotplug_rq, 4, 0); - -store_hotplug_param(hotplug_freq, 1, 1); -store_hotplug_param(hotplug_freq, 2, 0); -store_hotplug_param(hotplug_freq, 2, 1); -store_hotplug_param(hotplug_freq, 3, 0); -store_hotplug_param(hotplug_freq, 3, 1); -store_hotplug_param(hotplug_freq, 4, 0); - -store_hotplug_param(hotplug_rq, 1, 1); -store_hotplug_param(hotplug_rq, 2, 0); -store_hotplug_param(hotplug_rq, 2, 1); -store_hotplug_param(hotplug_rq, 3, 0); -store_hotplug_param(hotplug_rq, 3, 1); -store_hotplug_param(hotplug_rq, 4, 0); - -define_one_global_rw(hotplug_freq_1_1); -define_one_global_rw(hotplug_freq_2_0); -define_one_global_rw(hotplug_freq_2_1); -define_one_global_rw(hotplug_freq_3_0); -define_one_global_rw(hotplug_freq_3_1); -define_one_global_rw(hotplug_freq_4_0); - -define_one_global_rw(hotplug_rq_1_1); -define_one_global_rw(hotplug_rq_2_0); -define_one_global_rw(hotplug_rq_2_1); -define_one_global_rw(hotplug_rq_3_0); -define_one_global_rw(hotplug_rq_3_1); -define_one_global_rw(hotplug_rq_4_0); - -static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if 
(ret != 1) - return -EINVAL; - dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); - return count; -} - -static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - dbs_tuners_ins.io_is_busy = !!input; - return count; -} - -static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || - input < MIN_FREQUENCY_UP_THRESHOLD) { - return -EINVAL; - } - dbs_tuners_ins.up_threshold = input; - return count; -} - -static ssize_t store_sampling_down_factor(struct kobject *a, - struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input, j; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) - return -EINVAL; - dbs_tuners_ins.sampling_down_factor = input; - - /* Reset down sampling multiplier in case it was active */ - for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; - dbs_info = &per_cpu(od_cpu_dbs_info, j); - dbs_info->rate_mult = 1; - } - return count; -} - -static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - unsigned int j; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - if (input > 1) - input = 1; - - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - return count; - } - dbs_tuners_ins.ignore_nice = input; - - /* we need to re-evaluate prev_cpu_idle */ - for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; - dbs_info = &per_cpu(od_cpu_dbs_info, j); - dbs_info->prev_cpu_idle = - get_cpu_idle_time(j, &dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - } - return count; -} - -static ssize_t store_down_differential(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.down_differential = min(input, 100u); - return count; -} - -static ssize_t store_freq_step(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.freq_step = min(input, 100u); - return count; -} - -static ssize_t store_cpu_up_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.cpu_up_rate = min(input, MAX_HOTPLUG_RATE); - return count; -} - -static ssize_t store_cpu_down_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.cpu_down_rate = min(input, MAX_HOTPLUG_RATE); - return count; -} - -static ssize_t store_cpu_up_freq(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.cpu_up_freq = min(input, dbs_tuners_ins.max_freq); - return count; -} - -static ssize_t store_cpu_down_freq(struct 
kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.cpu_down_freq = max(input, dbs_tuners_ins.min_freq); - return count; -} - -static ssize_t store_up_nr_cpus(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.up_nr_cpus = min(input, num_possible_cpus()); - return count; -} - -static ssize_t store_max_cpu_lock(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.max_cpu_lock = min(input, num_possible_cpus()); - return count; -} - -static ssize_t store_min_cpu_lock(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - if (input == 0) - cpufreq_pegasusq_min_cpu_unlock(); - else - cpufreq_pegasusq_min_cpu_lock(input); - return count; -} - -static ssize_t store_hotplug_lock(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - int prev_lock; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - input = min(input, num_possible_cpus()); - prev_lock = atomic_read(&dbs_tuners_ins.hotplug_lock); - - if (prev_lock) - cpufreq_pegasusq_cpu_unlock(prev_lock); - - if (input == 0) { - atomic_set(&dbs_tuners_ins.hotplug_lock, 0); - return count; - } - - ret = cpufreq_pegasusq_cpu_lock(input); - if (ret) { - printk(KERN_ERR "[HOTPLUG] already locked with smaller value %d < %d\n", - atomic_read(&g_hotplug_lock), input); - return ret; - } - - atomic_set(&dbs_tuners_ins.hotplug_lock, input); - - return count; -} - -static ssize_t store_dvfs_debug(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.dvfs_debug = input > 0; - return count; -} - -static ssize_t store_up_threshold_at_min_freq(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || - input < MIN_FREQUENCY_UP_THRESHOLD) { - return -EINVAL; - } - dbs_tuners_ins.up_threshold_at_min_freq = input; - return count; -} - -static ssize_t store_freq_for_responsiveness(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.freq_for_responsiveness = input; - return count; -} - -static ssize_t store_boostpulse_value(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - if (input * 1000 > 2106000) - input = 2106000; - - Lboostpulse_value = input * 1000; - return count; -} - -define_one_global_rw(sampling_rate); -define_one_global_rw(io_is_busy); -define_one_global_rw(up_threshold); -define_one_global_rw(sampling_down_factor); -define_one_global_rw(ignore_nice_load); -define_one_global_rw(down_differential); -define_one_global_rw(freq_step); -define_one_global_rw(cpu_up_rate); 
-define_one_global_rw(cpu_down_rate); -define_one_global_rw(cpu_up_freq); -define_one_global_rw(cpu_down_freq); -define_one_global_rw(up_nr_cpus); -define_one_global_rw(max_cpu_lock); -define_one_global_rw(min_cpu_lock); -define_one_global_rw(hotplug_lock); -define_one_global_rw(dvfs_debug); -define_one_global_rw(up_threshold_at_min_freq); -define_one_global_rw(freq_for_responsiveness); -define_one_global_rw(boostpulse_value); - -static struct attribute *dbs_attributes[] = { - &sampling_rate_min.attr, - &sampling_rate.attr, - &up_threshold.attr, - &sampling_down_factor.attr, - &ignore_nice_load.attr, - &io_is_busy.attr, - &down_differential.attr, - &freq_step.attr, - &cpu_up_rate.attr, - &cpu_down_rate.attr, - &cpu_up_freq.attr, - &cpu_down_freq.attr, - &up_nr_cpus.attr, - /* priority: hotplug_lock > max_cpu_lock > min_cpu_lock - Exception: hotplug_lock on early_suspend uses min_cpu_lock */ - &max_cpu_lock.attr, - &min_cpu_lock.attr, - &hotplug_lock.attr, - &dvfs_debug.attr, - &hotplug_freq_1_1.attr, - &hotplug_freq_2_0.attr, - &hotplug_freq_2_1.attr, - &hotplug_freq_3_0.attr, - &hotplug_freq_3_1.attr, - &hotplug_freq_4_0.attr, - &hotplug_rq_1_1.attr, - &hotplug_rq_2_0.attr, - &hotplug_rq_2_1.attr, - &hotplug_rq_3_0.attr, - &hotplug_rq_3_1.attr, - &hotplug_rq_4_0.attr, - &up_threshold_at_min_freq.attr, - &freq_for_responsiveness.attr, - &boostpulse_value.attr, - NULL -}; - -static struct attribute_group dbs_attr_group = { - .attrs = dbs_attributes, - .name = "pegasusq", -}; - -/************************** sysfs end ************************/ - -static void __cpuinit cpu_up_work(struct work_struct *work) -{ - int cpu; - int online = num_online_cpus(); - int nr_up = dbs_tuners_ins.up_nr_cpus; - int min_cpu_lock = dbs_tuners_ins.min_cpu_lock; - int hotplug_lock = atomic_read(&g_hotplug_lock); - - if (hotplug_lock && min_cpu_lock) - nr_up = max(hotplug_lock, min_cpu_lock) - online; - else if (hotplug_lock) - nr_up = hotplug_lock - online; - else if (min_cpu_lock) - nr_up = max(nr_up, min_cpu_lock - online); - - if (online == 1) { - printk(KERN_ERR "CPU_UP 3\n"); - cpu_up(num_possible_cpus() - 1); - nr_up -= 1; - } - - for_each_cpu_not(cpu, cpu_online_mask) { - if (nr_up-- == 0) - break; - if (cpu == 0) - continue; - printk(KERN_ERR "CPU_UP %d\n", cpu); - cpu_up(cpu); - } -} - -static void cpu_down_work(struct work_struct *work) -{ - int cpu; - int online = num_online_cpus(); - int nr_down = 1; - int hotplug_lock = atomic_read(&g_hotplug_lock); - - if (hotplug_lock) - nr_down = online - hotplug_lock; - - for_each_online_cpu(cpu) { - if (cpu == 0) - continue; - printk(KERN_ERR "CPU_DOWN %d\n", cpu); - cpu_down(cpu); - if (--nr_down == 0) - break; - } -} - -static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) -{ -#ifndef CONFIG_ARCH_EXYNOS4 - if (p->cur == p->max) - return; -#endif - - __cpufreq_driver_target(p, freq, CPUFREQ_RELATION_L); -} - -/* - * print hotplug debugging info. - * which 1 : UP, 0 : DOWN - */ -static void debug_hotplug_check(int which, int rq_avg, int freq, - struct cpu_usage *usage) -{ - int cpu; - printk(KERN_ERR "CHECK %s rq %d.%02d freq %d [", which ? 
"up" : "down", - rq_avg / 100, rq_avg % 100, freq); - for_each_online_cpu(cpu) { - printk(KERN_ERR "(%d, %d), ", cpu, usage->load[cpu]); - } - printk(KERN_ERR "]\n"); -} - -static int check_up(void) -{ - int num_hist = hotplug_history->num_hist; - struct cpu_usage *usage; - int freq, rq_avg; - int avg_load; - int i; - int up_rate = dbs_tuners_ins.cpu_up_rate; - int up_freq, up_rq; - int min_freq = INT_MAX; - int min_rq_avg = INT_MAX; - int min_avg_load = INT_MAX; - int online; - int hotplug_lock = atomic_read(&g_hotplug_lock); - - if (hotplug_lock > 0) - return 0; - - online = num_online_cpus(); - up_freq = hotplug_freq[online - 1][HOTPLUG_UP_INDEX]; - up_rq = hotplug_rq[online - 1][HOTPLUG_UP_INDEX]; - - if (online == num_possible_cpus()) - return 0; - - if (dbs_tuners_ins.max_cpu_lock != 0 - && online >= dbs_tuners_ins.max_cpu_lock) - return 0; - - if (dbs_tuners_ins.min_cpu_lock != 0 - && online < dbs_tuners_ins.min_cpu_lock) - return 1; - - if (num_hist == 0 || num_hist % up_rate) - return 0; - - for (i = num_hist - 1; i >= num_hist - up_rate; --i) { - usage = &hotplug_history->usage[i]; - - freq = usage->freq; - rq_avg = usage->rq_avg; - avg_load = usage->avg_load; - - min_freq = min(min_freq, freq); - min_rq_avg = min(min_rq_avg, rq_avg); - min_avg_load = min(min_avg_load, avg_load); - - if (dbs_tuners_ins.dvfs_debug) - debug_hotplug_check(1, rq_avg, freq, usage); - } - - if (min_freq >= up_freq && min_rq_avg > up_rq) { - if (online >= 2) { - if (min_avg_load < 65) - return 0; - } - printk(KERN_ERR "[HOTPLUG IN] %s %d>=%d && %d>%d\n", - __func__, min_freq, up_freq, min_rq_avg, up_rq); - hotplug_history->num_hist = 0; - return 1; - } - return 0; -} - -static int check_down(void) -{ - int num_hist = hotplug_history->num_hist; - struct cpu_usage *usage; - int freq, rq_avg; - int avg_load; - int i; - int down_rate = dbs_tuners_ins.cpu_down_rate; - int down_freq, down_rq; - int max_freq = 0; - int max_rq_avg = 0; - int max_avg_load = 0; - int online; - int hotplug_lock = atomic_read(&g_hotplug_lock); - - if (hotplug_lock > 0) - return 0; - - online = num_online_cpus(); - down_freq = hotplug_freq[online - 1][HOTPLUG_DOWN_INDEX]; - down_rq = hotplug_rq[online - 1][HOTPLUG_DOWN_INDEX]; - - if (online == 1) - return 0; - - if (dbs_tuners_ins.max_cpu_lock != 0 - && online > dbs_tuners_ins.max_cpu_lock) - return 1; - - if (dbs_tuners_ins.min_cpu_lock != 0 - && online <= dbs_tuners_ins.min_cpu_lock) - return 0; - - if (num_hist == 0 || num_hist % down_rate) - return 0; - - for (i = num_hist - 1; i >= num_hist - down_rate; --i) { - usage = &hotplug_history->usage[i]; - - freq = usage->freq; - rq_avg = usage->rq_avg; - avg_load = usage->avg_load; - - max_freq = max(max_freq, freq); - max_rq_avg = max(max_rq_avg, rq_avg); - max_avg_load = max(max_avg_load, avg_load); - - if (dbs_tuners_ins.dvfs_debug) - debug_hotplug_check(0, rq_avg, freq, usage); - } - - if ((max_freq <= down_freq && max_rq_avg <= down_rq) - || (online >= 3 && max_avg_load < 30)) { - printk(KERN_ERR "[HOTPLUG OUT] %s %d<=%d && %d<%d\n", - __func__, max_freq, down_freq, max_rq_avg, down_rq); - hotplug_history->num_hist = 0; - return 1; - } - - return 0; -} - -static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) -{ - unsigned int max_load_freq; - - struct cpufreq_policy *policy; - unsigned int j; - int num_hist = hotplug_history->num_hist; - int max_hotplug_rate = max(dbs_tuners_ins.cpu_up_rate, - dbs_tuners_ins.cpu_down_rate); - int up_threshold = dbs_tuners_ins.up_threshold; - - /* add total_load, avg_load to get 
average load */ - unsigned int total_load = 0; - unsigned int avg_load = 0; - int load_each[4] = {-1, -1, -1, -1}; - int rq_avg = 0; - policy = this_dbs_info->cur_policy; - - if (boostpulse_relayf) - { - if (boostpulse_relay_sr != 0) - dbs_tuners_ins.sampling_rate = boostpulse_relay_sr; - boostpulse_relayf = false; - if (policy->cur > Lboostpulse_value) - return; - - __cpufreq_driver_target(policy, Lboostpulse_value, - CPUFREQ_RELATION_H); - return; - } - - hotplug_history->usage[num_hist].freq = policy->cur; - hotplug_history->usage[num_hist].rq_avg = get_nr_run_avg(); - - /* add total_load, avg_load to get average load */ - rq_avg = hotplug_history->usage[num_hist].rq_avg; - - ++hotplug_history->num_hist; - - /* Get Absolute Load - in terms of freq */ - max_load_freq = 0; - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; - cputime64_t prev_wall_time, prev_idle_time, prev_iowait_time; - unsigned int idle_time, wall_time, iowait_time; - unsigned int load, load_freq; - int freq_avg; - - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - prev_wall_time = j_dbs_info->prev_cpu_wall; - prev_idle_time = j_dbs_info->prev_cpu_idle; - prev_iowait_time = j_dbs_info->prev_cpu_iowait; - - cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); - cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); - - wall_time = (unsigned int) cputime64_sub(cur_wall_time, - prev_wall_time); - j_dbs_info->prev_cpu_wall = cur_wall_time; - - idle_time = (unsigned int) cputime64_sub(cur_idle_time, - prev_idle_time); - j_dbs_info->prev_cpu_idle = cur_idle_time; - - iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, - prev_iowait_time); - j_dbs_info->prev_cpu_iowait = cur_iowait_time; - - if (dbs_tuners_ins.ignore_nice) { - cputime64_t cur_nice; - unsigned long cur_nice_jiffies; - - cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - - j_dbs_info->prev_cpu_nice; - /* - * Assumption: nice time between sampling periods will - * be less than 2^32 jiffies for 32 bit sys - */ - cur_nice_jiffies = (unsigned long) - cputime64_to_jiffies64(cur_nice); - - j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - idle_time += jiffies_to_usecs(cur_nice_jiffies); - } - - if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) - idle_time -= iowait_time; - - if (unlikely(!wall_time || wall_time < idle_time)) - continue; - - load = 100 * (wall_time - idle_time) / wall_time; - - /* keep load of each CPUs and combined load across all CPUs */ - if (cpu_online(j)) - load_each[j] = load; - total_load += load; - - hotplug_history->usage[num_hist].load[j] = load; - - freq_avg = __cpufreq_driver_getavg(policy, j); - if (freq_avg <= 0) - freq_avg = policy->cur; - - load_freq = load * freq_avg; - if (load_freq > max_load_freq) - max_load_freq = load_freq; - } - /* calculate the average load across all related CPUs */ - avg_load = total_load / num_online_cpus(); - hotplug_history->usage[num_hist].avg_load = avg_load; - //pr_info("LOAD_TIMER - %d - %d - %d - %d", max_load_freq/1000, total_load, avg_load, num_online_cpus()); - - /* Check for CPU hotplug */ - if (check_up()) { - queue_work_on(this_dbs_info->cpu, dvfs_workqueue, - &this_dbs_info->up_work); - } else if (check_down()) { - queue_work_on(this_dbs_info->cpu, dvfs_workqueue, - &this_dbs_info->down_work); - } - if (hotplug_history->num_hist == max_hotplug_rate) - hotplug_history->num_hist = 0; - - /* Check for frequency increase */ - if (policy->cur < dbs_tuners_ins.freq_for_responsiveness) 
- up_threshold = dbs_tuners_ins.up_threshold_at_min_freq; - /* for fast frequency decrease */ - else - up_threshold = dbs_tuners_ins.up_threshold; - - if (max_load_freq > up_threshold * policy->cur) { - /* for multiple freq_step */ - int inc = policy->max * (dbs_tuners_ins.freq_step - - DEF_FREQ_STEP_DEC * 2) / 100; - int target = 0; - - /* for multiple freq_step */ - if (max_load_freq > (up_threshold + DEF_UP_THRESHOLD_DIFF * 2) - * policy->cur) - inc = policy->max * dbs_tuners_ins.freq_step / 100; - else if (max_load_freq > (up_threshold + DEF_UP_THRESHOLD_DIFF) - * policy->cur) - inc = policy->max * (dbs_tuners_ins.freq_step - - DEF_FREQ_STEP_DEC) / 100; - - target = min(policy->max, policy->cur + inc); - - /* If switching to max speed, apply sampling_down_factor */ - if (policy->cur < policy->max && target == policy->max) - this_dbs_info->rate_mult = - dbs_tuners_ins.sampling_down_factor; - dbs_freq_increase(policy, target); - return; - } - - /* Check for frequency decrease */ -#ifndef CONFIG_ARCH_EXYNOS4 - /* if we cannot reduce the frequency anymore, break out early */ - if (policy->cur == policy->min) - return; -#endif - - /* - * The optimal frequency is the frequency that is the lowest that - * can support the current CPU usage without triggering the up - * policy. To be safe, we focus DOWN_DIFFERENTIAL points under - * the threshold. - */ - if (max_load_freq < - (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * - policy->cur) { - unsigned int freq_next; - unsigned int down_thres; - - freq_next = max_load_freq / - (dbs_tuners_ins.up_threshold - - dbs_tuners_ins.down_differential); - - /* No longer fully busy, reset rate_mult */ - this_dbs_info->rate_mult = 1; - - if (freq_next < policy->min) - freq_next = policy->min; - - - down_thres = dbs_tuners_ins.up_threshold_at_min_freq - - dbs_tuners_ins.down_differential; - - if (freq_next < dbs_tuners_ins.freq_for_responsiveness - && (max_load_freq / freq_next) > down_thres) - freq_next = dbs_tuners_ins.freq_for_responsiveness; - - if (policy->cur == freq_next) - return; - - __cpufreq_driver_target(policy, freq_next, - CPUFREQ_RELATION_L); - } -} - -/*extern void pegasusq_is_active(bool val); - -void boostpulse_relay_pq(void) -{ - if (Lboostpulse_value > 0) - { - //pr_info("BOOST_PULSE_FROM_INTERACTIVE"); - if (dbs_tuners_ins.sampling_rate != min_sampling_rate) - boostpulse_relay_sr = dbs_tuners_ins.sampling_rate; - boostpulse_relayf = true; - dbs_tuners_ins.sampling_rate = min_sampling_rate; - } -}*/ - -static void do_dbs_timer(struct work_struct *work) -{ - struct cpu_dbs_info_s *dbs_info = - container_of(work, struct cpu_dbs_info_s, work.work); - unsigned int cpu = dbs_info->cpu; - int delay; - - mutex_lock(&dbs_info->timer_mutex); - - dbs_check_cpu(dbs_info); - /* We want all CPUs to do sampling nearly on - * same jiffy - */ - delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate - * dbs_info->rate_mult); - - if (num_online_cpus() > 1) - delay -= jiffies % delay; - - queue_delayed_work_on(cpu, dvfs_workqueue, &dbs_info->work, delay); - mutex_unlock(&dbs_info->timer_mutex); -} - -static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) -{ - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(DEF_START_DELAY * 1000 * 1000 - + dbs_tuners_ins.sampling_rate); - if (num_online_cpus() > 1) - delay -= jiffies % delay; - - INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); - - queue_delayed_work_on(dbs_info->cpu, dvfs_workqueue, - &dbs_info->work, delay + 2 * HZ); -} - 
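/*
 * Editorial aside, not part of the original patch: a compact userspace model
 * of the pegasusq hotplug decision shown in check_up()/check_down() above.
 * Those helpers scan the last cpu_up_rate / cpu_down_rate history samples and
 * compare the worst case against per-online-count thresholds: plugging a core
 * in requires every recent sample to sit at or above up_freq and above up_rq.
 * The helper name should_plug_in() and the sample values in main() are
 * illustrative; the thresholds quoted are the CONFIG_MACH_MIDAS single-core
 * row of the deleted tables (up_freq 500000 kHz, up_rq 1.00 tasks).
 */
#include <stdio.h>

struct sample { int freq; int rq_avg; };

/* Returns 1 if another core should be brought online, given the last n samples. */
static int should_plug_in(const struct sample *hist, int n, int up_freq, int up_rq)
{
    int min_freq = hist[0].freq, min_rq = hist[0].rq_avg;
    for (int i = 1; i < n; i++) {
        if (hist[i].freq < min_freq)  min_freq = hist[i].freq;
        if (hist[i].rq_avg < min_rq)  min_rq = hist[i].rq_avg;
    }
    return min_freq >= up_freq && min_rq > up_rq;
}

int main(void)
{
    /* hypothetical window of three busy samples (freq in kHz, rq_avg in 1/100 tasks) */
    struct sample hist[3] = { {800000, 250}, {1000000, 310}, {900000, 280} };

    printf("plug in: %d\n", should_plug_in(hist, 3, 500000, 100));
    return 0;
}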
-static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) -{ - cancel_delayed_work_sync(&dbs_info->work); - cancel_work_sync(&dbs_info->up_work); - cancel_work_sync(&dbs_info->down_work); -} - -static int reboot_notifier_call(struct notifier_block *this, - unsigned long code, void *_cmd) -{ - atomic_set(&g_hotplug_lock, 1); - return NOTIFY_DONE; -} - -static struct notifier_block reboot_notifier = { - .notifier_call = reboot_notifier_call, -}; - -#ifdef CONFIG_HAS_EARLYSUSPEND -static struct early_suspend early_suspend; -unsigned int prev_freq_step; -unsigned int prev_sampling_rate; -static void cpufreq_pegasusq_early_suspend(struct early_suspend *h) -{ -#if EARLYSUSPEND_HOTPLUGLOCK - dbs_tuners_ins.early_suspend = - atomic_read(&g_hotplug_lock); -#endif - prev_freq_step = dbs_tuners_ins.freq_step; - prev_sampling_rate = dbs_tuners_ins.sampling_rate; - dbs_tuners_ins.freq_step = 10; - dbs_tuners_ins.sampling_rate = 200000; -#if EARLYSUSPEND_HOTPLUGLOCK - atomic_set(&g_hotplug_lock, - (dbs_tuners_ins.min_cpu_lock) ? dbs_tuners_ins.min_cpu_lock : 1); - apply_hotplug_lock(); - stop_rq_work(); -#endif -} -static void cpufreq_pegasusq_late_resume(struct early_suspend *h) -{ -#if EARLYSUSPEND_HOTPLUGLOCK - atomic_set(&g_hotplug_lock, dbs_tuners_ins.early_suspend); -#endif - dbs_tuners_ins.early_suspend = -1; - dbs_tuners_ins.freq_step = prev_freq_step; - dbs_tuners_ins.sampling_rate = prev_sampling_rate; -#if EARLYSUSPEND_HOTPLUGLOCK - apply_hotplug_lock(); - start_rq_work(); -#endif -} -#endif - -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int j; - int rc; - - this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - //pegasusq_is_active(true); - - prev_apenable = apget_enable_auto_hotplug(); - apenable_auto_hotplug(false); - - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - dbs_tuners_ins.max_freq = policy->max; - dbs_tuners_ins.min_freq = policy->min; - hotplug_history->num_hist = 0; - start_rq_work(); - - mutex_lock(&dbs_mutex); - - dbs_enable++; - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) { - j_dbs_info->prev_cpu_nice = - kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - } - } - this_dbs_info->cpu = cpu; - this_dbs_info->rate_mult = 1; - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 1) { - rc = sysfs_create_group(cpufreq_global_kobject, - &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - min_sampling_rate = MIN_SAMPLING_RATE; - dbs_tuners_ins.sampling_rate = DEF_SAMPLING_RATE; - dbs_tuners_ins.io_is_busy = 0; - } - mutex_unlock(&dbs_mutex); - - register_reboot_notifier(&reboot_notifier); - - mutex_init(&this_dbs_info->timer_mutex); - dbs_timer_init(this_dbs_info); - -#if !EARLYSUSPEND_HOTPLUGLOCK - register_pm_notifier(&pm_notifier); -#endif -#ifdef CONFIG_HAS_EARLYSUSPEND - register_early_suspend(&early_suspend); -#endif - break; - - case CPUFREQ_GOV_STOP: - //pegasusq_is_active(false); - - apenable_auto_hotplug(prev_apenable); - -#ifdef CONFIG_HAS_EARLYSUSPEND - unregister_early_suspend(&early_suspend); -#endif -#if !EARLYSUSPEND_HOTPLUGLOCK - unregister_pm_notifier(&pm_notifier); -#endif - - 
dbs_timer_exit(this_dbs_info); - - mutex_lock(&dbs_mutex); - mutex_destroy(&this_dbs_info->timer_mutex); - - unregister_reboot_notifier(&reboot_notifier); - - dbs_enable--; - mutex_unlock(&dbs_mutex); - - stop_rq_work(); - - if (!dbs_enable) - sysfs_remove_group(cpufreq_global_kobject, - &dbs_attr_group); - - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&this_dbs_info->timer_mutex); - - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->max, - CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->min, - CPUFREQ_RELATION_L); - - mutex_unlock(&this_dbs_info->timer_mutex); - break; - } - return 0; -} - -static int __init cpufreq_gov_dbs_init(void) -{ - int ret; - struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, 0); - - ret = init_rq_avg(); - if (ret) - return ret; - - INIT_WORK(&dbs_info->up_work, cpu_up_work); - INIT_WORK(&dbs_info->down_work, cpu_down_work); - - hotplug_history = kzalloc(sizeof(struct cpu_usage_history), GFP_KERNEL); - if (!hotplug_history) { - pr_err("%s cannot create hotplug history array\n", __func__); - ret = -ENOMEM; - goto err_hist; - } - - dvfs_workqueue = create_workqueue("kpegasusq"); - if (!dvfs_workqueue) { - pr_err("%s cannot create workqueue\n", __func__); - ret = -ENOMEM; - goto err_queue; - } - - ret = cpufreq_register_governor(&cpufreq_gov_pegasusq); - if (ret) - goto err_reg; - -#ifdef CONFIG_HAS_EARLYSUSPEND - early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; - early_suspend.suspend = cpufreq_pegasusq_early_suspend; - early_suspend.resume = cpufreq_pegasusq_late_resume; -#endif - - return ret; - -err_reg: - destroy_workqueue(dvfs_workqueue); -err_queue: - kfree(hotplug_history); -err_hist: - kfree(rq_data); - return ret; -} - -static void __exit cpufreq_gov_dbs_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_pegasusq); - destroy_workqueue(dvfs_workqueue); - kfree(hotplug_history); - kfree(rq_data); -} - -MODULE_AUTHOR("ByungChang Cha "); -MODULE_DESCRIPTION("'cpufreq_pegasusq' - A dynamic cpufreq/cpuhotplug governor"); -MODULE_LICENSE("GPL"); - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PEGASUSQ -fs_initcall(cpufreq_gov_dbs_init); -#else -module_init(cpufreq_gov_dbs_init); -#endif -module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_smartassH3.c b/drivers/cpufreq/cpufreq_smartassH3.c deleted file mode 100644 index 7e0891ed..00000000 --- a/drivers/cpufreq/cpufreq_smartassH3.c +++ /dev/null @@ -1,904 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_smartassH3.c - * - * Copyright (C) 2010 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * Author: Erasmux - * - * Based on the interactive governor By Mike Chan (mike@android.com) - * which was adaptated to 2.6.29 kernel by Nadlabak (pavel@doshaska.net) - * - * SMP support based on mod by faux123 - * - * ZTE Skate specific tweaks by H3ROS @ MoDaCo, integrated by C3C0 @ MoDaCo - * - * For a general overview of smartassV2 see the relavent part in - * Documentation/cpu-freq/governors.txt - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -/******************** Tunable parameters: ********************/ - -/* - * The "ideal" frequency to use when awake. The governor will ramp up faster - * towards the ideal frequency and slower after it has passed it. Similarly, - * lowering the frequency towards the ideal frequency is faster than below it. - */ -#define DEFAULT_AWAKE_IDEAL_FREQ 378000 -static unsigned int awake_ideal_freq; - -/* - * The "ideal" frequency to use when suspended. - * When set to 0, the governor will not track the suspended state (meaning - * that practically when sleep_ideal_freq==0 the awake_ideal_freq is used - * also when suspended). - */ -#define DEFAULT_SLEEP_IDEAL_FREQ 378000 -static unsigned int sleep_ideal_freq; - -/* - * Freqeuncy delta when ramping up above the ideal freqeuncy. - * Zero disables and causes to always jump straight to max frequency. - * When below the ideal freqeuncy we always ramp up to the ideal freq. - */ -#define DEFAULT_RAMP_UP_STEP 80000 -static unsigned int ramp_up_step; - -/* - * Freqeuncy delta when ramping down below the ideal freqeuncy. - * Zero disables and will calculate ramp down according to load heuristic. - * When above the ideal freqeuncy we always ramp down to the ideal freq. - */ -#define DEFAULT_RAMP_DOWN_STEP 80000 -static unsigned int ramp_down_step; - -/* - * CPU freq will be increased if measured load > max_cpu_load; - */ -#define DEFAULT_MAX_CPU_LOAD 85 -static unsigned long max_cpu_load; - -/* - * CPU freq will be decreased if measured load < min_cpu_load; - */ -#define DEFAULT_MIN_CPU_LOAD 70 -static unsigned long min_cpu_load; - -/* - * The minimum amount of time to spend at a frequency before we can ramp up. - * Notice we ignore this when we are below the ideal frequency. - */ -#define DEFAULT_UP_RATE_US 48000; -static unsigned long up_rate_us; - -/* - * The minimum amount of time to spend at a frequency before we can ramp down. - * Notice we ignore this when we are above the ideal frequency. - */ -#define DEFAULT_DOWN_RATE_US 49000; -static unsigned long down_rate_us; - -/* - * The frequency to set when waking up from sleep. - * When sleep_ideal_freq=0 this will have no effect. - */ -#define DEFAULT_SLEEP_WAKEUP_FREQ 99999999 -static unsigned int sleep_wakeup_freq; - -/* - * Sampling rate, I highly recommend to leave it at 2. 
- */ -#define DEFAULT_SAMPLE_RATE_JIFFIES 2 -static unsigned int sample_rate_jiffies; - - -/*************** End of tunables ***************/ - - -static void (*pm_idle_old)(void); -static atomic_t active_count = ATOMIC_INIT(0); - -struct smartass_info_s { - struct cpufreq_policy *cur_policy; - struct cpufreq_frequency_table *freq_table; - struct timer_list timer; - u64 time_in_idle; - u64 idle_exit_time; - u64 freq_change_time; - u64 freq_change_time_in_idle; - int cur_cpu_load; - int old_freq; - int ramp_dir; - unsigned int enable; - int ideal_speed; -}; -static DEFINE_PER_CPU(struct smartass_info_s, smartass_info); - -/* Workqueues handle frequency scaling */ -static struct workqueue_struct *up_wq; -static struct workqueue_struct *down_wq; -static struct work_struct freq_scale_work; - -static cpumask_t work_cpumask; -static spinlock_t cpumask_lock; - -static unsigned int suspended; - -#define dprintk(flag,msg...) do { \ - if (debug_mask & flag) printk(KERN_DEBUG msg); \ - } while (0) - -enum { - SMARTASS_DEBUG_JUMPS=1, - SMARTASS_DEBUG_LOAD=2, - SMARTASS_DEBUG_ALG=4 -}; - -/* - * Combination of the above debug flags. - */ -static unsigned long debug_mask; - -static int cpufreq_governor_smartass_h3(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASSH3 -static -#endif -struct cpufreq_governor cpufreq_gov_smartass_h3 = { - .name = "smartassH3", - .governor = cpufreq_governor_smartass_h3, - .max_transition_latency = 9000000, - .owner = THIS_MODULE, -}; - -inline static void smartass_update_min_max(struct smartass_info_s *this_smartass, struct cpufreq_policy *policy, int suspend) { - if (suspend) { - this_smartass->ideal_speed = // sleep_ideal_freq; but make sure it obeys the policy min/max - policy->max > sleep_ideal_freq ? - (sleep_ideal_freq > policy->min ? sleep_ideal_freq : policy->min) : policy->max; - } else { - this_smartass->ideal_speed = // awake_ideal_freq; but make sure it obeys the policy min/max - policy->min < awake_ideal_freq ? - (awake_ideal_freq < policy->max ? 
awake_ideal_freq : policy->max) : policy->min; - } -} - -inline static void smartass_update_min_max_allcpus(void) { - unsigned int i; - for_each_online_cpu(i) { - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, i); - if (this_smartass->enable) - smartass_update_min_max(this_smartass,this_smartass->cur_policy,suspended); - } -} - -inline static unsigned int validate_freq(struct cpufreq_policy *policy, int freq) { - if (freq > (int)policy->max) - return policy->max; - if (freq < (int)policy->min) - return policy->min; - return freq; -} - -inline static void reset_timer(unsigned long cpu, struct smartass_info_s *this_smartass) { - this_smartass->time_in_idle = get_cpu_idle_time_us(cpu, &this_smartass->idle_exit_time); - mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies); -} - -inline static void work_cpumask_set(unsigned long cpu) { - unsigned long flags; - spin_lock_irqsave(&cpumask_lock, flags); - cpumask_set_cpu(cpu, &work_cpumask); - spin_unlock_irqrestore(&cpumask_lock, flags); -} - -inline static int work_cpumask_test_and_clear(unsigned long cpu) { - unsigned long flags; - int res = 0; - spin_lock_irqsave(&cpumask_lock, flags); - res = cpumask_test_and_clear_cpu(cpu, &work_cpumask); - spin_unlock_irqrestore(&cpumask_lock, flags); - return res; -} - -inline static int target_freq(struct cpufreq_policy *policy, struct smartass_info_s *this_smartass, - int new_freq, int old_freq, int prefered_relation) { - int index, target; - struct cpufreq_frequency_table *table = this_smartass->freq_table; - - if (new_freq == old_freq) - return 0; - new_freq = validate_freq(policy,new_freq); - if (new_freq == old_freq) - return 0; - - if (table && - !cpufreq_frequency_table_target(policy,table,new_freq,prefered_relation,&index)) - { - target = table[index].frequency; - if (target == old_freq) { - // if for example we are ramping up to *at most* current + ramp_up_step - // but there is no such frequency higher than the current, try also - // to ramp up to *at least* current + ramp_up_step. - if (new_freq > old_freq && prefered_relation==CPUFREQ_RELATION_H - && !cpufreq_frequency_table_target(policy,table,new_freq, - CPUFREQ_RELATION_L,&index)) - target = table[index].frequency; - // simlarly for ramping down: - else if (new_freq < old_freq && prefered_relation==CPUFREQ_RELATION_L - && !cpufreq_frequency_table_target(policy,table,new_freq, - CPUFREQ_RELATION_H,&index)) - target = table[index].frequency; - } - - if (target == old_freq) { - // We should not get here: - // If we got here we tried to change to a validated new_freq which is different - // from old_freq, so there is no reason for us to remain at same frequency. 
- printk(KERN_WARNING "Smartass: frequency change failed: %d to %d => %d\n", - old_freq,new_freq,target); - return 0; - } - } - else target = new_freq; - - __cpufreq_driver_target(policy, target, prefered_relation); - - dprintk(SMARTASS_DEBUG_JUMPS,"SmartassQ: jumping from %d to %d => %d (%d)\n", - old_freq,new_freq,target,policy->cur); - - return target; -} - -static void cpufreq_smartass_timer(unsigned long cpu) -{ - u64 delta_idle; - u64 delta_time; - int cpu_load; - int old_freq; - u64 update_time; - u64 now_idle; - int queued_work = 0; - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); - struct cpufreq_policy *policy = this_smartass->cur_policy; - - now_idle = get_cpu_idle_time_us(cpu, &update_time); - old_freq = policy->cur; - - if (this_smartass->idle_exit_time == 0 || update_time == this_smartass->idle_exit_time) - return; - - delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle); - delta_time = cputime64_sub(update_time, this_smartass->idle_exit_time); - - // If timer ran less than 1ms after short-term sample started, retry. - if (delta_time < 1000) { - if (!timer_pending(&this_smartass->timer)) - reset_timer(cpu,this_smartass); - return; - } - - if (delta_idle > delta_time) - cpu_load = 0; - else - cpu_load = 100 * (unsigned int)(delta_time - delta_idle) / (unsigned int)delta_time; - - dprintk(SMARTASS_DEBUG_LOAD,"smartassT @ %d: load %d (delta_time %llu)\n", - old_freq,cpu_load,delta_time); - - this_smartass->cur_cpu_load = cpu_load; - this_smartass->old_freq = old_freq; - - // Scale up if load is above max or if there where no idle cycles since coming out of idle, - // additionally, if we are at or above the ideal_speed, verify we have been at this frequency - // for at least up_rate_us: - if (cpu_load > max_cpu_load || delta_idle == 0) - { - if (old_freq < policy->max && - (old_freq < this_smartass->ideal_speed || delta_idle == 0 || - cputime64_sub(update_time, this_smartass->freq_change_time) >= up_rate_us)) - { - dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp up: load %d (delta_idle %llu)\n", - old_freq,cpu_load,delta_idle); - this_smartass->ramp_dir = 1; - work_cpumask_set(cpu); - queue_work(up_wq, &freq_scale_work); - queued_work = 1; - } - else this_smartass->ramp_dir = 0; - } - // Similarly for scale down: load should be below min and if we are at or below ideal - // frequency we require that we have been at this frequency for at least down_rate_us: - else if (cpu_load < min_cpu_load && old_freq > policy->min && - (old_freq > this_smartass->ideal_speed || - cputime64_sub(update_time, this_smartass->freq_change_time) >= down_rate_us)) - { - dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp down: load %d (delta_idle %llu)\n", - old_freq,cpu_load,delta_idle); - this_smartass->ramp_dir = -1; - work_cpumask_set(cpu); - queue_work(down_wq, &freq_scale_work); - queued_work = 1; - } - else this_smartass->ramp_dir = 0; - - // To avoid unnecessary load when the CPU is already at high load, we don't - // reset ourselves if we are at max speed. If and when there are idle cycles, - // the idle loop will activate the timer. - // Additionally, if we queued some work, the work task will reset the timer - // after it has done its adjustments. 
- if (!queued_work && old_freq < policy->max) - reset_timer(cpu,this_smartass); -} - -static void cpufreq_idle(void) -{ - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); - struct cpufreq_policy *policy = this_smartass->cur_policy; - - if (!this_smartass->enable) { - pm_idle_old(); - return; - } - - if (policy->cur == policy->min && timer_pending(&this_smartass->timer)) - del_timer(&this_smartass->timer); - - pm_idle_old(); - - if (!timer_pending(&this_smartass->timer)) - reset_timer(smp_processor_id(), this_smartass); -} - -static int cpufreq_idle_notifier(struct notifier_block *nb, - unsigned long val, void *data) { - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); - struct cpufreq_policy *policy = this_smartass->cur_policy; - - if (!this_smartass->enable) - return NOTIFY_DONE; - - if (val == IDLE_START) { - if (policy->cur == policy->max && !timer_pending(&this_smartass->timer)) { - reset_timer(smp_processor_id(), this_smartass); - } else if (policy->cur == policy->min) { - if (timer_pending(&this_smartass->timer)) - del_timer(&this_smartass->timer); - } - } else if (val == IDLE_END) { - if (policy->cur == policy->min && !timer_pending(&this_smartass->timer)) - reset_timer(smp_processor_id(), this_smartass); - } - - return NOTIFY_OK; -} -static struct notifier_block cpufreq_idle_nb = { - .notifier_call = cpufreq_idle_notifier, -}; - -/* We use the same work function to sale up and down */ -static void cpufreq_smartass_freq_change_time_work(struct work_struct *work) -{ - unsigned int cpu; - int new_freq; - int old_freq; - int ramp_dir; - struct smartass_info_s *this_smartass; - struct cpufreq_policy *policy; - unsigned int relation = CPUFREQ_RELATION_L; - for_each_possible_cpu(cpu) { - this_smartass = &per_cpu(smartass_info, cpu); - if (!work_cpumask_test_and_clear(cpu)) - continue; - - ramp_dir = this_smartass->ramp_dir; - this_smartass->ramp_dir = 0; - - old_freq = this_smartass->old_freq; - policy = this_smartass->cur_policy; - - if (old_freq != policy->cur) { - // frequency was changed by someone else? - printk(KERN_WARNING "Smartass: frequency changed by 3rd party: %d to %d\n", - old_freq,policy->cur); - new_freq = old_freq; - } - else if (ramp_dir > 0 && nr_running() > 1) { - // ramp up logic: - if (old_freq < this_smartass->ideal_speed) - new_freq = this_smartass->ideal_speed; - else if (ramp_up_step) { - new_freq = old_freq + ramp_up_step; - relation = CPUFREQ_RELATION_H; - } - else { - new_freq = policy->max; - relation = CPUFREQ_RELATION_H; - } - dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp up: ramp_dir=%d ideal=%d\n", - old_freq,ramp_dir,this_smartass->ideal_speed); - } - else if (ramp_dir < 0) { - // ramp down logic: - if (old_freq > this_smartass->ideal_speed) { - new_freq = this_smartass->ideal_speed; - relation = CPUFREQ_RELATION_H; - } - else if (ramp_down_step) - new_freq = old_freq - ramp_down_step; - else { - // Load heuristics: Adjust new_freq such that, assuming a linear - // scaling of load vs. frequency, the load in the new frequency - // will be max_cpu_load: - new_freq = old_freq * this_smartass->cur_cpu_load / max_cpu_load; - if (new_freq > old_freq) // min_cpu_load > max_cpu_load ?! - new_freq = old_freq -1; - } - dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp down: ramp_dir=%d ideal=%d\n", - old_freq,ramp_dir,this_smartass->ideal_speed); - } - else { // ramp_dir==0 ?! Could the timer change its mind about a queued ramp up/down - // before the work task gets to run? 
- // This may also happen if we refused to ramp up because the nr_running()==1 - new_freq = old_freq; - dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d nothing: ramp_dir=%d nr_running=%lu\n", - old_freq,ramp_dir,nr_running()); - } - - // do actual ramp up (returns 0, if frequency change failed): - new_freq = target_freq(policy,this_smartass,new_freq,old_freq,relation); - if (new_freq) - this_smartass->freq_change_time_in_idle = - get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time); - - // reset timer: - if (new_freq < policy->max) - reset_timer(cpu,this_smartass); - // if we are maxed out, it is pointless to use the timer - // (idle cycles wake up the timer when the timer comes) - else if (timer_pending(&this_smartass->timer)) - del_timer(&this_smartass->timer); - - cpufreq_notify_utilization(policy, - (this_smartass->cur_cpu_load * policy->cur) / policy->max); - } -} - -static ssize_t show_debug_mask(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", debug_mask); -} - -static ssize_t store_debug_mask(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0) - debug_mask = input; - return res; -} - -static ssize_t show_up_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", up_rate_us); -} - -static ssize_t store_up_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0 && input <= 100000000) - up_rate_us = input; - return res; -} - -static ssize_t show_down_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", down_rate_us); -} - -static ssize_t store_down_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0 && input <= 100000000) - down_rate_us = input; - return res; -} - -static ssize_t show_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", sleep_ideal_freq); -} - -static ssize_t store_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) { - sleep_ideal_freq = input; - if (suspended) - smartass_update_min_max_allcpus(); - } - return res; -} - -static ssize_t show_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", sleep_wakeup_freq); -} - -static ssize_t store_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - sleep_wakeup_freq = input; - return res; -} - -static ssize_t show_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", awake_ideal_freq); -} - -static ssize_t store_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) { - awake_ideal_freq = input; - if (!suspended) - smartass_update_min_max_allcpus(); - } - return res; -} - -static ssize_t 
show_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", sample_rate_jiffies); -} - -static ssize_t store_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 0 && input <= 1000) - sample_rate_jiffies = input; - return res; -} - -static ssize_t show_ramp_up_step(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", ramp_up_step); -} - -static ssize_t store_ramp_up_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - ramp_up_step = input; - return res; -} - -static ssize_t show_ramp_down_step(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", ramp_down_step); -} - -static ssize_t store_ramp_down_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - ramp_down_step = input; - return res; -} - -static ssize_t show_max_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", max_cpu_load); -} - -static ssize_t store_max_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 0 && input <= 100) - max_cpu_load = input; - return res; -} - -static ssize_t show_min_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", min_cpu_load); -} - -static ssize_t store_min_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 0 && input < 100) - min_cpu_load = input; - return res; -} - -#define define_global_rw_attr(_name) \ -static struct global_attr _name##_attr = \ - __ATTR(_name, 0644, show_##_name, store_##_name) - -define_global_rw_attr(debug_mask); -define_global_rw_attr(up_rate_us); -define_global_rw_attr(down_rate_us); -define_global_rw_attr(sleep_ideal_freq); -define_global_rw_attr(sleep_wakeup_freq); -define_global_rw_attr(awake_ideal_freq); -define_global_rw_attr(sample_rate_jiffies); -define_global_rw_attr(ramp_up_step); -define_global_rw_attr(ramp_down_step); -define_global_rw_attr(max_cpu_load); -define_global_rw_attr(min_cpu_load); - -static struct attribute * smartass_attributes[] = { - &debug_mask_attr.attr, - &up_rate_us_attr.attr, - &down_rate_us_attr.attr, - &sleep_ideal_freq_attr.attr, - &sleep_wakeup_freq_attr.attr, - &awake_ideal_freq_attr.attr, - &sample_rate_jiffies_attr.attr, - &ramp_up_step_attr.attr, - &ramp_down_step_attr.attr, - &max_cpu_load_attr.attr, - &min_cpu_load_attr.attr, - NULL, -}; - -static struct attribute_group smartass_attr_group = { - .attrs = smartass_attributes, - .name = "smartassH3", -}; - -static int cpufreq_governor_smartass_h3(struct cpufreq_policy *new_policy, - unsigned int event) -{ - unsigned int cpu = new_policy->cpu; - int rc; - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!new_policy->cur)) - return -EINVAL; - - this_smartass->cur_policy 
= new_policy; - - this_smartass->enable = 1; - - smartass_update_min_max(this_smartass,new_policy,suspended); - - this_smartass->freq_table = cpufreq_frequency_get_table(cpu); - if (!this_smartass->freq_table) - printk(KERN_WARNING "Smartass: no frequency table for cpu %d?!\n",cpu); - - smp_wmb(); - - // Do not register the idle hook and create sysfs - // entries if we have already done so. - if (atomic_inc_return(&active_count) <= 1) { - rc = sysfs_create_group(cpufreq_global_kobject, - &smartass_attr_group); - if (rc) - return rc; - - pm_idle_old = pm_idle; - pm_idle = cpufreq_idle; - idle_notifier_register(&cpufreq_idle_nb); - } - - if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer)) - reset_timer(cpu,this_smartass); - - break; - - case CPUFREQ_GOV_LIMITS: - smartass_update_min_max(this_smartass,new_policy,suspended); - - if (this_smartass->cur_policy->cur > new_policy->max) { - dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new max freq: %d\n",new_policy->max); - __cpufreq_driver_target(this_smartass->cur_policy, - new_policy->max, CPUFREQ_RELATION_H); - } - else if (this_smartass->cur_policy->cur < new_policy->min) { - dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new min freq: %d\n",new_policy->min); - __cpufreq_driver_target(this_smartass->cur_policy, - new_policy->min, CPUFREQ_RELATION_L); - } - - if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer)) - reset_timer(cpu,this_smartass); - - break; - - case CPUFREQ_GOV_STOP: - this_smartass->enable = 0; - smp_wmb(); - del_timer(&this_smartass->timer); - flush_work(&freq_scale_work); - this_smartass->idle_exit_time = 0; - - if (atomic_dec_return(&active_count) <= 1) { - sysfs_remove_group(cpufreq_global_kobject, - &smartass_attr_group); - pm_idle = pm_idle_old; - idle_notifier_unregister(&cpufreq_idle_nb); - } - break; - } - - return 0; -} - -static void smartass_suspend(int cpu, int suspend) -{ - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); - struct cpufreq_policy *policy = this_smartass->cur_policy; - unsigned int new_freq; - - if (!this_smartass->enable) - return; - - smartass_update_min_max(this_smartass,policy,suspend); - if (!suspend) { // resume at max speed: - new_freq = validate_freq(policy,sleep_wakeup_freq); - - dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: awaking at %d\n",new_freq); - - __cpufreq_driver_target(policy, new_freq, - CPUFREQ_RELATION_L); - } else { - // to avoid wakeup issues with quick sleep/wakeup don't change actual frequency when entering sleep - // to allow some time to settle down. Instead we just reset our statistics (and reset the timer). - // Eventually, the timer will adjust the frequency if necessary. 
- - this_smartass->freq_change_time_in_idle = - get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time); - - dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: suspending at %d\n",policy->cur); - } - - reset_timer(smp_processor_id(),this_smartass); -} - -static void smartass_early_suspend(struct early_suspend *handler) { - int i; - if (suspended || sleep_ideal_freq==0) // disable behavior for sleep_ideal_freq==0 - return; - suspended = 1; - for_each_online_cpu(i) - smartass_suspend(i,1); -} - -static void smartass_late_resume(struct early_suspend *handler) { - int i; - if (!suspended) // already not suspended so nothing to do - return; - suspended = 0; - for_each_online_cpu(i) - smartass_suspend(i,0); -} - -static struct early_suspend smartass_power_suspend = { - .suspend = smartass_early_suspend, - .resume = smartass_late_resume, -#ifdef CONFIG_MACH_HERO - .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, -#endif -}; - -static int __init cpufreq_smartass_init(void) -{ - unsigned int i; - struct smartass_info_s *this_smartass; - debug_mask = 0; - up_rate_us = DEFAULT_UP_RATE_US; - down_rate_us = DEFAULT_DOWN_RATE_US; - sleep_ideal_freq = DEFAULT_SLEEP_IDEAL_FREQ; - sleep_wakeup_freq = DEFAULT_SLEEP_WAKEUP_FREQ; - awake_ideal_freq = DEFAULT_AWAKE_IDEAL_FREQ; - sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES; - ramp_up_step = DEFAULT_RAMP_UP_STEP; - ramp_down_step = DEFAULT_RAMP_DOWN_STEP; - max_cpu_load = DEFAULT_MAX_CPU_LOAD; - min_cpu_load = DEFAULT_MIN_CPU_LOAD; - - spin_lock_init(&cpumask_lock); - - suspended = 0; - - /* Initalize per-cpu data: */ - for_each_possible_cpu(i) { - this_smartass = &per_cpu(smartass_info, i); - this_smartass->enable = 0; - this_smartass->cur_policy = 0; - this_smartass->ramp_dir = 0; - this_smartass->time_in_idle = 0; - this_smartass->idle_exit_time = 0; - this_smartass->freq_change_time = 0; - this_smartass->freq_change_time_in_idle = 0; - this_smartass->cur_cpu_load = 0; - // intialize timer: - init_timer_deferrable(&this_smartass->timer); - this_smartass->timer.function = cpufreq_smartass_timer; - this_smartass->timer.data = i; - work_cpumask_test_and_clear(i); - } - - // Scale up is high priority - up_wq = create_workqueue("ksmartass_up"); - down_wq = create_workqueue("ksmartass_down"); - if (!up_wq || !down_wq) - return -ENOMEM; - - INIT_WORK(&freq_scale_work, cpufreq_smartass_freq_change_time_work); - - register_early_suspend(&smartass_power_suspend); - - return cpufreq_register_governor(&cpufreq_gov_smartass_h3); -} - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASSH3 -fs_initcall(cpufreq_smartass_init); -#else -module_init(cpufreq_smartass_init); -#endif - -static void __exit cpufreq_smartass_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_smartass_h3); - destroy_workqueue(up_wq); - destroy_workqueue(down_wq); -} - -module_exit(cpufreq_smartass_exit); - -MODULE_AUTHOR ("Erasmux, moded by H3ROS & C3C0"); -MODULE_DESCRIPTION ("'cpufreq_smartassH3' - A smart cpufreq governor"); -MODULE_LICENSE ("GPL"); - From ccb16215edd817ba928ef976d716d2ffa957271a Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 07:56:43 -0400 Subject: [PATCH 05/35] cpufreq:Remerge Conflicts with no warnings Conflicts: arch/arm/configs/KT_jf_defconfig drivers/cpufreq/Kconfig drivers/cpufreq/Makefile drivers/cpufreq/cpufreq_badass.c --- arch/arm/mach-msm/include/mach/ppmu.h | 122 ++ drivers/cpufreq/Kconfig | 152 ++- drivers/cpufreq/Makefile | 19 +- drivers/cpufreq/cpufreq_abyssplug.c | 817 ++++++++++++ drivers/cpufreq/cpufreq_adaptive.c | 952 ++++++++++++++ 
drivers/cpufreq/cpufreq_badass.c | 42 +- drivers/cpufreq/cpufreq_nightmare.c | 1656 +++++++++++++++++++++++++ drivers/cpufreq/cpufreq_pegasusq.c | 1636 ++++++++++++++++++++++++ drivers/cpufreq/cpufreq_slp.c | 1438 +++++++++++++++++++++ drivers/cpufreq/cpufreq_smartassH3.c | 904 ++++++++++++++ include/asm-generic/cputime.h | 1 + 11 files changed, 7667 insertions(+), 72 deletions(-) create mode 100644 arch/arm/mach-msm/include/mach/ppmu.h create mode 100644 drivers/cpufreq/cpufreq_abyssplug.c create mode 100644 drivers/cpufreq/cpufreq_adaptive.c create mode 100644 drivers/cpufreq/cpufreq_nightmare.c create mode 100644 drivers/cpufreq/cpufreq_pegasusq.c create mode 100644 drivers/cpufreq/cpufreq_slp.c create mode 100644 drivers/cpufreq/cpufreq_smartassH3.c diff --git a/arch/arm/mach-msm/include/mach/ppmu.h b/arch/arm/mach-msm/include/mach/ppmu.h new file mode 100644 index 00000000..684ce5cd --- /dev/null +++ b/arch/arm/mach-msm/include/mach/ppmu.h @@ -0,0 +1,122 @@ +/* linux/arch/arm/mach-exynos/include/mach/ppmu.h + * + * Copyright (c) 2010 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * EXYNOS4 - PPMU support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#ifndef __ASM_ARCH_PPMU_H +#define __ASM_ARCH_PPMU_H __FILE__ + +#define NUMBER_OF_COUNTER 4 + +#define PPMU_CNTENS 0x10 +#define PPMU_CNTENC 0x20 +#define PPMU_INTENS 0x30 +#define PPMU_INTENC 0x40 +#define PPMU_FLAG 0x50 + +#define PPMU_CCNT 0x100 +#define PPMU_PMCNT0 0x110 +#define PPMU_PMCNT_OFFSET 0x10 + +#define PPMU_BEVT0SEL 0x1000 +#define PPMU_BEVTSEL_OFFSET 0x100 +#define PPMU_CNT_RESET 0x1800 + +#define DEVT0_SEL 0x1000 +#define DEVT0_ID 0x1010 +#define DEVT0_IDMSK 0x1014 +#define DEVT_ID_OFFSET 0x100 + +#define DEFAULT_WEIGHT 1 + +#define MAX_CCNT 100 + +/* For flags */ +#define VIDEO_DOMAIN 0x00000001 +#define AUDIO_DOMAIN 0x00000002 +#define ALL_DOMAIN 0xffffffff + +/* For event */ +#define RD_DATA_COUNT 0x00000005 +#define WR_DATA_COUNT 0x00000006 +#define RDWR_DATA_COUNT 0x00000007 + +#define PMCNT_OFFSET(i) (PPMU_PMCNT0 + (PPMU_PMCNT_OFFSET * i)) + +enum ppmu_counter { + PPMU_PMNCNT0, + PPMU_PMCCNT1, + PPMU_PMNCNT2, + PPMU_PMNCNT3, + PPMU_PMNCNT_MAX, +}; + +enum ppmu_ch { + DMC0, + DMC1, +}; + +enum ppmu_type { + PPMU_MIF, + PPMU_INT, + PPMU_TYPE_END, +}; + +enum exynos4_ppmu { + PPMU_DMC0, + PPMU_DMC1, + PPMU_CPU, +#ifdef CONFIG_ARCH_EXYNOS5 + PPMU_DDR_C, + PPMU_DDR_R1, + PPMU_DDR_L, + PPMU_RIGHT0_BUS, +#endif + PPMU_END, +}; + +extern unsigned long long ppmu_load[PPMU_END]; +extern unsigned long long ppmu_load_detail[2][PPMU_END]; + +struct exynos4_ppmu_hw { + struct list_head node; + void __iomem *hw_base; + unsigned int ccnt; + unsigned int event[NUMBER_OF_COUNTER]; + unsigned int weight; + int usage; + int id; + unsigned int flags; + struct device *dev; + unsigned int count[NUMBER_OF_COUNTER]; +}; + +void exynos4_ppc_reset(struct exynos4_ppmu_hw *ppmu); +void exynos4_ppc_start(struct exynos4_ppmu_hw *ppmu); +void exynos4_ppc_stop(struct exynos4_ppmu_hw *ppmu); +void exynos4_ppc_setevent(struct exynos4_ppmu_hw *ppmu, + unsigned int evt_num); +unsigned long long exynos4_ppc_update(struct exynos4_ppmu_hw *ppmu); + +void exynos4_ppmu_reset(struct exynos4_ppmu_hw *ppmu); +void exynos4_ppmu_start(struct exynos4_ppmu_hw *ppmu); +void exynos4_ppmu_stop(struct exynos4_ppmu_hw *ppmu); +void exynos4_ppmu_setevent(struct exynos4_ppmu_hw *ppmu, + unsigned int 
evt_num); +unsigned long long exynos4_ppmu_update(struct exynos4_ppmu_hw *ppmu, int ch); + +void ppmu_init(struct exynos4_ppmu_hw *ppmu, struct device *dev); +void ppmu_start(struct device *dev); +void ppmu_update(struct device *dev, int ch); +void ppmu_reset(struct device *dev); + +extern struct exynos4_ppmu_hw exynos_ppmu[]; +#endif /* __ASM_ARCH_PPMU_H */ + diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index be30f8aa..64452292 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -50,6 +50,29 @@ choice This option sets which CPUFreq governor shall be loaded at startup. If in doubt, select 'performance'. +config CPU_FREQ_DEFAULT_GOV_ABYSSPLUG + bool "abyssplug" + select CPU_FREQ_GOV_ABYSSPLUG + select CPU_FREQ_GOV_PERFORMANCE + ---help--- + Use the CPUFreq governor 'abyssplug' as default. This allows you + to get a full dynamic frequency capable system with CPU + hotplug support by simply loading your cpufreq low-level + hardware driver. Be aware that not all cpufreq drivers + support the hotplug governor. If unsure have a look at + the help section of the driver. Fallback governor will be the + performance governor. + +config CPU_FREQ_DEFAULT_GOV_ADAPTIVE + bool "adaptive" + select CPU_FREQ_GOV_ADAPTIVE + help + Use the CPUFreq governor 'adaptive' as default. This allows + you to get a full dynamic cpu frequency capable system by simply + loading your cpufreq low-level hardware driver, using the + 'adaptive' governor for latency-sensitive workloads and demanding + performance. + config CPU_FREQ_DEFAULT_GOV_ASSWAX bool "asswax" select CPU_FREQ_GOV_ASSWAX @@ -68,19 +91,6 @@ config CPU_FREQ_DEFAULT_GOV_BADASS governor. If unsure have a look at the help section of the driver. Fallback governor will be the performance governor. -config CPU_FREQ_DEFAULT_GOV_BADASS - bool "badass" - select CPU_FREQ_GOV_BADASS - help - 'badass' - This driver adds a dynamic cpufreq policy governor. - The governor does a periodic polling and - changes frequency based on the CPU utilization. - The support for this governor depends on CPU capability to - do fast frequency switching (i.e, very low latency frequency - transitions). - - If in doubt, say N - config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE bool "conservative" select CPU_FREQ_GOV_CONSERVATIVE @@ -98,19 +108,19 @@ config CPU_FREQ_DEFAULT_GOV_DANCEDANCE select CPU_FREQ_GOV_DANCEDANCE help -config CPU_FREQ_DEFAULT_GOV_LIONHEART - bool "lionheart" - select CPU_FREQ_GOV_LIONHEART - help - Use the CPUFreq governor 'lionheart' as default - -config CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND - - bool "intellidemand" - select CPU_FREQ_GOV_INTELLIDEMAND +config CPU_FREQ_DEFAULT_GOV_INTERACTIVE + bool "interactive" + select CPU_FREQ_GOV_INTERACTIVE help - Use the CPUFreq governor 'intellidemand' as default. This is - based on Ondemand with browsing detection based on GPU loading + Use the CPUFreq governor 'interactive' as default. This allows + you to get a full dynamic cpu frequency capable system by simply + loading your cpufreq low-level hardware driver, using the + 'interactive' governor for latency-sensitive workloads. + +config CPU_FREQ_DEFAULT_GOV_NIGHTMARE + bool "nightmare" + select CPU_FREQ_GOV_NIGHTMARE + help config CPU_FREQ_DEFAULT_GOV_ONDEMAND bool "ondemand" @@ -124,6 +134,12 @@ config CPU_FREQ_DEFAULT_GOV_ONDEMAND governor. If unsure have a look at the help section of the driver. Fallback governor will be the performance governor. 
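As a quick illustration of how one of these default-governor entries surfaces in a build configuration, a minimal .config fragment might look roughly like the sketch below. This is only an illustrative aside, not part of the patch: it assumes 'abyssplug' is chosen from the choice block, uses only the symbols introduced or referenced above, and a real defconfig for a given board will contain much more.

# illustrative .config fragment (assumed values, not from the patch)
CONFIG_CPU_FREQ=y
# the governor chosen as default from the choice block above
CONFIG_CPU_FREQ_DEFAULT_GOV_ABYSSPLUG=y
# pulled in by that entry's 'select' statements, including the performance fallback
CONFIG_CPU_FREQ_GOV_ABYSSPLUG=y
CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
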
+config CPU_FREQ_DEFAULT_GOV_PEGASUSQ
+ bool "pegasusq"
+ select CPU_FREQ_GOV_PEGASUSQ
+ help
+ Use the CPUFreq governor 'pegasusq' as default.
+
 config CPU_FREQ_DEFAULT_GOV_SLP
 bool "slp"
 select CPU_FREQ_GOV_SLP
@@ -147,6 +163,12 @@ config CPU_FREQ_DEFAULT_GOV_POWERSAVE
 the frequency statically to the lowest frequency supported by
 the CPU.
 
+config CPU_FREQ_DEFAULT_GOV_SMARTASSH3
+ bool "smartassH3"
+ select CPU_FREQ_GOV_SMARTASSH3
+ help
+ Use the CPUFreq governor 'smartassH3' as default.
+
 config CPU_FREQ_DEFAULT_GOV_USERSPACE
 bool "userspace"
 select CPU_FREQ_GOV_USERSPACE
@@ -165,19 +187,37 @@ config CPU_FREQ_DEFAULT_GOV_WHEATLEY
 
 endchoice
 
-config CPU_FREQ_GOV_LIONHEART
- tristate "lionheart"
- depends on CPU_FREQ
- help
- Use the CPUFreq governor 'lionheart' as default.
+config CPU_FREQ_GOV_ABYSSPLUG
+ tristate "'abyssplug' cpufreq governor"
+ depends on CPU_FREQ && NO_HZ && HOTPLUG_CPU
+ ---help---
+ 'abyssplug' - this driver mimics the frequency scaling behavior
+ in 'ondemand', but with several key differences. First is
+ that frequency transitions use the CPUFreq table directly,
+ instead of incrementing in a percentage of the maximum
+ available frequency. Second, 'abyssplug' will offline auxiliary
+ CPUs when the system is idle, and online those CPUs once the
+ system becomes busy again. This last feature is needed for
+ architectures which transition to low power states when only
+ the "master" CPU is online, or for thermally constrained
+ devices.
+ If you don't have one of these architectures or devices, use
+ 'ondemand' instead.
+ If in doubt, say N.
 
-config CPU_FREQ_GOV_USERSPACE
- tristate "'userspace' governor for userspace frequency scaling"
+config CPU_FREQ_GOV_ADAPTIVE
+ tristate "'adaptive' cpufreq policy governor"
 help
- Enable this cpufreq governor when you either want to set the
- CPU frequency manually or when a userspace program shall
- be able to set the CPU dynamically, like on LART
- .
+ 'adaptive' - This driver adds a dynamic cpufreq policy governor
+ designed for latency-sensitive workloads and also for demanding
+ performance.
+
+ This governor attempts to reduce the latency of clock
+ increases so that the system is more responsive to
+ interactive workloads at the lowest steady state, while in the
+ middle of the operating range the frequency is raised step by
+ step to reduce power consumption and to keep the system from
+ jumping straight to the maximum operation level.
 
 To compile this driver as a module, choose M here: the
 module will be called cpufreq_adaptive.
@@ -232,23 +272,26 @@ config CPU_FREQ_GOV_DANCEDANCE
 tristate "'dancedance' cpufreq governor"
 depends on CPU_FREQ
 
-config CPU_FREQ_GOV_INTELLIDEMAND
- tristate "'intellidemand' cpufreq policy governor"
- select CPU_FREQ_TABLE
- help
- 'intellidemand' - This driver adds a dynamic cpufreq policy governor.
- The governor does a periodic polling and
- changes frequency based on the CPU utilization.
- The support for this governor depends on CPU capability to
- do fast frequency switching (i.e, very low latency frequency
- transitions). with browsing detection based on GPU loading
+config CPU_FREQ_GOV_INTERACTIVE
+ tristate "'interactive' cpufreq policy governor"
+ help
+ 'interactive' - This driver adds a dynamic cpufreq policy governor
+ designed for latency-sensitive workloads.
+
+ This governor attempts to reduce the latency of clock
+ increases so that the system is more responsive to
+ interactive workloads.
 
- To compile this driver as a module, choose M here: the
- module will be called cpufreq_ondemand.
+ To compile this driver as a module, choose M here: the + module will be called cpufreq_interactive. - For details, take a look at linux/Documentation/cpu-freq. + For details, take a look at linux/Documentation/cpu-freq. - If in doubt, say N. + If in doubt, say N. + +config CPU_FREQ_GOV_NIGHTMARE + tristate "'nightmare' cpufreq governor" + depends on CPU_FREQ config CPU_FREQ_GOV_ONDEMAND tristate "'ondemand' cpufreq policy governor" @@ -279,6 +322,9 @@ config CPU_FREQ_GOV_PERFORMANCE If in doubt, say Y. +config CPU_FREQ_GOV_PEGASUSQ + tristate "'pegasusq' cpufreq policy governor" + config CPU_FREQ_GOV_POWERSAVE tristate "'powersave' governor" help @@ -293,6 +339,12 @@ config CPU_FREQ_GOV_POWERSAVE config CPU_FREQ_GOV_SLP tristate "'slp' cpufreq policy governor" +config CPU_FREQ_GOV_SMARTASSH3 + tristate "'smartassH3' cpufreq governor" + depends on CPU_FREQ + help + 'smartassH3' - a "smart" governor + config CPU_FREQ_GOV_USERSPACE tristate "'userspace' governor for userspace frequency scaling" help diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 09c5ea20..c65736f3 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -4,19 +4,24 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq.o obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o # CPUfreq governors +obj-$(CONFIG_CPU_FREQ_GOV_ABYSSPLUG) += cpufreq_abyssplug.o +obj-$(CONFIG_CPU_FREQ_GOV_ADAPTIVE) += cpufreq_adaptive.o +obj-$(CONFIG_CPU_FREQ_GOV_ASSWAX) += cpufreq_asswax.o +obj-$(CONFIG_CPU_FREQ_GOV_BADASS) += cpufreq_badass.o +obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o +obj-$(CONFIG_CPU_FREQ_GOV_DANCEDANCE) += cpufreq_dancedance.o +obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o +obj-$(CONFIG_CPU_FREQ_GOV_NIGHTMARE) += cpufreq_nightmare.o +obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o +obj-$(CONFIG_CPU_FREQ_GOV_PEGASUSQ) += cpufreq_pegasusq.o obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o +obj-$(CONFIG_CPU_FREQ_GOV_SLP) += cpufreq_slp.o +obj-$(CONFIG_CPU_FREQ_GOV_SMARTASSH3) += cpufreq_smartassH3.o obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o -obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o -obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o -obj-$(CONFIG_CPU_FREQ_GOV_BADASS) += cpufreq_badass.o obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o -obj-$(CONFIG_CPU_FREQ_GOV_ASSWAX) += cpufreq_asswax.o -obj-$(CONFIG_CPU_FREQ_GOV_DANCEDANCE) += cpufreq_dancedance.o - - # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o diff --git a/drivers/cpufreq/cpufreq_abyssplug.c b/drivers/cpufreq/cpufreq_abyssplug.c new file mode 100644 index 00000000..37df4463 --- /dev/null +++ b/drivers/cpufreq/cpufreq_abyssplug.c @@ -0,0 +1,817 @@ +/* + * CPUFreq AbyssPlug governor + * + * + * Based on hotplug governor + * Copyright (C) 2010 Texas Instruments, Inc. + * Mike Turquette + * Santosh Shilimkar + * + * Based on ondemand governor + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi , + * Jun Nakajima + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* greater than 95% avg load across online CPUs increases frequency */ +#define DEFAULT_UP_FREQ_MIN_LOAD (95) + +/* Keep 10% of idle under the up threshold when decreasing the frequency */ +#define DEFAULT_FREQ_DOWN_DIFFERENTIAL (1) + +/* less than 40% avg load across online CPUs decreases frequency */ +#define DEFAULT_DOWN_FREQ_MAX_LOAD (40) + +/* default sampling period (uSec) is bogus; 10x ondemand's default for x86 */ +#define DEFAULT_SAMPLING_PERIOD (50000) + +/* default number of sampling periods to average before hotplug-in decision */ +#define DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS (5) + +/* default number of sampling periods to average before hotplug-out decision */ +#define DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS (20) + +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event); +//static int hotplug_boost(struct cpufreq_policy *policy); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ABYSSPLUG +static +#endif +struct cpufreq_governor cpufreq_gov_abyssplug = { + .name = "abyssplug", + .governor = cpufreq_governor_dbs, + .owner = THIS_MODULE, +}; + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct work_struct cpu_up_work; + struct work_struct cpu_down_work; + struct cpufreq_frequency_table *freq_table; + int cpu; + unsigned int boost_applied; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, hp_cpu_dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on + * different CPUs. It protects dbs_enable in governor start/stop. + */ +static DEFINE_MUTEX(dbs_mutex); + +static struct workqueue_struct *khotplug_wq; + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int down_threshold; + unsigned int hotplug_in_sampling_periods; + unsigned int hotplug_out_sampling_periods; + unsigned int hotplug_load_index; + unsigned int *hotplug_load_history; + unsigned int ignore_nice; + unsigned int io_is_busy; + unsigned int boost_timeout; +} dbs_tuners_ins = { + .sampling_rate = DEFAULT_SAMPLING_PERIOD, + .up_threshold = DEFAULT_UP_FREQ_MIN_LOAD, + .down_differential = DEFAULT_FREQ_DOWN_DIFFERENTIAL, + .down_threshold = DEFAULT_DOWN_FREQ_MAX_LOAD, + .hotplug_in_sampling_periods = DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS, + .hotplug_out_sampling_periods = DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS, + .hotplug_load_index = 0, + .ignore_nice = 0, + .io_is_busy = 0, + .boost_timeout = 0, +}; + +/* + * A corner case exists when switching io_is_busy at run-time: comparing idle + * times from a non-io_is_busy period to an io_is_busy period (or vice-versa) + * will misrepresent the actual change in system idleness. We ignore this + * corner case: enabling io_is_busy might cause freq increase and disabling + * might cause freq decrease, which probably matches the original intent. 
+ */ +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time; + u64 iowait_time; + + /* cpufreq-abyssplug always assumes CONFIG_NO_HZ */ + idle_time = get_cpu_idle_time_us(cpu, wall); + + /* add time spent doing I/O to idle time */ + if (dbs_tuners_ins.io_is_busy) { + iowait_time = get_cpu_iowait_time_us(cpu, wall); + /* cpufreq-abyssplug always assumes CONFIG_NO_HZ */ + if (iowait_time != -1ULL && idle_time >= iowait_time) + idle_time -= iowait_time; + } + + return idle_time; +} + +/************************** sysfs interface ************************/ + +/* XXX look at global sysfs macros in cpufreq.h, can those be used here? */ + +/* cpufreq_abyssplug Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(up_threshold, up_threshold); +show_one(down_differential, down_differential); +show_one(down_threshold, down_threshold); +show_one(hotplug_in_sampling_periods, hotplug_in_sampling_periods); +show_one(hotplug_out_sampling_periods, hotplug_out_sampling_periods); +show_one(ignore_nice_load, ignore_nice); +show_one(io_is_busy, io_is_busy); +show_one(boost_timeout, boost_timeout); + +static ssize_t store_boost_timeout(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.boost_timeout = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.sampling_rate = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input <= dbs_tuners_ins.down_threshold) { + return -EINVAL; + } + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.up_threshold = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_down_differential(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input >= dbs_tuners_ins.up_threshold) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.down_differential = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input >= dbs_tuners_ins.up_threshold) { + return -EINVAL; + } + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.down_threshold = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_hotplug_in_sampling_periods(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input; + unsigned int *temp; + unsigned int max_windows; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + /* already using this value, bail out */ + if (input == dbs_tuners_ins.hotplug_in_sampling_periods) 
+ return count; + + mutex_lock(&dbs_mutex); + ret = count; + max_windows = max(dbs_tuners_ins.hotplug_in_sampling_periods, + dbs_tuners_ins.hotplug_out_sampling_periods); + + /* no need to resize array */ + if (input <= max_windows) { + dbs_tuners_ins.hotplug_in_sampling_periods = input; + goto out; + } + + /* resize array */ + temp = kmalloc((sizeof(unsigned int) * input), GFP_KERNEL); + + if (!temp || IS_ERR(temp)) { + ret = -ENOMEM; + goto out; + } + + memcpy(temp, dbs_tuners_ins.hotplug_load_history, + (max_windows * sizeof(unsigned int))); + kfree(dbs_tuners_ins.hotplug_load_history); + + /* replace old buffer, old number of sampling periods & old index */ + dbs_tuners_ins.hotplug_load_history = temp; + dbs_tuners_ins.hotplug_in_sampling_periods = input; + dbs_tuners_ins.hotplug_load_index = max_windows; +out: + mutex_unlock(&dbs_mutex); + + return ret; +} + +static ssize_t store_hotplug_out_sampling_periods(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input; + unsigned int *temp; + unsigned int max_windows; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + /* already using this value, bail out */ + if (input == dbs_tuners_ins.hotplug_out_sampling_periods) + return count; + + mutex_lock(&dbs_mutex); + ret = count; + max_windows = max(dbs_tuners_ins.hotplug_in_sampling_periods, + dbs_tuners_ins.hotplug_out_sampling_periods); + + /* no need to resize array */ + if (input <= max_windows) { + dbs_tuners_ins.hotplug_out_sampling_periods = input; + goto out; + } + + /* resize array */ + temp = kmalloc((sizeof(unsigned int) * input), GFP_KERNEL); + + if (!temp || IS_ERR(temp)) { + ret = -ENOMEM; + goto out; + } + + memcpy(temp, dbs_tuners_ins.hotplug_load_history, + (max_windows * sizeof(unsigned int))); + kfree(dbs_tuners_ins.hotplug_load_history); + + /* replace old buffer, old number of sampling periods & old index */ + dbs_tuners_ins.hotplug_load_history = temp; + dbs_tuners_ins.hotplug_out_sampling_periods = input; + dbs_tuners_ins.hotplug_load_index = max_windows; +out: + mutex_unlock(&dbs_mutex); + + return ret; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + mutex_lock(&dbs_mutex); + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + mutex_unlock(&dbs_mutex); + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(hp_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + + } + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.io_is_busy = !!input; + mutex_unlock(&dbs_mutex); + + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(up_threshold); +define_one_global_rw(down_differential); +define_one_global_rw(down_threshold); +define_one_global_rw(hotplug_in_sampling_periods); 
+define_one_global_rw(hotplug_out_sampling_periods); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(io_is_busy); +define_one_global_rw(boost_timeout); + +static struct attribute *dbs_attributes[] = { + &sampling_rate.attr, + &up_threshold.attr, + &down_differential.attr, + &down_threshold.attr, + &hotplug_in_sampling_periods.attr, + &hotplug_out_sampling_periods.attr, + &ignore_nice_load.attr, + &io_is_busy.attr, + &boost_timeout.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "abyssplug", +}; + +/************************** sysfs end ************************/ + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + /* combined load of all enabled CPUs */ + unsigned int total_load = 0; + /* single largest CPU load percentage*/ + unsigned int max_load = 0; + /* largest CPU load in terms of frequency */ + unsigned int max_load_freq = 0; + /* average load across all enabled CPUs */ + unsigned int avg_load = 0; + /* average load across multiple sampling periods for hotplug events */ + unsigned int hotplug_in_avg_load = 0; + unsigned int hotplug_out_avg_load = 0; + /* number of sampling periods averaged for hotplug decisions */ + unsigned int periods; + + struct cpufreq_policy *policy; + unsigned int i, j; + + policy = this_dbs_info->cur_policy; + + /* + * cpu load accounting + * get highest load, total load and average load across all CPUs + */ + for_each_cpu(j, policy->cpus) { + unsigned int load; + unsigned int idle_time, wall_time; + cputime64_t cur_wall_time, cur_idle_time; + struct cpu_dbs_info_s *j_dbs_info; + + j_dbs_info = &per_cpu(hp_cpu_dbs_info, j); + + /* update both cur_idle_time and cur_wall_time */ + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + + /* how much wall time has passed since last iteration? */ + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + /* how much idle time has passed since last iteration? */ + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + /* load is the percentage of time not spent in idle */ + load = 100 * (wall_time - idle_time) / wall_time; + + /* keep track of combined load across all CPUs */ + total_load += load; + + /* keep track of highest single load across all CPUs */ + if (load > max_load) + max_load = load; + } + + /* use the max load in the OPP freq change policy */ + max_load_freq = max_load * policy->cur; + + /* calculate the average load across all related CPUs */ + avg_load = total_load / num_online_cpus(); + + mutex_lock(&dbs_mutex); + + /* + * hotplug load accounting + * average load over multiple sampling periods + */ + + /* how many sampling periods do we use for hotplug decisions? 
*/ + periods = max(dbs_tuners_ins.hotplug_in_sampling_periods, + dbs_tuners_ins.hotplug_out_sampling_periods); + + /* store avg_load in the circular buffer */ + dbs_tuners_ins.hotplug_load_history[dbs_tuners_ins.hotplug_load_index] + = avg_load; + + /* compute average load across in & out sampling periods */ + for (i = 0, j = dbs_tuners_ins.hotplug_load_index; + i < periods; i++, j--) { + if (i < dbs_tuners_ins.hotplug_in_sampling_periods) + hotplug_in_avg_load += + dbs_tuners_ins.hotplug_load_history[j]; + if (i < dbs_tuners_ins.hotplug_out_sampling_periods) + hotplug_out_avg_load += + dbs_tuners_ins.hotplug_load_history[j]; + + if (j == 0) + j = periods; + } + + hotplug_in_avg_load = hotplug_in_avg_load / + dbs_tuners_ins.hotplug_in_sampling_periods; + + hotplug_out_avg_load = hotplug_out_avg_load / + dbs_tuners_ins.hotplug_out_sampling_periods; + + /* return to first element if we're at the circular buffer's end */ + if (++dbs_tuners_ins.hotplug_load_index == periods) + dbs_tuners_ins.hotplug_load_index = 0; + + /* check if auxiliary CPU is needed based on avg_load */ + if (avg_load > dbs_tuners_ins.up_threshold) { + /* should we enable auxillary CPUs? */ + if (num_online_cpus() < 2 && hotplug_in_avg_load > + dbs_tuners_ins.up_threshold) { + queue_work_on(this_dbs_info->cpu, khotplug_wq, + &this_dbs_info->cpu_up_work); + goto out; + } + } + + /* check for frequency increase based on max_load */ + if (max_load > dbs_tuners_ins.up_threshold) { + /* increase to highest frequency supported */ + if (policy->cur < policy->max) + __cpufreq_driver_target(policy, policy->max, + CPUFREQ_RELATION_H); + + goto out; + } + + /* check for frequency decrease */ + if (avg_load < dbs_tuners_ins.down_threshold) { + /* are we at the minimum frequency already? */ + if (policy->cur <= policy->min) { + /* should we disable auxillary CPUs? 
*/ + if (num_online_cpus() > 1 && hotplug_out_avg_load < + dbs_tuners_ins.down_threshold) { + queue_work_on(this_dbs_info->cpu, khotplug_wq, + &this_dbs_info->cpu_down_work); + } + goto out; + } + } + + /* + * go down to the lowest frequency which can sustain the load by + * keeping 30% of idle in order to not cross the up_threshold + */ + if ((max_load_freq < + (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * + policy->cur) && (policy->cur > policy->min)) { + unsigned int freq_next; + freq_next = max_load_freq / + (dbs_tuners_ins.up_threshold - + dbs_tuners_ins.down_differential); + + if (freq_next < policy->min) + freq_next = policy->min; + + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } +out: + mutex_unlock(&dbs_mutex); + return; +} + +static void __cpuinit do_cpu_up(struct work_struct *work) +{ + cpu_up(1); +} + +static void __cpuinit do_cpu_down(struct work_struct *work) +{ + cpu_down(1); +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + int delay = 0; + + mutex_lock(&dbs_info->timer_mutex); + if (!dbs_info->boost_applied) { + dbs_check_cpu(dbs_info); + /* We want all related CPUs to do sampling nearly on same jiffy */ + delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + } else { + delay = usecs_to_jiffies(dbs_tuners_ins.boost_timeout); + dbs_info->boost_applied = 0; + if (num_online_cpus() < 2) + queue_work_on(cpu, khotplug_wq, + &dbs_info->cpu_up_work); + } + queue_delayed_work_on(cpu, khotplug_wq, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all related CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + delay -= jiffies % delay; + + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + if (!dbs_info->boost_applied) + delay = usecs_to_jiffies(dbs_tuners_ins.boost_timeout); + queue_delayed_work_on(dbs_info->cpu, khotplug_wq, &dbs_info->work, + delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int i, j, max_periods; + int rc; + + this_dbs_info = &per_cpu(hp_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(hp_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + + max_periods = max(DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS, + DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS); + dbs_tuners_ins.hotplug_load_history = kmalloc( + (sizeof(unsigned int) * max_periods), + GFP_KERNEL); + if (!dbs_tuners_ins.hotplug_load_history) { + WARN_ON(1); + return -ENOMEM; + } + for (i = 0; i < max_periods; i++) + dbs_tuners_ins.hotplug_load_history[i] = 50; + } + this_dbs_info->cpu = cpu; + this_dbs_info->freq_table = cpufreq_frequency_get_table(cpu); + /* + * Start the timerschedule work, when this 
governor + * is used for first time + */ + if (dbs_enable == 1) { + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + } + if (!dbs_tuners_ins.boost_timeout) + dbs_tuners_ins.boost_timeout = dbs_tuners_ins.sampling_rate * 30; + mutex_unlock(&dbs_mutex); + + mutex_init(&this_dbs_info->timer_mutex); + dbs_timer_init(this_dbs_info); + break; + + case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + mutex_destroy(&this_dbs_info->timer_mutex); + dbs_enable--; + mutex_unlock(&dbs_mutex); + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + kfree(dbs_tuners_ins.hotplug_load_history); + /* + * XXX BIG CAVEAT: Stopping the governor with CPU1 offline + * will result in it remaining offline until the user onlines + * it again. It is up to the user to do this (for now). + */ + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +#if 0 +static int hotplug_boost(struct cpufreq_policy *policy) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + + this_dbs_info = &per_cpu(hp_cpu_dbs_info, cpu); + +#if 0 + /* Already at max? */ + if (policy->cur == policy->max) + return; +#endif + + mutex_lock(&this_dbs_info->timer_mutex); + this_dbs_info->boost_applied = 1; + __cpufreq_driver_target(policy, policy->max, + CPUFREQ_RELATION_H); + mutex_unlock(&this_dbs_info->timer_mutex); + + return 0; +} +#endif + +static int __init cpufreq_gov_dbs_init(void) +{ + int err; + cputime64_t wall; + u64 idle_time; + int cpu = get_cpu(); + struct cpu_dbs_info_s *dbs_info = &per_cpu(hp_cpu_dbs_info, 0); + + INIT_WORK(&dbs_info->cpu_up_work, do_cpu_up); + INIT_WORK(&dbs_info->cpu_down_work, do_cpu_down); + + idle_time = get_cpu_idle_time_us(cpu, &wall); + put_cpu(); + if (idle_time != -1ULL) { + dbs_tuners_ins.up_threshold = DEFAULT_UP_FREQ_MIN_LOAD; + } else { + pr_err("cpufreq-abyssplug: %s: assumes CONFIG_NO_HZ\n", + __func__); + return -EINVAL; + } + + khotplug_wq = create_workqueue("khotplug"); + if (!khotplug_wq) { + pr_err("Creation of khotplug failed\n"); + return -EFAULT; + } + err = cpufreq_register_governor(&cpufreq_gov_abyssplug); + if (err) + destroy_workqueue(khotplug_wq); + + return err; +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_abyssplug); + destroy_workqueue(khotplug_wq); +} + +MODULE_DESCRIPTION("'cpufreq_abyssplug' - cpufreq governor for dynamic frequency scaling and CPU hotplug"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ABYSSPLUG +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); + diff --git a/drivers/cpufreq/cpufreq_adaptive.c b/drivers/cpufreq/cpufreq_adaptive.c new file mode 100644 index 00000000..2eff3e28 --- /dev/null +++ b/drivers/cpufreq/cpufreq_adaptive.c @@ -0,0 +1,952 @@ +/* + * drivers/cpufreq/cpufreq_adaptive.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . 
+ * Jun Nakajima + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) +#define DEF_FREQUENCY_UP_THRESHOLD (80) +#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) +#define MICRO_FREQUENCY_UP_THRESHOLD (95) +#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) +#define MIN_FREQUENCY_UP_THRESHOLD (11) +#define MAX_FREQUENCY_UP_THRESHOLD (100) +#define MIN_ONDEMAND_THRESHOLD (4) +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. + * All times here are in uS. + */ +#define MIN_SAMPLING_RATE_RATIO (2) + +static unsigned int min_sampling_rate; + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +static void (*pm_idle_old)(void); +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ADAPTIVE +static +#endif +struct cpufreq_governor cpufreq_gov_adaptive = { + .name = "adaptive", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_hi_jiffies; + int cpu; + unsigned int sample_type:1; + bool ondemand; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on + * different CPUs. It protects dbs_enable in governor start/stop. + */ +static DEFINE_MUTEX(dbs_mutex); +static struct task_struct *up_task; +static struct workqueue_struct *down_wq; +static struct work_struct freq_scale_down_work; +static cpumask_t up_cpumask; +static spinlock_t up_cpumask_lock; +static cpumask_t down_cpumask; +static spinlock_t down_cpumask_lock; + +static DEFINE_PER_CPU(cputime64_t, idle_in_idle); +static DEFINE_PER_CPU(cputime64_t, idle_exit_wall); + +static struct timer_list cpu_timer; +static unsigned int target_freq; +static DEFINE_MUTEX(short_timer_mutex); + +/* Go to max speed when CPU load at or above this value. 
*/ +#define DEFAULT_GO_MAXSPEED_LOAD 60 +static unsigned long go_maxspeed_load; + +#define DEFAULT_KEEP_MINSPEED_LOAD 30 +static unsigned long keep_minspeed_load; + +#define DEFAULT_STEPUP_LOAD 10 +static unsigned long step_up_load; + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int ignore_nice; + unsigned int io_is_busy; +} dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, + .ignore_nice = 0, +}; + +static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +static void adaptive_init_cpu(int cpu) +{ + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + dbs_info->freq_table = cpufreq_frequency_get_table(cpu); +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_max(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + printk_once(KERN_INFO "CPUFREQ: adaptive sampling_rate_max " + "sysfs file is deprecated - used by: %s\n", current->comm); + return sprintf(buf, "%u\n", -1U); +} + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +define_one_global_ro(sampling_rate_max); +define_one_global_ro(sampling_rate_min); + +/* cpufreq_adaptive Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(up_threshold, up_threshold); +show_one(ignore_nice_load, ignore_nice); + +/*** delete after deprecation time ***/ + +#define DEPRECATION_MSG(file_name) \ + printk_once(KERN_INFO "CPUFREQ: Per core adaptive sysfs " \ + "interface is deprecated - " #file_name "\n"); + +#define show_one_old(file_name) \ +static ssize_t show_##file_name##_old \ +(struct cpufreq_policy *unused, char *buf) \ +{ \ + printk_once(KERN_INFO "CPUFREQ: Per core adaptive sysfs " \ + "interface is deprecated - " #file_name "\n"); \ + return show_##file_name(NULL, NULL, buf); \ +} + +/*** delete after deprecation time ***/ + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.io_is_busy = !!input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.up_threshold = input; + mutex_unlock(&dbs_mutex); + + return 
count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + mutex_lock(&dbs_mutex); + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + mutex_unlock(&dbs_mutex); + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time_us(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + + } + mutex_unlock(&dbs_mutex); + + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(up_threshold); +define_one_global_rw(ignore_nice_load); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_max.attr, + &sampling_rate_min.attr, + &sampling_rate.attr, + &up_threshold.attr, + &ignore_nice_load.attr, + &io_is_busy.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "adaptive", +}; + +/*** delete after deprecation time ***/ + +#define write_one_old(file_name) \ +static ssize_t store_##file_name##_old \ +(struct cpufreq_policy *unused, const char *buf, size_t count) \ +{ \ + printk_once(KERN_INFO "CPUFREQ: Per core adaptive sysfs " \ + "interface is deprecated - " #file_name "\n"); \ + return store_##file_name(NULL, NULL, buf, count); \ +} + +static void cpufreq_adaptive_timer(unsigned long data) +{ + cputime64_t cur_idle; + cputime64_t cur_wall; + unsigned int delta_idle; + unsigned int delta_time; + int short_load; + unsigned int new_freq; + unsigned long flags; + struct cpu_dbs_info_s *this_dbs_info; + struct cpufreq_policy *policy; + unsigned int j; + unsigned int index; + unsigned int max_load = 0; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); + + policy = this_dbs_info->cur_policy; + + for_each_online_cpu(j) { + cur_idle = get_cpu_idle_time_us(j, &cur_wall); + + delta_idle = (unsigned int) cputime64_sub(cur_idle, + per_cpu(idle_in_idle, j)); + delta_time = (unsigned int) cputime64_sub(cur_wall, + per_cpu(idle_exit_wall, j)); + + /* + * If timer ran less than 1ms after short-term sample started, retry. 
+ */ + if (delta_time < 1000) + goto do_nothing; + + if (delta_idle > delta_time) + short_load = 0; + else + short_load = 100 * (delta_time - delta_idle) / delta_time; + + if (short_load > max_load) + max_load = short_load; + } + + if (this_dbs_info->ondemand) + goto do_nothing; + + if (max_load >= go_maxspeed_load) + new_freq = policy->max; + else + new_freq = policy->max * max_load / 100; + + if ((max_load <= keep_minspeed_load) && + (policy->cur == policy->min)) + new_freq = policy->cur; + + if (cpufreq_frequency_table_target(policy, this_dbs_info->freq_table, + new_freq, CPUFREQ_RELATION_L, + &index)) { + goto do_nothing; + } + + new_freq = this_dbs_info->freq_table[index].frequency; + + target_freq = new_freq; + + if (new_freq < this_dbs_info->cur_policy->cur) { + spin_lock_irqsave(&down_cpumask_lock, flags); + cpumask_set_cpu(0, &down_cpumask); + spin_unlock_irqrestore(&down_cpumask_lock, flags); + queue_work(down_wq, &freq_scale_down_work); + } else { + spin_lock_irqsave(&up_cpumask_lock, flags); + cpumask_set_cpu(0, &up_cpumask); + spin_unlock_irqrestore(&up_cpumask_lock, flags); + wake_up_process(up_task); + } + + return; + +do_nothing: + for_each_online_cpu(j) { + per_cpu(idle_in_idle, j) = + get_cpu_idle_time_us(j, + &per_cpu(idle_exit_wall, j)); + } + mod_timer(&cpu_timer, jiffies + 2); + schedule_delayed_work_on(0, &this_dbs_info->work, 10); + + if (mutex_is_locked(&short_timer_mutex)) + mutex_unlock(&short_timer_mutex); + return; +} + +/*** delete after deprecation time ***/ + +/************************** sysfs end ************************/ + +static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) +{ +#ifndef CONFIG_ARCH_EXYNOS4 + if (p->cur == p->max) + return; +#endif + __cpufreq_driver_target(p, freq, CPUFREQ_RELATION_H); +} + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + unsigned int max_load_freq; + + struct cpufreq_policy *policy; + unsigned int j; + + unsigned int index, new_freq; + unsigned int longterm_load = 0; + + policy = this_dbs_info->cur_policy; + + /* + * Every sampling_rate, we check if current idle time is less + * than 20% (default), then we try to increase frequency + * Every sampling_rate, we look for the lowest + * frequency which can sustain the load while keeping idle time over + * 30%. If such a frequency exists, we try to decrease to this frequency. + * + * Any frequency increase takes it to the maximum frequency.
+ * Frequency reduction happens at minimum steps of + * 5% (default) of current frequency + */ + + /* Get Absolute Load - in terms of freq */ + max_load_freq = 0; + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + unsigned int load, load_freq; + int freq_avg; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time_us(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, + j_dbs_info->prev_cpu_iowait); + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) { + cputime64_t cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_dbs_info->prev_cpu_nice; + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + /* + * For the purpose of adaptive, waiting for disk IO is an + * indication that you're performance critical, and not that + * the system is actually idle. So subtract the iowait time + * from the cpu idle time. + */ + + if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + + if (load > longterm_load) + longterm_load = load; + + freq_avg = __cpufreq_driver_getavg(policy, j); + if (freq_avg <= 0) + freq_avg = policy->cur; + + load_freq = load * freq_avg; + + if (load_freq > max_load_freq) + max_load_freq = load_freq; + } + + if (longterm_load >= MIN_ONDEMAND_THRESHOLD) + this_dbs_info->ondemand = true; + else + this_dbs_info->ondemand = false; + + /* Check for frequency increase */ + if (max_load_freq > (dbs_tuners_ins.up_threshold * policy->cur)) { + cpufreq_frequency_table_target(policy, + this_dbs_info->freq_table, + (policy->cur + step_up_load), + CPUFREQ_RELATION_L, &index); + + new_freq = this_dbs_info->freq_table[index].frequency; + dbs_freq_increase(policy, new_freq); + return; + } + + /* Check for frequency decrease */ + /* if we cannot reduce the frequency anymore, break out early */ +#ifndef CONFIG_ARCH_EXYNOS4 + if (policy->cur == policy->min) + return; +#endif + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus 10 points under the threshold. 
*/ + if (max_load_freq < + (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * + policy->cur) { + unsigned int freq_next; + freq_next = max_load_freq / + (dbs_tuners_ins.up_threshold - + dbs_tuners_ins.down_differential); + + if (freq_next < policy->min) + freq_next = policy->min; + + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + + int delay; + + mutex_lock(&dbs_info->timer_mutex); + + /* Common NORMAL_SAMPLE setup */ + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + dbs_check_cpu(dbs_info); + + /* We want all CPUs to do sampling nearly on + * same jiffy + */ + delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + schedule_delayed_work_on(cpu, &dbs_info->work, delay); + + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); +} + +/* + * Not all CPUs want IO time to be accounted as busy; this depends on how + * efficient idling at a higher frequency/voltage is. + * Pavel Machek says this is not so for various generations of AMD and old + * Intel systems. + * Mike Chan (android.com) claims this is also not true for ARM. + * Because of this, whitelist specific known (series) of CPUs by default, and + * leave all others up to the user. + */ +static int should_io_be_busy(void) +{ +#if defined(CONFIG_X86) + /* + * For Intel, Core 2 (model 15) and later have an efficient idle.
+ */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + boot_cpu_data.x86 == 6 && + boot_cpu_data.x86_model >= 15) + return 1; +#endif + return 0; +} + +static void cpufreq_adaptive_idle(void) +{ + int i; + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, 0); + struct cpufreq_policy *policy; + + policy = dbs_info->cur_policy; + + pm_idle_old(); + + if ((policy->cur == policy->min) || + (policy->cur == policy->max)) { + + if (timer_pending(&cpu_timer)) + return; + + if (mutex_trylock(&short_timer_mutex)) { + for_each_online_cpu(i) { + per_cpu(idle_in_idle, i) = + get_cpu_idle_time_us(i, + &per_cpu(idle_exit_wall, i)); + } + + mod_timer(&cpu_timer, jiffies + 2); + cancel_delayed_work(&dbs_info->work); + } + } else { + if (timer_pending(&cpu_timer)) + del_timer(&cpu_timer); + + } +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_mutex); + + rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time_us(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + } + this_dbs_info->cpu = cpu; + adaptive_init_cpu(cpu); + + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + /* policy latency is in nS. 
Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = + max(min_sampling_rate, + latency * LATENCY_MULTIPLIER); + dbs_tuners_ins.io_is_busy = should_io_be_busy(); + } + mutex_unlock(&dbs_mutex); + + mutex_init(&this_dbs_info->timer_mutex); + dbs_timer_init(this_dbs_info); + + pm_idle_old = pm_idle; + pm_idle = cpufreq_adaptive_idle; + break; + + case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + sysfs_remove_group(&policy->kobj, &dbs_attr_group); + mutex_destroy(&this_dbs_info->timer_mutex); + dbs_enable--; + mutex_unlock(&dbs_mutex); + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + + pm_idle = pm_idle_old; + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +static inline void cpufreq_adaptive_update_time(void) +{ + struct cpu_dbs_info_s *this_dbs_info; + struct cpufreq_policy *policy; + int j; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); + policy = this_dbs_info->cur_policy; + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time_us(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + j_dbs_info->prev_cpu_wall = cur_wall_time; + + j_dbs_info->prev_cpu_idle = cur_idle_time; + + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + + } + +} + +static int cpufreq_adaptive_up_task(void *data) +{ + unsigned long flags; + struct cpu_dbs_info_s *this_dbs_info; + struct cpufreq_policy *policy; + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); + policy = this_dbs_info->cur_policy; + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + spin_lock_irqsave(&up_cpumask_lock, flags); + + if (cpumask_empty(&up_cpumask)) { + spin_unlock_irqrestore(&up_cpumask_lock, flags); + schedule(); + + if (kthread_should_stop()) + break; + + spin_lock_irqsave(&up_cpumask_lock, flags); + } + + set_current_state(TASK_RUNNING); + + cpumask_clear(&up_cpumask); + spin_unlock_irqrestore(&up_cpumask_lock, flags); + + __cpufreq_driver_target(this_dbs_info->cur_policy, + target_freq, + CPUFREQ_RELATION_H); + if (policy->cur != policy->max) { + mutex_lock(&this_dbs_info->timer_mutex); + + schedule_delayed_work_on(0, &this_dbs_info->work, delay); + mutex_unlock(&this_dbs_info->timer_mutex); + cpufreq_adaptive_update_time(); + } + if (mutex_is_locked(&short_timer_mutex)) + mutex_unlock(&short_timer_mutex); + } + + return 0; +} + +static void cpufreq_adaptive_freq_down(struct work_struct *work) +{ + unsigned long flags; + struct cpu_dbs_info_s *this_dbs_info; + struct cpufreq_policy *policy; + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + spin_lock_irqsave(&down_cpumask_lock, 
flags); + cpumask_clear(&down_cpumask); + spin_unlock_irqrestore(&down_cpumask_lock, flags); + + this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); + policy = this_dbs_info->cur_policy; + + __cpufreq_driver_target(this_dbs_info->cur_policy, + target_freq, + CPUFREQ_RELATION_H); + + if (policy->cur != policy->min) { + mutex_lock(&this_dbs_info->timer_mutex); + + schedule_delayed_work_on(0, &this_dbs_info->work, delay); + mutex_unlock(&this_dbs_info->timer_mutex); + cpufreq_adaptive_update_time(); + } + + if (mutex_is_locked(&short_timer_mutex)) + mutex_unlock(&short_timer_mutex); +} + +static int __init cpufreq_gov_dbs_init(void) +{ + cputime64_t wall; + u64 idle_time; + int cpu = get_cpu(); + + struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; + go_maxspeed_load = DEFAULT_GO_MAXSPEED_LOAD; + keep_minspeed_load = DEFAULT_KEEP_MINSPEED_LOAD; + step_up_load = DEFAULT_STEPUP_LOAD; + + idle_time = get_cpu_idle_time_us(cpu, &wall); + put_cpu(); + if (idle_time != -1ULL) { + /* Idle micro accounting is supported. Use finer thresholds */ + dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; + dbs_tuners_ins.down_differential = + MICRO_FREQUENCY_DOWN_DIFFERENTIAL; + /* + * In no_hz/micro accounting case we set the minimum frequency + * not depending on HZ, but fixed (very low). The deferred + * timer might skip some samples if idle/sleeping as needed. + */ + min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; + } else { + /* For correct statistics, we need 10 ticks for each measure */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + } + + init_timer(&cpu_timer); + cpu_timer.function = cpufreq_adaptive_timer; + + up_task = kthread_create(cpufreq_adaptive_up_task, NULL, + "kadaptiveup"); + + if (IS_ERR(up_task)) + return PTR_ERR(up_task); + + sched_setscheduler_nocheck(up_task, SCHED_FIFO, ¶m); + get_task_struct(up_task); + + /* No rescuer thread, bind to CPU queuing the work for possibly + warm cache (probably doesn't matter much). 
*/ + down_wq = alloc_workqueue("kadaptive_down", 0, 1); + + if (!down_wq) + goto err_freeuptask; + + INIT_WORK(&freq_scale_down_work, cpufreq_adaptive_freq_down); + + + return cpufreq_register_governor(&cpufreq_gov_adaptive); +err_freeuptask: + put_task_struct(up_task); + return -ENOMEM; +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_adaptive); +} + + +MODULE_AUTHOR("Venkatesh Pallipadi "); +MODULE_AUTHOR("Alexey Starikovskiy "); +MODULE_DESCRIPTION("'cpufreq_adaptive' - A dynamic cpufreq governor for " + "Low Latency Frequency Transition capable processors"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ADAPTIVE +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_badass.c b/drivers/cpufreq/cpufreq_badass.c index eb3f433a..45b14d1e 100644 --- a/drivers/cpufreq/cpufreq_badass.c +++ b/drivers/cpufreq/cpufreq_badass.c @@ -188,12 +188,12 @@ static struct bds_tuners { #endif }; -static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, - cputime64_t *wall) +static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, + u64 *wall) { - cputime64_t idle_time; - cputime64_t cur_wall_time; - cputime64_t busy_time; + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); @@ -204,11 +204,11 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; - idle_time = (cur_wall_time - busy_time); + idle_time = cur_wall_time - busy_time; if (wall) - *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); + *wall = jiffies_to_usecs(cur_wall_time); - return (cputime64_t)jiffies_to_usecs(idle_time); + return jiffies_to_usecs(idle_time); } static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) @@ -830,7 +830,11 @@ int set_three_phase_freq_badass(int cpufreq) static void bds_check_cpu(struct cpu_bds_info_s *this_bds_info) { + /* Extrapolated load of this CPU */ + unsigned int load_at_max_freq = 0; unsigned int max_load_freq; + /* Current load across this CPU */ + unsigned int cur_load = 0; struct cpufreq_policy *policy; unsigned int j; @@ -866,7 +870,7 @@ static void bds_check_cpu(struct cpu_bds_info_s *this_bds_info) struct cpu_bds_info_s *j_bds_info; cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; unsigned int idle_time, wall_time, iowait_time; - unsigned int load, load_freq; + unsigned int load_freq; int freq_avg; j_bds_info = &per_cpu(od_cpu_bds_info, j); @@ -874,20 +878,24 @@ static void bds_check_cpu(struct cpu_bds_info_s *this_bds_info) cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); - wall_time = (unsigned int) (cur_wall_time - j_bds_info->prev_cpu_wall); + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + j_bds_info->prev_cpu_wall); j_bds_info->prev_cpu_wall = cur_wall_time; - idle_time = (unsigned int) (cur_idle_time - j_bds_info->prev_cpu_idle); + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + j_bds_info->prev_cpu_idle); j_bds_info->prev_cpu_idle = cur_idle_time; - iowait_time = (unsigned int) (cur_iowait_time - j_bds_info->prev_cpu_iowait); + iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, + j_bds_info->prev_cpu_iowait); j_bds_info->prev_cpu_iowait = cur_iowait_time; if (bds_tuners_ins.ignore_nice) { cputime64_t 
cur_nice; unsigned long cur_nice_jiffies; - cur_nice = (kcpustat_cpu(j).cpustat[CPUTIME_NICE] - j_bds_info->prev_cpu_nice); + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_bds_info->prev_cpu_nice; /* * Assumption: nice time between sampling periods will * be less than 2^32 jiffies for 32 bit sys @@ -912,16 +920,20 @@ static void bds_check_cpu(struct cpu_bds_info_s *this_bds_info) if (unlikely(!wall_time || wall_time < idle_time)) continue; - load = 100 * (wall_time - idle_time) / wall_time; + cur_load = 100 * (wall_time - idle_time) / wall_time; freq_avg = __cpufreq_driver_getavg(policy, j); if (freq_avg <= 0) freq_avg = policy->cur; - load_freq = load * freq_avg; + load_freq = cur_load * freq_avg; if (load_freq > max_load_freq) max_load_freq = load_freq; } + /* calculate the scaled load across CPU */ + load_at_max_freq = (cur_load * policy->cur)/policy->cpuinfo.max_freq; + + cpufreq_notify_utilization(policy, load_at_max_freq); /* Check for frequency increase */ if (max_load_freq > bds_tuners_ins.up_threshold * policy->cur) { diff --git a/drivers/cpufreq/cpufreq_nightmare.c b/drivers/cpufreq/cpufreq_nightmare.c new file mode 100644 index 00000000..ece971ca --- /dev/null +++ b/drivers/cpufreq/cpufreq_nightmare.c @@ -0,0 +1,1656 @@ +/* + * drivers/cpufreq/cpufreq_nightmare.c + * + * Copyright (C) 2011 Samsung Electronics co. ltd + * ByungChang Cha + * + * Based on ondemand governor + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Created by Alucard_24@xda + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_HAS_EARLYSUSPEND +#include +#endif +#define EARLYSUSPEND_HOTPLUGLOCK 1 + +/* + * runqueue average + */ + +#define RQ_AVG_TIMER_RATE 10 + +struct runqueue_data { + unsigned int nr_run_avg; + unsigned int update_rate; + int64_t last_time; + int64_t total_time; + struct delayed_work work; + struct workqueue_struct *nr_run_wq; + spinlock_t lock; +}; + +static struct runqueue_data *rq_data; +static void rq_work_fn(struct work_struct *work); + +static void start_rq_work(void) +{ + rq_data->nr_run_avg = 0; + rq_data->last_time = 0; + rq_data->total_time = 0; + if (rq_data->nr_run_wq == NULL) + rq_data->nr_run_wq = + create_singlethread_workqueue("nr_run_avg"); + + queue_delayed_work(rq_data->nr_run_wq, &rq_data->work, + msecs_to_jiffies(rq_data->update_rate)); + return; +} + +static void stop_rq_work(void) +{ + if (rq_data->nr_run_wq) + cancel_delayed_work(&rq_data->work); + return; +} + +static int __init init_rq_avg(void) +{ + rq_data = kzalloc(sizeof(struct runqueue_data), GFP_KERNEL); + if (rq_data == NULL) { + pr_err("%s cannot allocate memory\n", __func__); + return -ENOMEM; + } + spin_lock_init(&rq_data->lock); + rq_data->update_rate = RQ_AVG_TIMER_RATE; + INIT_DELAYED_WORK_DEFERRABLE(&rq_data->work, rq_work_fn); + + return 0; +} + +static void rq_work_fn(struct work_struct *work) +{ + int64_t time_diff = 0; + int64_t nr_run = 0; + unsigned long flags = 0; + int64_t cur_time = ktime_to_ns(ktime_get()); + + spin_lock_irqsave(&rq_data->lock, flags); + + if (rq_data->last_time == 0) + rq_data->last_time = cur_time; + if (rq_data->nr_run_avg == 0) + rq_data->total_time = 0; + + nr_run = nr_running() * 100; + time_diff 
= cur_time - rq_data->last_time; + do_div(time_diff, 1000 * 1000); + + if (time_diff != 0 && rq_data->total_time != 0) { + nr_run = (nr_run * time_diff) + + (rq_data->nr_run_avg * rq_data->total_time); + do_div(nr_run, rq_data->total_time + time_diff); + } + rq_data->nr_run_avg = nr_run; + rq_data->total_time += time_diff; + rq_data->last_time = cur_time; + + if (rq_data->update_rate != 0) + queue_delayed_work(rq_data->nr_run_wq, &rq_data->work, + msecs_to_jiffies(rq_data->update_rate)); + + spin_unlock_irqrestore(&rq_data->lock, flags); +} + +static unsigned int get_nr_run_avg(void) +{ + unsigned int nr_run_avg; + unsigned long flags = 0; + + spin_lock_irqsave(&rq_data->lock, flags); + nr_run_avg = rq_data->nr_run_avg; + rq_data->nr_run_avg = 0; + spin_unlock_irqrestore(&rq_data->lock, flags); + + return nr_run_avg; +} + + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_SAMPLING_UP_FACTOR (1) +#define MAX_SAMPLING_UP_FACTOR (100000) +#define DEF_SAMPLING_DOWN_FACTOR (2) +#define MAX_SAMPLING_DOWN_FACTOR (100000) +#define DEF_FREQ_STEP_DEC (5) + +#define DEF_SAMPLING_RATE (60000) +#define MIN_SAMPLING_RATE (10000) +#define MAX_HOTPLUG_RATE (40u) + +#define DEF_MAX_CPU_LOCK (0) +#define DEF_MIN_CPU_LOCK (0) +#define DEF_UP_NR_CPUS (1) +#define DEF_CPU_UP_RATE (10) +#define DEF_CPU_DOWN_RATE (20) +#define DEF_FREQ_STEP (30) + +#define DEF_START_DELAY (0) + +#define FREQ_FOR_RESPONSIVENESS (918000) + +#define HOTPLUG_DOWN_INDEX (0) +#define HOTPLUG_UP_INDEX (1) + +/* CPU freq will be increased if measured load > inc_cpu_load;*/ +#define DEF_INC_CPU_LOAD (80) +#define INC_CPU_LOAD_AT_MIN_FREQ (40) +#define UP_AVG_LOAD (65u) +/* CPU freq will be decreased if measured load < dec_cpu_load;*/ +#define DEF_DEC_CPU_LOAD (60) +#define DOWN_AVG_LOAD (30u) +#define DEF_FREQ_UP_BRAKE (5u) +#define DEF_HOTPLUG_COMPARE_LEVEL (0u) + +#ifdef CONFIG_MACH_MIDAS +static int hotplug_rq[4][2] = { + {0, 100}, {100, 200}, {200, 300}, {300, 0} +}; + +static int hotplug_freq[4][2] = { + {0, 540000}, + {378000, 540000}, + {378000, 540000}, + {378000, 0} +}; +#else +static int hotplug_rq[4][2] = { + {0, 100}, {100, 200}, {200, 300}, {300, 0} +}; + +static int hotplug_freq[4][2] = { + {0, 540000}, + {378000, 540000}, + {378000, 540000}, + {378000, 0} +}; +#endif + +static unsigned int min_sampling_rate; + +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_nightmare(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_NIGHTMARE +static +#endif +struct cpufreq_governor cpufreq_gov_nightmare = { + .name = "nightmare", + .governor = cpufreq_governor_nightmare, + .owner = THIS_MODULE, +}; + +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpufreq_nightmare_cpuinfo { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct work_struct up_work; + struct work_struct down_work; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_table_maxsize; + unsigned int avg_rate_mult; + int cpu; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. 
+ */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpufreq_nightmare_cpuinfo, od_cpu_dbs_info); + +struct workqueue_struct *dvfs_workqueues; + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + + +/* + * dbs_mutex protects dbs_enable in governor start/stop. + */ +static DEFINE_MUTEX(dbs_mutex); + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int freq_step_dec; + unsigned int ignore_nice; + unsigned int sampling_down_factor; + unsigned int io_is_busy; + /* nightmare tuners */ + unsigned int freq_step; + unsigned int cpu_up_rate; + unsigned int cpu_down_rate; + unsigned int up_nr_cpus; + unsigned int max_cpu_lock; + unsigned int min_cpu_lock; + atomic_t hotplug_lock; + unsigned int dvfs_debug; + unsigned int max_freq; + unsigned int min_freq; +#ifdef CONFIG_HAS_EARLYSUSPEND + int early_suspend; +#endif + unsigned int inc_cpu_load_at_min_freq; + unsigned int freq_for_responsiveness; + unsigned int inc_cpu_load; + unsigned int dec_cpu_load; + unsigned int up_avg_load; + unsigned int down_avg_load; + unsigned int sampling_up_factor; + unsigned int freq_up_brake; + unsigned int hotplug_compare_level; +} dbs_tuners_ins = { + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .freq_step_dec = DEF_FREQ_STEP_DEC, + .ignore_nice = 0, + .freq_step = DEF_FREQ_STEP, + .cpu_up_rate = DEF_CPU_UP_RATE, + .cpu_down_rate = DEF_CPU_DOWN_RATE, + .up_nr_cpus = DEF_UP_NR_CPUS, + .max_cpu_lock = DEF_MAX_CPU_LOCK, + .min_cpu_lock = DEF_MIN_CPU_LOCK, + .hotplug_lock = ATOMIC_INIT(0), + .dvfs_debug = 0, +#ifdef CONFIG_HAS_EARLYSUSPEND + .early_suspend = -1, +#endif + .inc_cpu_load_at_min_freq = INC_CPU_LOAD_AT_MIN_FREQ, + .freq_for_responsiveness = FREQ_FOR_RESPONSIVENESS, + .inc_cpu_load = DEF_INC_CPU_LOAD, + .dec_cpu_load = DEF_DEC_CPU_LOAD, + .up_avg_load = UP_AVG_LOAD, + .down_avg_load = DOWN_AVG_LOAD, + .sampling_up_factor = DEF_SAMPLING_UP_FACTOR, + .freq_up_brake = DEF_FREQ_UP_BRAKE, + .hotplug_compare_level = DEF_HOTPLUG_COMPARE_LEVEL, +}; + + +/* + * CPU hotplug lock interface + */ + +static atomic_t g_hotplug_count = ATOMIC_INIT(0); +static atomic_t g_hotplug_lock = ATOMIC_INIT(0); + +static void apply_hotplug_lock(void) +{ + int online, possible, lock, flag; + struct work_struct *work; + struct cpufreq_nightmare_cpuinfo *dbs_info; + + /* do turn_on/off cpus */ + dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ + online = num_online_cpus(); + possible = num_possible_cpus(); + lock = atomic_read(&g_hotplug_lock); + flag = lock - online; + + if (lock == 0 || flag == 0) + return; + + work = flag > 0 ? 
&dbs_info->up_work : &dbs_info->down_work; + + pr_debug("%s online %d possible %d lock %d flag %d %d\n", + __func__, online, possible, lock, flag, (int)abs(flag)); + + queue_work_on(dbs_info->cpu, dvfs_workqueues, work); +} + +int cpufreq_nightmare_cpu_lock(int num_core) +{ + int prev_lock; + + if (num_core < 1 || num_core > num_possible_cpus()) + return -EINVAL; + + prev_lock = atomic_read(&g_hotplug_lock); + + if (prev_lock != 0 && prev_lock < num_core) + return -EINVAL; + else if (prev_lock == num_core) + atomic_inc(&g_hotplug_count); + + atomic_set(&g_hotplug_lock, num_core); + atomic_set(&g_hotplug_count, 1); + apply_hotplug_lock(); + + return 0; +} + +int cpufreq_nightmare_cpu_unlock(int num_core) +{ + int prev_lock = atomic_read(&g_hotplug_lock); + + if (prev_lock < num_core) + return 0; + else if (prev_lock == num_core) + atomic_dec(&g_hotplug_count); + + if (atomic_read(&g_hotplug_count) == 0) + atomic_set(&g_hotplug_lock, 0); + + return 0; +} + +void cpufreq_nightmare_min_cpu_lock(unsigned int num_core) +{ + int online, flag; + struct cpufreq_nightmare_cpuinfo *dbs_info; + + dbs_tuners_ins.min_cpu_lock = min(num_core, num_possible_cpus()); + + dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ + online = num_online_cpus(); + flag = (int)num_core - online; + if (flag <= 0) + return; + queue_work_on(dbs_info->cpu, dvfs_workqueues, &dbs_info->up_work); +} + +void cpufreq_nightmare_min_cpu_unlock(void) +{ + int online, lock, flag; + struct cpufreq_nightmare_cpuinfo *dbs_info; + + dbs_tuners_ins.min_cpu_lock = 0; + + dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ + online = num_online_cpus(); + lock = atomic_read(&g_hotplug_lock); + if (lock == 0) + return; + flag = lock - online; + if (flag >= 0) + return; + queue_work_on(dbs_info->cpu, dvfs_workqueues, &dbs_info->down_work); +} + +/* + * History of CPU usage + */ +struct cpu_usage { + unsigned int freq; + int load[NR_CPUS]; + unsigned int rq_avg; + unsigned int avg_load; +}; + +struct cpu_usage_history { + struct cpu_usage usage[MAX_HOTPLUG_RATE]; + unsigned int num_hist; +}; + +struct cpu_usage_history *hotplug_histories; + +static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) +{ + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + + idle_time = cur_wall_time - busy_time; + if (wall) + *wall = jiffies_to_usecs(cur_wall_time); + + return jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, NULL); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + else + idle_time += get_cpu_iowait_time_us(cpu, wall); + + return idle_time; +} + +static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, + cputime64_t *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + 
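[Editor's note, not part of the patch: the idle-time helpers above feed the governor's sampling loop, which turns the idle-time and wall-time deltas between two samples into a 0-100 load figure. The stand-alone user-space C sketch below shows that arithmetic with made-up numbers; sample_load() and its parameters are hypothetical names used only for illustration.]

/* Illustrative sketch of the load calculation used by the sampling loop. */
#include <stdio.h>

/* load = 100 * busy / wall, where busy = wall - idle (all in microseconds) */
static unsigned int sample_load(unsigned long long prev_wall,
				unsigned long long cur_wall,
				unsigned long long prev_idle,
				unsigned long long cur_idle)
{
	unsigned long long wall = cur_wall - prev_wall;
	unsigned long long idle = cur_idle - prev_idle;

	/* same guard the governor applies before dividing */
	if (wall == 0 || idle > wall)
		return 0;
	return (unsigned int)(100 * (wall - idle) / wall);
}

int main(void)
{
	/* 100 ms of wall time with 38 ms idle gives 62% load */
	printf("load = %u%%\n", sample_load(0, 100000, 0, 38000));
	return 0;
}
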
+define_one_global_ro(sampling_rate_min); + +/* cpufreq_nightmare Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(sampling_down_factor, sampling_down_factor); +show_one(ignore_nice_load, ignore_nice); +show_one(freq_step_dec, freq_step_dec); +show_one(freq_step, freq_step); +show_one(cpu_up_rate, cpu_up_rate); +show_one(cpu_down_rate, cpu_down_rate); +show_one(up_nr_cpus, up_nr_cpus); +show_one(max_cpu_lock, max_cpu_lock); +show_one(min_cpu_lock, min_cpu_lock); +show_one(dvfs_debug, dvfs_debug); +show_one(inc_cpu_load_at_min_freq, inc_cpu_load_at_min_freq); +show_one(freq_for_responsiveness, freq_for_responsiveness); +show_one(inc_cpu_load, inc_cpu_load); +show_one(dec_cpu_load, dec_cpu_load); +show_one(up_avg_load, up_avg_load); +show_one(down_avg_load, down_avg_load); +show_one(sampling_up_factor, sampling_up_factor); +show_one(freq_up_brake, freq_up_brake); +show_one(hotplug_compare_level,hotplug_compare_level); + +static ssize_t show_hotplug_lock(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", atomic_read(&g_hotplug_lock)); +} + +#define show_hotplug_param(file_name, num_core, up_down) \ +static ssize_t show_##file_name##_##num_core##_##up_down \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", file_name[num_core - 1][up_down]); \ +} + +#define store_hotplug_param(file_name, num_core, up_down) \ +static ssize_t store_##file_name##_##num_core##_##up_down \ +(struct kobject *kobj, struct attribute *attr, \ + const char *buf, size_t count) \ +{ \ + unsigned int input; \ + int ret; \ + ret = sscanf(buf, "%u", &input); \ + if (ret != 1) \ + return -EINVAL; \ + file_name[num_core - 1][up_down] = input; \ + return count; \ +} + +show_hotplug_param(hotplug_freq, 1, 1); +show_hotplug_param(hotplug_freq, 2, 0); +#ifndef CONFIG_CPU_EXYNOS4210 +show_hotplug_param(hotplug_freq, 2, 1); +show_hotplug_param(hotplug_freq, 3, 0); +show_hotplug_param(hotplug_freq, 3, 1); +show_hotplug_param(hotplug_freq, 4, 0); +#endif + +show_hotplug_param(hotplug_rq, 1, 1); +show_hotplug_param(hotplug_rq, 2, 0); +#ifndef CONFIG_CPU_EXYNOS4210 +show_hotplug_param(hotplug_rq, 2, 1); +show_hotplug_param(hotplug_rq, 3, 0); +show_hotplug_param(hotplug_rq, 3, 1); +show_hotplug_param(hotplug_rq, 4, 0); +#endif + +store_hotplug_param(hotplug_freq, 1, 1); +store_hotplug_param(hotplug_freq, 2, 0); +#ifndef CONFIG_CPU_EXYNOS4210 +store_hotplug_param(hotplug_freq, 2, 1); +store_hotplug_param(hotplug_freq, 3, 0); +store_hotplug_param(hotplug_freq, 3, 1); +store_hotplug_param(hotplug_freq, 4, 0); +#endif + +store_hotplug_param(hotplug_rq, 1, 1); +store_hotplug_param(hotplug_rq, 2, 0); +#ifndef CONFIG_CPU_EXYNOS4210 +store_hotplug_param(hotplug_rq, 2, 1); +store_hotplug_param(hotplug_rq, 3, 0); +store_hotplug_param(hotplug_rq, 3, 1); +store_hotplug_param(hotplug_rq, 4, 0); +#endif + +define_one_global_rw(hotplug_freq_1_1); +define_one_global_rw(hotplug_freq_2_0); +#ifndef CONFIG_CPU_EXYNOS4210 +define_one_global_rw(hotplug_freq_2_1); +define_one_global_rw(hotplug_freq_3_0); +define_one_global_rw(hotplug_freq_3_1); +define_one_global_rw(hotplug_freq_4_0); +#endif + +define_one_global_rw(hotplug_rq_1_1); +define_one_global_rw(hotplug_rq_2_0); +#ifndef CONFIG_CPU_EXYNOS4210 
+define_one_global_rw(hotplug_rq_2_1); +define_one_global_rw(hotplug_rq_3_0); +define_one_global_rw(hotplug_rq_3_1); +define_one_global_rw(hotplug_rq_4_0); +#endif + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + return count; +} + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + dbs_tuners_ins.io_is_busy = !!input; + return count; +} + +static ssize_t store_sampling_down_factor(struct kobject *a, + struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + dbs_tuners_ins.sampling_down_factor = input; + + return count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpufreq_nightmare_cpuinfo *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = + get_cpu_idle_time(j, &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + return count; +} + +static ssize_t store_freq_step_dec(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.freq_step_dec = min(input, 100u); + return count; +} + +static ssize_t store_freq_step(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.freq_step = min(input, 100u); + return count; +} + +static ssize_t store_cpu_up_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.cpu_up_rate = min(input, MAX_HOTPLUG_RATE); + return count; +} + +static ssize_t store_cpu_down_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.cpu_down_rate = min(input, MAX_HOTPLUG_RATE); + return count; +} + +static ssize_t store_up_nr_cpus(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.up_nr_cpus = min(input, num_possible_cpus()); + return count; +} + +static ssize_t store_max_cpu_lock(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.max_cpu_lock = min(input, 
num_possible_cpus()); + return count; +} + +static ssize_t store_min_cpu_lock(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + if (input == 0) + cpufreq_nightmare_min_cpu_unlock(); + else + cpufreq_nightmare_min_cpu_lock(input); + return count; +} + +static ssize_t store_hotplug_lock(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + int prev_lock; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + input = min(input, num_possible_cpus()); + prev_lock = atomic_read(&dbs_tuners_ins.hotplug_lock); + + if (prev_lock) + cpufreq_nightmare_cpu_unlock(prev_lock); + + if (input == 0) { + atomic_set(&dbs_tuners_ins.hotplug_lock, 0); + return count; + } + + ret = cpufreq_nightmare_cpu_lock(input); + if (ret) { + printk(KERN_ERR "[HOTPLUG] already locked with smaller value %d < %d\n", + atomic_read(&g_hotplug_lock), input); + return ret; + } + + atomic_set(&dbs_tuners_ins.hotplug_lock, input); + + return count; +} + +static ssize_t store_dvfs_debug(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.dvfs_debug = input > 0; + return count; +} + +static ssize_t store_inc_cpu_load_at_min_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > 100) { + return -EINVAL; + } + dbs_tuners_ins.inc_cpu_load_at_min_freq = min(input,dbs_tuners_ins.inc_cpu_load); + return count; +} + +static ssize_t store_freq_for_responsiveness(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.freq_for_responsiveness = input; + return count; +} + +/* inc_cpu_load */ +static ssize_t store_inc_cpu_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.inc_cpu_load = max(min(input,100u),10u); + return count; +} + +/* dec_cpu_load */ +static ssize_t store_dec_cpu_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.dec_cpu_load = max(min(input,95u),5u); + return count; +} + +/* up_avg_load */ +static ssize_t store_up_avg_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.up_avg_load = max(min(input,100u),10u); + return count; +} + +/* down_avg_load */ +static ssize_t store_down_avg_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.down_avg_load = max(min(input,95u),5u); + return count; +} + +/* sampling_up_factor */ +static ssize_t store_sampling_up_factor(struct kobject *a, + struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > 
MAX_SAMPLING_UP_FACTOR || input < 1) + return -EINVAL; + dbs_tuners_ins.sampling_up_factor = input; + + return count; +} + +/* freq_up_brake */ +static ssize_t store_freq_up_brake(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1 || input < 0 || input > 100) + return -EINVAL; + + if (input == dbs_tuners_ins.freq_up_brake) { /* nothing to do */ + return count; + } + + dbs_tuners_ins.freq_up_brake = input; + + return count; +} + +/* hotplug_compare_level */ +static ssize_t store_hotplug_compare_level(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1 || input < 0 || input > 1) + return -EINVAL; + + if (input == dbs_tuners_ins.hotplug_compare_level) { /* nothing to do */ + return count; + } + + dbs_tuners_ins.hotplug_compare_level = input; + + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(sampling_down_factor); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(freq_step_dec); +define_one_global_rw(freq_step); +define_one_global_rw(cpu_up_rate); +define_one_global_rw(cpu_down_rate); +define_one_global_rw(up_nr_cpus); +define_one_global_rw(max_cpu_lock); +define_one_global_rw(min_cpu_lock); +define_one_global_rw(hotplug_lock); +define_one_global_rw(dvfs_debug); +define_one_global_rw(inc_cpu_load_at_min_freq); +define_one_global_rw(freq_for_responsiveness); +define_one_global_rw(inc_cpu_load); +define_one_global_rw(dec_cpu_load); +define_one_global_rw(up_avg_load); +define_one_global_rw(down_avg_load); +define_one_global_rw(sampling_up_factor); +define_one_global_rw(freq_up_brake); +define_one_global_rw(hotplug_compare_level); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_min.attr, + &sampling_rate.attr, + &sampling_down_factor.attr, + &ignore_nice_load.attr, + &io_is_busy.attr, + &freq_step_dec.attr, + &freq_step.attr, + &cpu_up_rate.attr, + &cpu_down_rate.attr, + &up_nr_cpus.attr, + /* priority: hotplug_lock > max_cpu_lock > min_cpu_lock + Exception: hotplug_lock on early_suspend uses min_cpu_lock */ + &max_cpu_lock.attr, + &min_cpu_lock.attr, + &hotplug_lock.attr, + &dvfs_debug.attr, + &hotplug_freq_1_1.attr, + &hotplug_freq_2_0.attr, +#ifndef CONFIG_CPU_EXYNOS4210 + &hotplug_freq_2_1.attr, + &hotplug_freq_3_0.attr, + &hotplug_freq_3_1.attr, + &hotplug_freq_4_0.attr, +#endif + &hotplug_rq_1_1.attr, + &hotplug_rq_2_0.attr, +#ifndef CONFIG_CPU_EXYNOS4210 + &hotplug_rq_2_1.attr, + &hotplug_rq_3_0.attr, + &hotplug_rq_3_1.attr, + &hotplug_rq_4_0.attr, +#endif + &inc_cpu_load_at_min_freq.attr, + &freq_for_responsiveness.attr, + &inc_cpu_load.attr, + &dec_cpu_load.attr, + &up_avg_load.attr, + &down_avg_load.attr, + &sampling_up_factor.attr, + &freq_up_brake.attr, + &hotplug_compare_level.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "nightmare", +}; + +/************************** sysfs end ************************/ + +static void __ref cpu_up_work(struct work_struct *work) +{ + int cpu; + int online = num_online_cpus(); + int nr_up = dbs_tuners_ins.up_nr_cpus; + int min_cpu_lock = dbs_tuners_ins.min_cpu_lock; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock && min_cpu_lock) + nr_up = max(hotplug_lock, min_cpu_lock) - online; + else if (hotplug_lock) + nr_up = hotplug_lock - online; + else if 
(min_cpu_lock) + nr_up = max(nr_up, min_cpu_lock - online); + + if (online == 1) { + printk(KERN_ERR "CPU_UP 3\n"); + cpu_up(num_possible_cpus() - 1); + nr_up -= 1; + } + + for_each_cpu_not(cpu, cpu_online_mask) { + if (nr_up-- == 0) + break; + if (cpu == 0) + continue; + printk(KERN_ERR "CPU_UP %d\n", cpu); + cpu_up(cpu); + } +} + +static void cpu_down_work(struct work_struct *work) +{ + int cpu; + int online = num_online_cpus(); + int nr_down = 1; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock) + nr_down = online - hotplug_lock; + + for_each_online_cpu(cpu) { + if (cpu == 0) + continue; + printk(KERN_ERR "CPU_DOWN %d\n", cpu); + cpu_down(cpu); + if (--nr_down == 0) + break; + } +} + +static void debug_hotplug_check(int which, int rq_avg, int freq, + struct cpu_usage *usage) +{ + int cpu; + printk(KERN_ERR "CHECK %s rq %d.%02d freq %d [", which ? "up" : "down", + rq_avg / 100, rq_avg % 100, freq); + for_each_online_cpu(cpu) { + printk(KERN_ERR "(%d, %d), ", cpu, usage->load[cpu]); + } + printk(KERN_ERR "]\n"); +} + +static int check_up(void) +{ + int num_hist = hotplug_histories->num_hist; + struct cpu_usage *usage; + int freq, rq_avg; + int avg_load; + int i; + int up_rate = dbs_tuners_ins.cpu_up_rate; + unsigned int up_avg_load = dbs_tuners_ins.up_avg_load; + unsigned int hotplug_compare_level = dbs_tuners_ins.hotplug_compare_level; + int up_freq, up_rq; + int min_freq = INT_MAX; + int min_rq_avg = INT_MAX; + int min_avg_load = INT_MAX; + int online; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock > 0) + return 0; + + online = num_online_cpus(); + up_freq = hotplug_freq[online - 1][HOTPLUG_UP_INDEX]; + up_rq = hotplug_rq[online - 1][HOTPLUG_UP_INDEX]; + + if (online == num_possible_cpus()) + return 0; + + if (dbs_tuners_ins.max_cpu_lock != 0 + && online >= dbs_tuners_ins.max_cpu_lock) + return 0; + + if (dbs_tuners_ins.min_cpu_lock != 0 + && online < dbs_tuners_ins.min_cpu_lock) + return 1; + + if (num_hist == 0 || num_hist % up_rate) + return 0; + + if (hotplug_compare_level == 0) { + for (i = num_hist - 1; i >= num_hist - up_rate; --i) { + usage = &hotplug_histories->usage[i]; + + freq = usage->freq; + rq_avg = usage->rq_avg; + avg_load = usage->avg_load; + + min_freq = min(min_freq, freq); + min_rq_avg = min(min_rq_avg, rq_avg); + min_avg_load = min(min_avg_load, avg_load); + + if (dbs_tuners_ins.dvfs_debug) + debug_hotplug_check(1, rq_avg, freq, usage); + } + } else { + usage = &hotplug_histories->usage[num_hist - 1]; + min_freq = usage->freq; + min_rq_avg = usage->rq_avg; + min_avg_load = usage->avg_load; + if (dbs_tuners_ins.dvfs_debug) + debug_hotplug_check(1, min_rq_avg, min_freq, usage); + } + + if (min_freq >= up_freq && min_rq_avg > up_rq) { + if (online >= 1) { + if (min_avg_load < up_avg_load) + return 0; + } + printk(KERN_ERR "[HOTPLUG IN] %s %d>=%d && %d>%d\n", + __func__, min_freq, up_freq, min_rq_avg, up_rq); + hotplug_histories->num_hist = 0; + return 1; + } + return 0; +} + +static int check_down(void) +{ + int num_hist = hotplug_histories->num_hist; + struct cpu_usage *usage; + int freq, rq_avg; + int avg_load; + int i; + int down_rate = dbs_tuners_ins.cpu_down_rate; + unsigned int down_avg_load = dbs_tuners_ins.down_avg_load; + unsigned int hotplug_compare_level = dbs_tuners_ins.hotplug_compare_level; + int down_freq, down_rq; + int max_freq = 0; + int max_rq_avg = 0; + int max_avg_load = 0; + int online; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock > 0) + return 0; + + online = 
num_online_cpus(); + down_freq = hotplug_freq[online - 1][HOTPLUG_DOWN_INDEX]; + down_rq = hotplug_rq[online - 1][HOTPLUG_DOWN_INDEX]; + + if (online == 1) + return 0; + + if (dbs_tuners_ins.max_cpu_lock != 0 + && online > dbs_tuners_ins.max_cpu_lock) + return 1; + + if (dbs_tuners_ins.min_cpu_lock != 0 + && online <= dbs_tuners_ins.min_cpu_lock) + return 0; + + if (num_hist == 0 || num_hist % down_rate) + return 0; + + if (hotplug_compare_level == 0) { + for (i = num_hist - 1; i >= num_hist - down_rate; --i) { + usage = &hotplug_histories->usage[i]; + + freq = usage->freq; + rq_avg = usage->rq_avg; + avg_load = usage->avg_load; + + max_freq = max(max_freq, freq); + max_rq_avg = max(max_rq_avg, rq_avg); + max_avg_load = max(max_avg_load, avg_load); + + if (dbs_tuners_ins.dvfs_debug) + debug_hotplug_check(0, rq_avg, freq, usage); + } + } else { + usage = &hotplug_histories->usage[num_hist - 1]; + max_freq = usage->freq; + max_rq_avg = usage->rq_avg; + max_avg_load = usage->avg_load; + if (dbs_tuners_ins.dvfs_debug) + debug_hotplug_check(0, max_rq_avg, max_freq, usage); + } + + if ((max_freq <= down_freq && max_rq_avg <= down_rq) || (online >= 2 && max_avg_load < down_avg_load)) { + printk(KERN_ERR "[HOTPLUG OUT] %s %d<=%d && %d<%d\n", + __func__, max_freq, down_freq, max_rq_avg, down_rq); + hotplug_histories->num_hist = 0; + return 1; + } + + return 0; +} + +static void dbs_check_cpu(struct cpufreq_nightmare_cpuinfo *this_dbs_info) +{ + struct cpufreq_policy *policy; + unsigned int j; + int num_hist = hotplug_histories->num_hist; + int max_hotplug_rate = max(dbs_tuners_ins.cpu_up_rate,dbs_tuners_ins.cpu_down_rate); + int inc_cpu_load = dbs_tuners_ins.inc_cpu_load; + int dec_cpu_load = dbs_tuners_ins.dec_cpu_load; + unsigned int avg_rate_mult = 0; + + /* add total_load, avg_load to get average load */ + unsigned int total_load = 0; + unsigned int avg_load = 0; + int rq_avg = 0; + policy = this_dbs_info->cur_policy; + + hotplug_histories->usage[num_hist].freq = policy->cur; + hotplug_histories->usage[num_hist].rq_avg = get_nr_run_avg(); + + /* add total_load, avg_load to get average load */ + rq_avg = hotplug_histories->usage[num_hist].rq_avg; + + ++hotplug_histories->num_hist; + + for_each_cpu(j, policy->cpus) { + struct cpufreq_nightmare_cpuinfo *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + cputime64_t prev_wall_time, prev_idle_time, prev_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + int load; + //int freq_avg; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + prev_wall_time = j_dbs_info->prev_cpu_wall; + prev_idle_time = j_dbs_info->prev_cpu_idle; + prev_iowait_time = j_dbs_info->prev_cpu_iowait; + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + prev_wall_time); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + prev_idle_time); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, + prev_iowait_time); + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) { + u64 cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_dbs_info->prev_cpu_nice; + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + 
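+ /* niced time is folded into idle_time below, so time spent in niced tasks is not counted as load */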
j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + + if (cpu_online(j)) { + total_load += load; + hotplug_histories->usage[num_hist].load[j] = load; + } else { + hotplug_histories->usage[num_hist].load[j] = -1; + } + + } + /* calculate the average load across all related CPUs */ + avg_load = total_load / num_online_cpus(); + hotplug_histories->usage[num_hist].avg_load = avg_load; + + /* Check for CPU hotplug */ + if (check_up()) { + queue_work_on(this_dbs_info->cpu, dvfs_workqueues,&this_dbs_info->up_work); + } + else if (check_down()) { + queue_work_on(this_dbs_info->cpu, dvfs_workqueues,&this_dbs_info->down_work); + } + if (hotplug_histories->num_hist == max_hotplug_rate) + hotplug_histories->num_hist = 0; + + /* scale the frequency of the online CPUs */ + for_each_cpu(j, policy->cpus) { + struct cpufreq_nightmare_cpuinfo *j_dbs_info; + int load; + int index; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + if (cpu_online(j)) { + index = 0; + load = hotplug_histories->usage[num_hist].load[j]; + // use the lower load trigger below freq_for_responsiveness so the frequency scales up more quickly + if (j_dbs_info->cur_policy->cur < dbs_tuners_ins.freq_for_responsiveness) + inc_cpu_load = dbs_tuners_ins.inc_cpu_load_at_min_freq; + else + inc_cpu_load = dbs_tuners_ins.inc_cpu_load; + + // check for frequency increase or decrease + if (load >= inc_cpu_load) { + unsigned int inc_load = (load * j_dbs_info->cur_policy->min) / 100; + unsigned int inc_step = (dbs_tuners_ins.freq_step * j_dbs_info->cur_policy->min) / 100; + unsigned int inc; + unsigned int freq_up = 0; + + avg_rate_mult += dbs_tuners_ins.sampling_up_factor; + + // already at the maximum frequency, move on to the next CPU + if (j_dbs_info->cur_policy->cur == j_dbs_info->cur_policy->max) { + continue; + } + + inc = inc_load + inc_step; + inc -= (dbs_tuners_ins.freq_up_brake * j_dbs_info->cur_policy->min) / 100; + + freq_up = min(j_dbs_info->cur_policy->max,j_dbs_info->cur_policy->cur + inc); + + if (freq_up != j_dbs_info->cur_policy->cur) { + __cpufreq_driver_target(j_dbs_info->cur_policy, freq_up, CPUFREQ_RELATION_L); + } + + } + else if (load < dec_cpu_load && load > -1) { + unsigned int dec_load = ((100 - load) * (j_dbs_info->cur_policy->min)) / 100; + unsigned int dec_step = (dbs_tuners_ins.freq_step_dec * (j_dbs_info->cur_policy->min)) / 100; + unsigned int dec; + unsigned int freq_down = 0; + + avg_rate_mult += dbs_tuners_ins.sampling_down_factor; + + // already at the minimum frequency, move on to the next CPU + if (j_dbs_info->cur_policy->cur == j_dbs_info->cur_policy->min) { + continue; + } + + dec = dec_load + dec_step; + + freq_down = max(j_dbs_info->cur_policy->min,j_dbs_info->cur_policy->cur - dec); + + if (freq_down != j_dbs_info->cur_policy->cur) { + __cpufreq_driver_target(j_dbs_info->cur_policy, freq_down, CPUFREQ_RELATION_L); + } + } + } + } + /* We want all CPUs to do sampling nearly on + * same jiffy + */ + if (avg_rate_mult > 0) + this_dbs_info->avg_rate_mult = (avg_rate_mult * 10) / num_online_cpus(); + else + this_dbs_info->avg_rate_mult = 10; + + return; +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpufreq_nightmare_cpuinfo *dbs_info = + container_of(work, struct cpufreq_nightmare_cpuinfo, work.work); + unsigned int cpu = dbs_info->cpu; + int delay; +
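+ /*
+ * avg_rate_mult holds the per-CPU sampling_up/down_factor values
+ * averaged over the online CPUs by dbs_check_cpu(), expressed in
+ * tenths (25 means 2.5x) and clamped to at least 10 (1.0x), so the
+ * delay computed below works out to sampling_rate * avg_rate_mult / 10.
+ */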
+ mutex_lock(&dbs_info->timer_mutex); + + dbs_check_cpu(dbs_info); + /* We want all CPUs to do sampling nearly on + * same jiffy + */ + delay = usecs_to_jiffies((dbs_tuners_ins.sampling_rate * (dbs_info->avg_rate_mult < 10 ? 10 : dbs_info->avg_rate_mult)) / 10); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + queue_delayed_work_on(cpu, dvfs_workqueues, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpufreq_nightmare_cpuinfo *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(DEF_START_DELAY * 1000 * 1000 + + dbs_tuners_ins.sampling_rate); + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + INIT_WORK(&dbs_info->up_work, cpu_up_work); + INIT_WORK(&dbs_info->down_work, cpu_down_work); + + queue_delayed_work_on(dbs_info->cpu, dvfs_workqueues, + &dbs_info->work, delay + 2 * HZ); +} + +static inline void dbs_timer_exit(struct cpufreq_nightmare_cpuinfo *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); + cancel_work_sync(&dbs_info->up_work); + cancel_work_sync(&dbs_info->down_work); +} + +static int reboot_notifier_call(struct notifier_block *this, + unsigned long code, void *_cmd) +{ + atomic_set(&g_hotplug_lock, 1); + return NOTIFY_DONE; +} + +static struct notifier_block reboot_notifier = { + .notifier_call = reboot_notifier_call, +}; + +#ifdef CONFIG_HAS_EARLYSUSPEND +static struct early_suspend early_suspend; +unsigned int previous_freq_step; +unsigned int previous_sampling_rate; +static void cpufreq_nightmare_early_suspend(struct early_suspend *h) +{ +#if EARLYSUSPEND_HOTPLUGLOCK + dbs_tuners_ins.early_suspend = + atomic_read(&g_hotplug_lock); +#endif + previous_freq_step = dbs_tuners_ins.freq_step; + previous_sampling_rate = dbs_tuners_ins.sampling_rate; + dbs_tuners_ins.freq_step = 10; + dbs_tuners_ins.sampling_rate = 200000; +#if EARLYSUSPEND_HOTPLUGLOCK + atomic_set(&g_hotplug_lock, + (dbs_tuners_ins.min_cpu_lock) ? 
dbs_tuners_ins.min_cpu_lock : 1); + apply_hotplug_lock(); + stop_rq_work(); +#endif +} +static void cpufreq_nightmare_late_resume(struct early_suspend *h) +{ +#if EARLYSUSPEND_HOTPLUGLOCK + atomic_set(&g_hotplug_lock, dbs_tuners_ins.early_suspend); +#endif + dbs_tuners_ins.early_suspend = -1; + dbs_tuners_ins.freq_step = previous_freq_step; + dbs_tuners_ins.sampling_rate = previous_sampling_rate; +#if EARLYSUSPEND_HOTPLUGLOCK + apply_hotplug_lock(); + start_rq_work(); +#endif +} +#endif + +static int cpufreq_governor_nightmare(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpufreq_nightmare_cpuinfo *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + dbs_tuners_ins.max_freq = policy->max; + dbs_tuners_ins.min_freq = policy->min; + hotplug_histories->num_hist = 0; + start_rq_work(); + + mutex_lock(&dbs_mutex); + + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpufreq_nightmare_cpuinfo *j_dbs_info; + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + j_dbs_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + this_dbs_info->cpu = cpu; + this_dbs_info->avg_rate_mult = 20; + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + min_sampling_rate = MIN_SAMPLING_RATE; + dbs_tuners_ins.sampling_rate = DEF_SAMPLING_RATE; + dbs_tuners_ins.io_is_busy = 0; + } + mutex_unlock(&dbs_mutex); + + register_reboot_notifier(&reboot_notifier); + + mutex_init(&this_dbs_info->timer_mutex); + dbs_timer_init(this_dbs_info); + +#if !EARLYSUSPEND_HOTPLUGLOCK + register_pm_notifier(&pm_notifier); +#endif +#ifdef CONFIG_HAS_EARLYSUSPEND + register_early_suspend(&early_suspend); +#endif + break; + + case CPUFREQ_GOV_STOP: +#ifdef CONFIG_HAS_EARLYSUSPEND + unregister_early_suspend(&early_suspend); +#endif +#if !EARLYSUSPEND_HOTPLUGLOCK + unregister_pm_notifier(&pm_notifier); +#endif + + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + mutex_destroy(&this_dbs_info->timer_mutex); + + unregister_reboot_notifier(&reboot_notifier); + + dbs_enable--; + mutex_unlock(&dbs_mutex); + + stop_rq_work(); + + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, + CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, + CPUFREQ_RELATION_L); + + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +static int __init cpufreq_gov_nightmare_init(void) +{ + int ret; + + ret = init_rq_avg(); + if (ret) + return ret; + + hotplug_histories = kzalloc(sizeof(struct cpu_usage_history), GFP_KERNEL); + if (!hotplug_histories) { + pr_err("%s cannot create hotplug history array\n", __func__); + ret = -ENOMEM; + goto err_hist; + } + + dvfs_workqueues = create_workqueue("knightmare"); + if (!dvfs_workqueues) { + pr_err("%s cannot create 
workqueue\n", __func__); + ret = -ENOMEM; + goto err_queue; + } + + ret = cpufreq_register_governor(&cpufreq_gov_nightmare); + if (ret) + goto err_reg; + +#ifdef CONFIG_HAS_EARLYSUSPEND + early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; + early_suspend.suspend = cpufreq_nightmare_early_suspend; + early_suspend.resume = cpufreq_nightmare_late_resume; +#endif + + return ret; + +err_reg: + destroy_workqueue(dvfs_workqueues); +err_queue: + kfree(hotplug_histories); +err_hist: + kfree(rq_data); + return ret; +} + +static void __exit cpufreq_gov_nightmare_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_nightmare); + destroy_workqueue(dvfs_workqueues); + kfree(hotplug_histories); + kfree(rq_data); +} + +MODULE_AUTHOR("ByungChang Cha "); +MODULE_DESCRIPTION("'cpufreq_nightmare' - A dynamic cpufreq/cpuhotplug governor"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_NIGHTMARE +fs_initcall(cpufreq_gov_nightmare_init); +#else +module_init(cpufreq_gov_nightmare_init); +#endif +module_exit(cpufreq_gov_nightmare_exit); diff --git a/drivers/cpufreq/cpufreq_pegasusq.c b/drivers/cpufreq/cpufreq_pegasusq.c new file mode 100644 index 00000000..230abf81 --- /dev/null +++ b/drivers/cpufreq/cpufreq_pegasusq.c @@ -0,0 +1,1636 @@ +/* + * drivers/cpufreq/cpufreq_pegasusq.c + * + * Copyright (C) 2011 Samsung Electronics co. ltd + * ByungChang Cha + * + * Based on ondemand governor + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_HAS_EARLYSUSPEND +#include +#endif +#define EARLYSUSPEND_HOTPLUGLOCK 1 + +/* + * runqueue average + */ + +#define RQ_AVG_TIMER_RATE 10 + +static bool boostpulse_relayf = false; +static unsigned int boostpulse_relay_sr = 0; +static unsigned int Lboostpulse_value = 1134000; + +extern void apenable_auto_hotplug(bool state); +extern bool apget_enable_auto_hotplug(void); +static bool prev_apenable; + +struct runqueue_data { + unsigned int nr_run_avg; + unsigned int update_rate; + int64_t last_time; + int64_t total_time; + struct delayed_work work; + struct workqueue_struct *nr_run_wq; + spinlock_t lock; +}; + +static struct runqueue_data *rq_data; +static void rq_work_fn(struct work_struct *work); + +static void start_rq_work(void) +{ + rq_data->nr_run_avg = 0; + rq_data->last_time = 0; + rq_data->total_time = 0; + if (rq_data->nr_run_wq == NULL) + rq_data->nr_run_wq = + create_singlethread_workqueue("nr_run_avg"); + + queue_delayed_work(rq_data->nr_run_wq, &rq_data->work, + msecs_to_jiffies(rq_data->update_rate)); + return; +} + +static void stop_rq_work(void) +{ + if (rq_data->nr_run_wq) + cancel_delayed_work(&rq_data->work); + return; +} + +static int __init init_rq_avg(void) +{ + rq_data = kzalloc(sizeof(struct runqueue_data), GFP_KERNEL); + if (rq_data == NULL) { + pr_err("%s cannot allocate memory\n", __func__); + return -ENOMEM; + } + spin_lock_init(&rq_data->lock); + rq_data->update_rate = RQ_AVG_TIMER_RATE; + INIT_DELAYED_WORK_DEFERRABLE(&rq_data->work, rq_work_fn); + + return 0; +} + +static void rq_work_fn(struct work_struct *work) +{ + int64_t time_diff = 0; + int64_t nr_run = 0; + unsigned long flags = 0; + int64_t cur_time = 
ktime_to_ns(ktime_get()); + + spin_lock_irqsave(&rq_data->lock, flags); + + if (rq_data->last_time == 0) + rq_data->last_time = cur_time; + if (rq_data->nr_run_avg == 0) + rq_data->total_time = 0; + + nr_run = nr_running() * 100; + time_diff = cur_time - rq_data->last_time; + do_div(time_diff, 1000 * 1000); + + if (time_diff != 0 && rq_data->total_time != 0) { + nr_run = (nr_run * time_diff) + + (rq_data->nr_run_avg * rq_data->total_time); + do_div(nr_run, rq_data->total_time + time_diff); + } + rq_data->nr_run_avg = nr_run; + rq_data->total_time += time_diff; + rq_data->last_time = cur_time; + + if (rq_data->update_rate != 0) + queue_delayed_work(rq_data->nr_run_wq, &rq_data->work, + msecs_to_jiffies(rq_data->update_rate)); + + spin_unlock_irqrestore(&rq_data->lock, flags); +} + +static unsigned int get_nr_run_avg(void) +{ + unsigned int nr_run_avg; + unsigned long flags = 0; + + spin_lock_irqsave(&rq_data->lock, flags); + nr_run_avg = rq_data->nr_run_avg; + rq_data->nr_run_avg = 0; + spin_unlock_irqrestore(&rq_data->lock, flags); + + return nr_run_avg; +} + + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_SAMPLING_DOWN_FACTOR (3) +#define MAX_SAMPLING_DOWN_FACTOR (100000) +#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (14) +#define DEF_FREQUENCY_UP_THRESHOLD (95) + +/* for multiple freq_step */ +#define DEF_UP_THRESHOLD_DIFF (5) + +#define DEF_FREQUENCY_MIN_SAMPLE_RATE (10000) +#define MIN_FREQUENCY_UP_THRESHOLD (11) +#define MAX_FREQUENCY_UP_THRESHOLD (100) +#define DEF_SAMPLING_RATE (40000) +#define MIN_SAMPLING_RATE (10000) +#define MAX_HOTPLUG_RATE (40u) + +#define DEF_MAX_CPU_LOCK (0) +#define DEF_MIN_CPU_LOCK (0) +#define DEF_CPU_UP_FREQ (500000) +#define DEF_CPU_DOWN_FREQ (200000) +#define DEF_UP_NR_CPUS (1) +#define DEF_CPU_UP_RATE (9) +#define DEF_CPU_DOWN_RATE (3) +#define DEF_FREQ_STEP (30) +/* for multiple freq_step */ +#define DEF_FREQ_STEP_DEC (13) + +#define DEF_START_DELAY (0) + +#define UP_THRESHOLD_AT_MIN_FREQ (55) +#define FREQ_FOR_RESPONSIVENESS (400000) +/* for fast decrease */ +#define FREQ_FOR_FAST_DOWN (1200000) +#define UP_THRESHOLD_AT_FAST_DOWN (95) + +#define HOTPLUG_DOWN_INDEX (0) +#define HOTPLUG_UP_INDEX (1) + +#ifdef CONFIG_MACH_MIDAS +static int hotplug_rq[4][2] = { + {0, 100}, {100, 200}, {200, 300}, {300, 0} +}; + +static int hotplug_freq[4][2] = { + {0, 500000}, + {200000, 600000}, + {500000, 800000}, + {500000, 0} +}; +#else +static int hotplug_rq[4][2] = { + {0, 200}, {200, 200}, {200, 300}, {300, 0} +}; + +static int hotplug_freq[4][2] = { + {0, 800000}, + {500000, 500000}, + {200000, 500000}, + {200000, 0} +}; +#endif + +static unsigned int min_sampling_rate; + +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_PEGASUSQ +static +#endif +struct cpufreq_governor cpufreq_gov_pegasusq = { + .name = "pegasusq", + .governor = cpufreq_governor_dbs, + .owner = THIS_MODULE, +}; + +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct work_struct up_work; + struct work_struct down_work; + struct cpufreq_frequency_table *freq_table; + unsigned int rate_mult; + int cpu; + /* + * percpu mutex that serializes governor 
limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); + +struct workqueue_struct *dvfs_workqueue; + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects dbs_enable in governor start/stop. + */ +static DEFINE_MUTEX(dbs_mutex); + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int ignore_nice; + unsigned int sampling_down_factor; + unsigned int io_is_busy; + /* pegasusq tuners */ + unsigned int freq_step; + unsigned int cpu_up_rate; + unsigned int cpu_down_rate; + unsigned int cpu_up_freq; + unsigned int cpu_down_freq; + unsigned int up_nr_cpus; + unsigned int max_cpu_lock; + unsigned int min_cpu_lock; + atomic_t hotplug_lock; + unsigned int dvfs_debug; + unsigned int max_freq; + unsigned int min_freq; +#ifdef CONFIG_HAS_EARLYSUSPEND + int early_suspend; +#endif + unsigned int up_threshold_at_min_freq; + unsigned int freq_for_responsiveness; +} dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, + .ignore_nice = 1, + .freq_step = DEF_FREQ_STEP, + .cpu_up_rate = DEF_CPU_UP_RATE, + .cpu_down_rate = DEF_CPU_DOWN_RATE, + .cpu_up_freq = DEF_CPU_UP_FREQ, + .cpu_down_freq = DEF_CPU_DOWN_FREQ, + .up_nr_cpus = DEF_UP_NR_CPUS, + .max_cpu_lock = DEF_MAX_CPU_LOCK, + .min_cpu_lock = DEF_MIN_CPU_LOCK, + .hotplug_lock = ATOMIC_INIT(0), + .dvfs_debug = 0, +#ifdef CONFIG_HAS_EARLYSUSPEND + .early_suspend = -1, +#endif + .up_threshold_at_min_freq = UP_THRESHOLD_AT_MIN_FREQ, + .freq_for_responsiveness = FREQ_FOR_RESPONSIVENESS, +}; + + +/* + * CPU hotplug lock interface + */ + +static atomic_t g_hotplug_count = ATOMIC_INIT(0); +static atomic_t g_hotplug_lock = ATOMIC_INIT(0); + +static void apply_hotplug_lock(void) +{ + int online, possible, lock, flag; + struct work_struct *work; + struct cpu_dbs_info_s *dbs_info; + + /* do turn_on/off cpus */ + dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ + online = num_online_cpus(); + possible = num_possible_cpus(); + lock = atomic_read(&g_hotplug_lock); + flag = lock - online; + + if (flag == 0) + return; + + work = flag > 0 ? 
&dbs_info->up_work : &dbs_info->down_work; + + pr_debug("%s online %d possible %d lock %d flag %d %d\n", + __func__, online, possible, lock, flag, (int)abs(flag)); + + queue_work_on(dbs_info->cpu, dvfs_workqueue, work); +} + +int cpufreq_pegasusq_cpu_lock(int num_core) +{ + int prev_lock; + + if (num_core < 1 || num_core > num_possible_cpus()) + return -EINVAL; + + prev_lock = atomic_read(&g_hotplug_lock); + + if (prev_lock != 0 && prev_lock < num_core) + return -EINVAL; + else if (prev_lock == num_core) + atomic_inc(&g_hotplug_count); + + atomic_set(&g_hotplug_lock, num_core); + atomic_set(&g_hotplug_count, 1); + apply_hotplug_lock(); + + return 0; +} + +int cpufreq_pegasusq_cpu_unlock(int num_core) +{ + int prev_lock = atomic_read(&g_hotplug_lock); + + if (prev_lock < num_core) + return 0; + else if (prev_lock == num_core) + atomic_dec(&g_hotplug_count); + + if (atomic_read(&g_hotplug_count) == 0) + atomic_set(&g_hotplug_lock, 0); + + return 0; +} + +void cpufreq_pegasusq_min_cpu_lock(unsigned int num_core) +{ + int online, flag; + struct cpu_dbs_info_s *dbs_info; + + dbs_tuners_ins.min_cpu_lock = min(num_core, num_possible_cpus()); + + dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ + online = num_online_cpus(); + flag = (int)num_core - online; + if (flag <= 0) + return; + queue_work_on(dbs_info->cpu, dvfs_workqueue, &dbs_info->up_work); +} + +void cpufreq_pegasusq_min_cpu_unlock(void) +{ + int online, lock, flag; + struct cpu_dbs_info_s *dbs_info; + + dbs_tuners_ins.min_cpu_lock = 0; + + dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ + online = num_online_cpus(); + lock = atomic_read(&g_hotplug_lock); + if (lock == 0) + return; + flag = lock - online; + if (flag >= 0) + return; + queue_work_on(dbs_info->cpu, dvfs_workqueue, &dbs_info->down_work); +} + +/* + * History of CPU usage + */ +struct cpu_usage { + unsigned int freq; + unsigned int load[NR_CPUS]; + unsigned int rq_avg; + unsigned int avg_load; +}; + +struct cpu_usage_history { + struct cpu_usage usage[MAX_HOTPLUG_RATE]; + unsigned int num_hist; +}; + +struct cpu_usage_history *hotplug_history; + +static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, + u64 *wall) +{ + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + + idle_time = cur_wall_time - busy_time; + if (wall) + *wall = jiffies_to_usecs(cur_wall_time); + + return jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, wall); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + + return idle_time; +} + +static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, + cputime64_t *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +static ssize_t show_boostpulse_value(struct kobject *kobj, + struct attribute 
*attr, char *buf) +{ + return sprintf(buf, "%u\n", Lboostpulse_value / 1000); +} + +define_one_global_ro(sampling_rate_min); + +/* cpufreq_pegasusq Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(up_threshold, up_threshold); +show_one(sampling_down_factor, sampling_down_factor); +show_one(ignore_nice_load, ignore_nice); +show_one(down_differential, down_differential); +show_one(freq_step, freq_step); +show_one(cpu_up_rate, cpu_up_rate); +show_one(cpu_down_rate, cpu_down_rate); +show_one(cpu_up_freq, cpu_up_freq); +show_one(cpu_down_freq, cpu_down_freq); +show_one(up_nr_cpus, up_nr_cpus); +show_one(max_cpu_lock, max_cpu_lock); +show_one(min_cpu_lock, min_cpu_lock); +show_one(dvfs_debug, dvfs_debug); +show_one(up_threshold_at_min_freq, up_threshold_at_min_freq); +show_one(freq_for_responsiveness, freq_for_responsiveness); +static ssize_t show_hotplug_lock(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", atomic_read(&g_hotplug_lock)); +} + +#define show_hotplug_param(file_name, num_core, up_down) \ +static ssize_t show_##file_name##_##num_core##_##up_down \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", file_name[num_core - 1][up_down]); \ +} + +#define store_hotplug_param(file_name, num_core, up_down) \ +static ssize_t store_##file_name##_##num_core##_##up_down \ +(struct kobject *kobj, struct attribute *attr, \ + const char *buf, size_t count) \ +{ \ + unsigned int input; \ + int ret; \ + ret = sscanf(buf, "%u", &input); \ + if (ret != 1) \ + return -EINVAL; \ + file_name[num_core - 1][up_down] = input; \ + return count; \ +} + +show_hotplug_param(hotplug_freq, 1, 1); +show_hotplug_param(hotplug_freq, 2, 0); +show_hotplug_param(hotplug_freq, 2, 1); +show_hotplug_param(hotplug_freq, 3, 0); +show_hotplug_param(hotplug_freq, 3, 1); +show_hotplug_param(hotplug_freq, 4, 0); + +show_hotplug_param(hotplug_rq, 1, 1); +show_hotplug_param(hotplug_rq, 2, 0); +show_hotplug_param(hotplug_rq, 2, 1); +show_hotplug_param(hotplug_rq, 3, 0); +show_hotplug_param(hotplug_rq, 3, 1); +show_hotplug_param(hotplug_rq, 4, 0); + +store_hotplug_param(hotplug_freq, 1, 1); +store_hotplug_param(hotplug_freq, 2, 0); +store_hotplug_param(hotplug_freq, 2, 1); +store_hotplug_param(hotplug_freq, 3, 0); +store_hotplug_param(hotplug_freq, 3, 1); +store_hotplug_param(hotplug_freq, 4, 0); + +store_hotplug_param(hotplug_rq, 1, 1); +store_hotplug_param(hotplug_rq, 2, 0); +store_hotplug_param(hotplug_rq, 2, 1); +store_hotplug_param(hotplug_rq, 3, 0); +store_hotplug_param(hotplug_rq, 3, 1); +store_hotplug_param(hotplug_rq, 4, 0); + +define_one_global_rw(hotplug_freq_1_1); +define_one_global_rw(hotplug_freq_2_0); +define_one_global_rw(hotplug_freq_2_1); +define_one_global_rw(hotplug_freq_3_0); +define_one_global_rw(hotplug_freq_3_1); +define_one_global_rw(hotplug_freq_4_0); + +define_one_global_rw(hotplug_rq_1_1); +define_one_global_rw(hotplug_rq_2_0); +define_one_global_rw(hotplug_rq_2_1); +define_one_global_rw(hotplug_rq_3_0); +define_one_global_rw(hotplug_rq_3_1); +define_one_global_rw(hotplug_rq_4_0); + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if 
(ret != 1) + return -EINVAL; + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + return count; +} + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + dbs_tuners_ins.io_is_busy = !!input; + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + dbs_tuners_ins.up_threshold = input; + return count; +} + +static ssize_t store_sampling_down_factor(struct kobject *a, + struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input, j; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + dbs_tuners_ins.sampling_down_factor = input; + + /* Reset down sampling multiplier in case it was active */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->rate_mult = 1; + } + return count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = + get_cpu_idle_time(j, &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + return count; +} + +static ssize_t store_down_differential(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.down_differential = min(input, 100u); + return count; +} + +static ssize_t store_freq_step(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.freq_step = min(input, 100u); + return count; +} + +static ssize_t store_cpu_up_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.cpu_up_rate = min(input, MAX_HOTPLUG_RATE); + return count; +} + +static ssize_t store_cpu_down_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.cpu_down_rate = min(input, MAX_HOTPLUG_RATE); + return count; +} + +static ssize_t store_cpu_up_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.cpu_up_freq = min(input, dbs_tuners_ins.max_freq); + return count; +} + +static ssize_t store_cpu_down_freq(struct 
kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.cpu_down_freq = max(input, dbs_tuners_ins.min_freq); + return count; +} + +static ssize_t store_up_nr_cpus(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.up_nr_cpus = min(input, num_possible_cpus()); + return count; +} + +static ssize_t store_max_cpu_lock(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.max_cpu_lock = min(input, num_possible_cpus()); + return count; +} + +static ssize_t store_min_cpu_lock(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + if (input == 0) + cpufreq_pegasusq_min_cpu_unlock(); + else + cpufreq_pegasusq_min_cpu_lock(input); + return count; +} + +static ssize_t store_hotplug_lock(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + int prev_lock; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + input = min(input, num_possible_cpus()); + prev_lock = atomic_read(&dbs_tuners_ins.hotplug_lock); + + if (prev_lock) + cpufreq_pegasusq_cpu_unlock(prev_lock); + + if (input == 0) { + atomic_set(&dbs_tuners_ins.hotplug_lock, 0); + return count; + } + + ret = cpufreq_pegasusq_cpu_lock(input); + if (ret) { + printk(KERN_ERR "[HOTPLUG] already locked with smaller value %d < %d\n", + atomic_read(&g_hotplug_lock), input); + return ret; + } + + atomic_set(&dbs_tuners_ins.hotplug_lock, input); + + return count; +} + +static ssize_t store_dvfs_debug(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.dvfs_debug = input > 0; + return count; +} + +static ssize_t store_up_threshold_at_min_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + dbs_tuners_ins.up_threshold_at_min_freq = input; + return count; +} + +static ssize_t store_freq_for_responsiveness(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.freq_for_responsiveness = input; + return count; +} + +static ssize_t store_boostpulse_value(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input * 1000 > 2106000) + input = 2106; /* cap at 2106 MHz; the value is stored in kHz below */ + + Lboostpulse_value = input * 1000; + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(up_threshold); +define_one_global_rw(sampling_down_factor); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(down_differential); +define_one_global_rw(freq_step); +define_one_global_rw(cpu_up_rate);
+define_one_global_rw(cpu_down_rate); +define_one_global_rw(cpu_up_freq); +define_one_global_rw(cpu_down_freq); +define_one_global_rw(up_nr_cpus); +define_one_global_rw(max_cpu_lock); +define_one_global_rw(min_cpu_lock); +define_one_global_rw(hotplug_lock); +define_one_global_rw(dvfs_debug); +define_one_global_rw(up_threshold_at_min_freq); +define_one_global_rw(freq_for_responsiveness); +define_one_global_rw(boostpulse_value); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_min.attr, + &sampling_rate.attr, + &up_threshold.attr, + &sampling_down_factor.attr, + &ignore_nice_load.attr, + &io_is_busy.attr, + &down_differential.attr, + &freq_step.attr, + &cpu_up_rate.attr, + &cpu_down_rate.attr, + &cpu_up_freq.attr, + &cpu_down_freq.attr, + &up_nr_cpus.attr, + /* priority: hotplug_lock > max_cpu_lock > min_cpu_lock + Exception: hotplug_lock on early_suspend uses min_cpu_lock */ + &max_cpu_lock.attr, + &min_cpu_lock.attr, + &hotplug_lock.attr, + &dvfs_debug.attr, + &hotplug_freq_1_1.attr, + &hotplug_freq_2_0.attr, + &hotplug_freq_2_1.attr, + &hotplug_freq_3_0.attr, + &hotplug_freq_3_1.attr, + &hotplug_freq_4_0.attr, + &hotplug_rq_1_1.attr, + &hotplug_rq_2_0.attr, + &hotplug_rq_2_1.attr, + &hotplug_rq_3_0.attr, + &hotplug_rq_3_1.attr, + &hotplug_rq_4_0.attr, + &up_threshold_at_min_freq.attr, + &freq_for_responsiveness.attr, + &boostpulse_value.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "pegasusq", +}; + +/************************** sysfs end ************************/ + +static void __cpuinit cpu_up_work(struct work_struct *work) +{ + int cpu; + int online = num_online_cpus(); + int nr_up = dbs_tuners_ins.up_nr_cpus; + int min_cpu_lock = dbs_tuners_ins.min_cpu_lock; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock && min_cpu_lock) + nr_up = max(hotplug_lock, min_cpu_lock) - online; + else if (hotplug_lock) + nr_up = hotplug_lock - online; + else if (min_cpu_lock) + nr_up = max(nr_up, min_cpu_lock - online); + + if (online == 1) { + printk(KERN_ERR "CPU_UP 3\n"); + cpu_up(num_possible_cpus() - 1); + nr_up -= 1; + } + + for_each_cpu_not(cpu, cpu_online_mask) { + if (nr_up-- == 0) + break; + if (cpu == 0) + continue; + printk(KERN_ERR "CPU_UP %d\n", cpu); + cpu_up(cpu); + } +} + +static void cpu_down_work(struct work_struct *work) +{ + int cpu; + int online = num_online_cpus(); + int nr_down = 1; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock) + nr_down = online - hotplug_lock; + + for_each_online_cpu(cpu) { + if (cpu == 0) + continue; + printk(KERN_ERR "CPU_DOWN %d\n", cpu); + cpu_down(cpu); + if (--nr_down == 0) + break; + } +} + +static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) +{ +#ifndef CONFIG_ARCH_EXYNOS4 + if (p->cur == p->max) + return; +#endif + + __cpufreq_driver_target(p, freq, CPUFREQ_RELATION_L); +} + +/* + * print hotplug debugging info. + * which 1 : UP, 0 : DOWN + */ +static void debug_hotplug_check(int which, int rq_avg, int freq, + struct cpu_usage *usage) +{ + int cpu; + printk(KERN_ERR "CHECK %s rq %d.%02d freq %d [", which ? 
"up" : "down", + rq_avg / 100, rq_avg % 100, freq); + for_each_online_cpu(cpu) { + printk(KERN_ERR "(%d, %d), ", cpu, usage->load[cpu]); + } + printk(KERN_ERR "]\n"); +} + +static int check_up(void) +{ + int num_hist = hotplug_history->num_hist; + struct cpu_usage *usage; + int freq, rq_avg; + int avg_load; + int i; + int up_rate = dbs_tuners_ins.cpu_up_rate; + int up_freq, up_rq; + int min_freq = INT_MAX; + int min_rq_avg = INT_MAX; + int min_avg_load = INT_MAX; + int online; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock > 0) + return 0; + + online = num_online_cpus(); + up_freq = hotplug_freq[online - 1][HOTPLUG_UP_INDEX]; + up_rq = hotplug_rq[online - 1][HOTPLUG_UP_INDEX]; + + if (online == num_possible_cpus()) + return 0; + + if (dbs_tuners_ins.max_cpu_lock != 0 + && online >= dbs_tuners_ins.max_cpu_lock) + return 0; + + if (dbs_tuners_ins.min_cpu_lock != 0 + && online < dbs_tuners_ins.min_cpu_lock) + return 1; + + if (num_hist == 0 || num_hist % up_rate) + return 0; + + for (i = num_hist - 1; i >= num_hist - up_rate; --i) { + usage = &hotplug_history->usage[i]; + + freq = usage->freq; + rq_avg = usage->rq_avg; + avg_load = usage->avg_load; + + min_freq = min(min_freq, freq); + min_rq_avg = min(min_rq_avg, rq_avg); + min_avg_load = min(min_avg_load, avg_load); + + if (dbs_tuners_ins.dvfs_debug) + debug_hotplug_check(1, rq_avg, freq, usage); + } + + if (min_freq >= up_freq && min_rq_avg > up_rq) { + if (online >= 2) { + if (min_avg_load < 65) + return 0; + } + printk(KERN_ERR "[HOTPLUG IN] %s %d>=%d && %d>%d\n", + __func__, min_freq, up_freq, min_rq_avg, up_rq); + hotplug_history->num_hist = 0; + return 1; + } + return 0; +} + +static int check_down(void) +{ + int num_hist = hotplug_history->num_hist; + struct cpu_usage *usage; + int freq, rq_avg; + int avg_load; + int i; + int down_rate = dbs_tuners_ins.cpu_down_rate; + int down_freq, down_rq; + int max_freq = 0; + int max_rq_avg = 0; + int max_avg_load = 0; + int online; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock > 0) + return 0; + + online = num_online_cpus(); + down_freq = hotplug_freq[online - 1][HOTPLUG_DOWN_INDEX]; + down_rq = hotplug_rq[online - 1][HOTPLUG_DOWN_INDEX]; + + if (online == 1) + return 0; + + if (dbs_tuners_ins.max_cpu_lock != 0 + && online > dbs_tuners_ins.max_cpu_lock) + return 1; + + if (dbs_tuners_ins.min_cpu_lock != 0 + && online <= dbs_tuners_ins.min_cpu_lock) + return 0; + + if (num_hist == 0 || num_hist % down_rate) + return 0; + + for (i = num_hist - 1; i >= num_hist - down_rate; --i) { + usage = &hotplug_history->usage[i]; + + freq = usage->freq; + rq_avg = usage->rq_avg; + avg_load = usage->avg_load; + + max_freq = max(max_freq, freq); + max_rq_avg = max(max_rq_avg, rq_avg); + max_avg_load = max(max_avg_load, avg_load); + + if (dbs_tuners_ins.dvfs_debug) + debug_hotplug_check(0, rq_avg, freq, usage); + } + + if ((max_freq <= down_freq && max_rq_avg <= down_rq) + || (online >= 3 && max_avg_load < 30)) { + printk(KERN_ERR "[HOTPLUG OUT] %s %d<=%d && %d<%d\n", + __func__, max_freq, down_freq, max_rq_avg, down_rq); + hotplug_history->num_hist = 0; + return 1; + } + + return 0; +} + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + unsigned int max_load_freq; + + struct cpufreq_policy *policy; + unsigned int j; + int num_hist = hotplug_history->num_hist; + int max_hotplug_rate = max(dbs_tuners_ins.cpu_up_rate, + dbs_tuners_ins.cpu_down_rate); + int up_threshold = dbs_tuners_ins.up_threshold; + + /* add total_load, avg_load to get 
average load */ + unsigned int total_load = 0; + unsigned int avg_load = 0; + int load_each[4] = {-1, -1, -1, -1}; + int rq_avg = 0; + policy = this_dbs_info->cur_policy; + + if (boostpulse_relayf) + { + if (boostpulse_relay_sr != 0) + dbs_tuners_ins.sampling_rate = boostpulse_relay_sr; + boostpulse_relayf = false; + if (policy->cur > Lboostpulse_value) + return; + + __cpufreq_driver_target(policy, Lboostpulse_value, + CPUFREQ_RELATION_H); + return; + } + + hotplug_history->usage[num_hist].freq = policy->cur; + hotplug_history->usage[num_hist].rq_avg = get_nr_run_avg(); + + /* add total_load, avg_load to get average load */ + rq_avg = hotplug_history->usage[num_hist].rq_avg; + + ++hotplug_history->num_hist; + + /* Get Absolute Load - in terms of freq */ + max_load_freq = 0; + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + cputime64_t prev_wall_time, prev_idle_time, prev_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + unsigned int load, load_freq; + int freq_avg; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + prev_wall_time = j_dbs_info->prev_cpu_wall; + prev_idle_time = j_dbs_info->prev_cpu_idle; + prev_iowait_time = j_dbs_info->prev_cpu_iowait; + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + prev_wall_time); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + prev_idle_time); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, + prev_iowait_time); + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) { + cputime64_t cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_dbs_info->prev_cpu_nice; + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + + /* keep load of each CPUs and combined load across all CPUs */ + if (cpu_online(j)) + load_each[j] = load; + total_load += load; + + hotplug_history->usage[num_hist].load[j] = load; + + freq_avg = __cpufreq_driver_getavg(policy, j); + if (freq_avg <= 0) + freq_avg = policy->cur; + + load_freq = load * freq_avg; + if (load_freq > max_load_freq) + max_load_freq = load_freq; + } + /* calculate the average load across all related CPUs */ + avg_load = total_load / num_online_cpus(); + hotplug_history->usage[num_hist].avg_load = avg_load; + //pr_info("LOAD_TIMER - %d - %d - %d - %d", max_load_freq/1000, total_load, avg_load, num_online_cpus()); + + /* Check for CPU hotplug */ + if (check_up()) { + queue_work_on(this_dbs_info->cpu, dvfs_workqueue, + &this_dbs_info->up_work); + } else if (check_down()) { + queue_work_on(this_dbs_info->cpu, dvfs_workqueue, + &this_dbs_info->down_work); + } + if (hotplug_history->num_hist == max_hotplug_rate) + hotplug_history->num_hist = 0; + + /* Check for frequency increase */ + if (policy->cur < dbs_tuners_ins.freq_for_responsiveness) 
+ up_threshold = dbs_tuners_ins.up_threshold_at_min_freq; + /* for fast frequency decrease */ + else + up_threshold = dbs_tuners_ins.up_threshold; + + if (max_load_freq > up_threshold * policy->cur) { + /* for multiple freq_step */ + int inc = policy->max * (dbs_tuners_ins.freq_step + - DEF_FREQ_STEP_DEC * 2) / 100; + int target = 0; + + /* for multiple freq_step */ + if (max_load_freq > (up_threshold + DEF_UP_THRESHOLD_DIFF * 2) + * policy->cur) + inc = policy->max * dbs_tuners_ins.freq_step / 100; + else if (max_load_freq > (up_threshold + DEF_UP_THRESHOLD_DIFF) + * policy->cur) + inc = policy->max * (dbs_tuners_ins.freq_step + - DEF_FREQ_STEP_DEC) / 100; + + target = min(policy->max, policy->cur + inc); + + /* If switching to max speed, apply sampling_down_factor */ + if (policy->cur < policy->max && target == policy->max) + this_dbs_info->rate_mult = + dbs_tuners_ins.sampling_down_factor; + dbs_freq_increase(policy, target); + return; + } + + /* Check for frequency decrease */ +#ifndef CONFIG_ARCH_EXYNOS4 + /* if we cannot reduce the frequency anymore, break out early */ + if (policy->cur == policy->min) + return; +#endif + + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus DOWN_DIFFERENTIAL points under + * the threshold. + */ + if (max_load_freq < + (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * + policy->cur) { + unsigned int freq_next; + unsigned int down_thres; + + freq_next = max_load_freq / + (dbs_tuners_ins.up_threshold - + dbs_tuners_ins.down_differential); + + /* No longer fully busy, reset rate_mult */ + this_dbs_info->rate_mult = 1; + + if (freq_next < policy->min) + freq_next = policy->min; + + + down_thres = dbs_tuners_ins.up_threshold_at_min_freq + - dbs_tuners_ins.down_differential; + + if (freq_next < dbs_tuners_ins.freq_for_responsiveness + && (max_load_freq / freq_next) > down_thres) + freq_next = dbs_tuners_ins.freq_for_responsiveness; + + if (policy->cur == freq_next) + return; + + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } +} + +/*extern void pegasusq_is_active(bool val); + +void boostpulse_relay_pq(void) +{ + if (Lboostpulse_value > 0) + { + //pr_info("BOOST_PULSE_FROM_INTERACTIVE"); + if (dbs_tuners_ins.sampling_rate != min_sampling_rate) + boostpulse_relay_sr = dbs_tuners_ins.sampling_rate; + boostpulse_relayf = true; + dbs_tuners_ins.sampling_rate = min_sampling_rate; + } +}*/ + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + int delay; + + mutex_lock(&dbs_info->timer_mutex); + + dbs_check_cpu(dbs_info); + /* We want all CPUs to do sampling nearly on + * same jiffy + */ + delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate + * dbs_info->rate_mult); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + queue_delayed_work_on(cpu, dvfs_workqueue, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(DEF_START_DELAY * 1000 * 1000 + + dbs_tuners_ins.sampling_rate); + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + + queue_delayed_work_on(dbs_info->cpu, dvfs_workqueue, + &dbs_info->work, delay + 2 * HZ); +} + 
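+/*
+ * Illustrative numbers only (assuming HZ=100): the default 40000 us
+ * sampling_rate maps to a 4 jiffy period through usecs_to_jiffies().
+ * Subtracting "jiffies % delay" when more than one CPU is online
+ * phases each CPU's next expiry onto the same 4 jiffy boundary, which
+ * is how all the per-CPU samplers end up firing on nearly the same jiffy.
+ */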
+static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); + cancel_work_sync(&dbs_info->up_work); + cancel_work_sync(&dbs_info->down_work); +} + +static int reboot_notifier_call(struct notifier_block *this, + unsigned long code, void *_cmd) +{ + atomic_set(&g_hotplug_lock, 1); + return NOTIFY_DONE; +} + +static struct notifier_block reboot_notifier = { + .notifier_call = reboot_notifier_call, +}; + +#ifdef CONFIG_HAS_EARLYSUSPEND +static struct early_suspend early_suspend; +unsigned int prev_freq_step; +unsigned int prev_sampling_rate; +static void cpufreq_pegasusq_early_suspend(struct early_suspend *h) +{ +#if EARLYSUSPEND_HOTPLUGLOCK + dbs_tuners_ins.early_suspend = + atomic_read(&g_hotplug_lock); +#endif + prev_freq_step = dbs_tuners_ins.freq_step; + prev_sampling_rate = dbs_tuners_ins.sampling_rate; + dbs_tuners_ins.freq_step = 10; + dbs_tuners_ins.sampling_rate = 200000; +#if EARLYSUSPEND_HOTPLUGLOCK + atomic_set(&g_hotplug_lock, + (dbs_tuners_ins.min_cpu_lock) ? dbs_tuners_ins.min_cpu_lock : 1); + apply_hotplug_lock(); + stop_rq_work(); +#endif +} +static void cpufreq_pegasusq_late_resume(struct early_suspend *h) +{ +#if EARLYSUSPEND_HOTPLUGLOCK + atomic_set(&g_hotplug_lock, dbs_tuners_ins.early_suspend); +#endif + dbs_tuners_ins.early_suspend = -1; + dbs_tuners_ins.freq_step = prev_freq_step; + dbs_tuners_ins.sampling_rate = prev_sampling_rate; +#if EARLYSUSPEND_HOTPLUGLOCK + apply_hotplug_lock(); + start_rq_work(); +#endif +} +#endif + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + //pegasusq_is_active(true); + + prev_apenable = apget_enable_auto_hotplug(); + apenable_auto_hotplug(false); + + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + dbs_tuners_ins.max_freq = policy->max; + dbs_tuners_ins.min_freq = policy->min; + hotplug_history->num_hist = 0; + start_rq_work(); + + mutex_lock(&dbs_mutex); + + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + } + this_dbs_info->cpu = cpu; + this_dbs_info->rate_mult = 1; + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + min_sampling_rate = MIN_SAMPLING_RATE; + dbs_tuners_ins.sampling_rate = DEF_SAMPLING_RATE; + dbs_tuners_ins.io_is_busy = 0; + } + mutex_unlock(&dbs_mutex); + + register_reboot_notifier(&reboot_notifier); + + mutex_init(&this_dbs_info->timer_mutex); + dbs_timer_init(this_dbs_info); + +#if !EARLYSUSPEND_HOTPLUGLOCK + register_pm_notifier(&pm_notifier); +#endif +#ifdef CONFIG_HAS_EARLYSUSPEND + register_early_suspend(&early_suspend); +#endif + break; + + case CPUFREQ_GOV_STOP: + //pegasusq_is_active(false); + + apenable_auto_hotplug(prev_apenable); + +#ifdef CONFIG_HAS_EARLYSUSPEND + unregister_early_suspend(&early_suspend); +#endif +#if !EARLYSUSPEND_HOTPLUGLOCK + unregister_pm_notifier(&pm_notifier); +#endif + + 
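+ /* stop the sampling timer and cancel any queued hotplug up/down work before the policy is released */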
dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + mutex_destroy(&this_dbs_info->timer_mutex); + + unregister_reboot_notifier(&reboot_notifier); + + dbs_enable--; + mutex_unlock(&dbs_mutex); + + stop_rq_work(); + + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, + CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, + CPUFREQ_RELATION_L); + + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +static int __init cpufreq_gov_dbs_init(void) +{ + int ret; + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, 0); + + ret = init_rq_avg(); + if (ret) + return ret; + + INIT_WORK(&dbs_info->up_work, cpu_up_work); + INIT_WORK(&dbs_info->down_work, cpu_down_work); + + hotplug_history = kzalloc(sizeof(struct cpu_usage_history), GFP_KERNEL); + if (!hotplug_history) { + pr_err("%s cannot create hotplug history array\n", __func__); + ret = -ENOMEM; + goto err_hist; + } + + dvfs_workqueue = create_workqueue("kpegasusq"); + if (!dvfs_workqueue) { + pr_err("%s cannot create workqueue\n", __func__); + ret = -ENOMEM; + goto err_queue; + } + + ret = cpufreq_register_governor(&cpufreq_gov_pegasusq); + if (ret) + goto err_reg; + +#ifdef CONFIG_HAS_EARLYSUSPEND + early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; + early_suspend.suspend = cpufreq_pegasusq_early_suspend; + early_suspend.resume = cpufreq_pegasusq_late_resume; +#endif + + return ret; + +err_reg: + destroy_workqueue(dvfs_workqueue); +err_queue: + kfree(hotplug_history); +err_hist: + kfree(rq_data); + return ret; +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_pegasusq); + destroy_workqueue(dvfs_workqueue); + kfree(hotplug_history); + kfree(rq_data); +} + +MODULE_AUTHOR("ByungChang Cha "); +MODULE_DESCRIPTION("'cpufreq_pegasusq' - A dynamic cpufreq/cpuhotplug governor"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PEGASUSQ +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_slp.c b/drivers/cpufreq/cpufreq_slp.c new file mode 100644 index 00000000..bb1b00cf --- /dev/null +++ b/drivers/cpufreq/cpufreq_slp.c @@ -0,0 +1,1438 @@ +/* + * drivers/cpufreq/cpufreq_pegasusq.c + * + * Copyright (C) 2011 Samsung Electronics co. ltd + * ByungChang Cha + * + * Based on ondemand governor + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_HAS_EARLYSUSPEND +#include +#endif +#define EARLYSUSPEND_HOTPLUGLOCK 1 + +/* + * runqueue average + */ + +#define RQ_AVG_TIMER_RATE 10 + +extern void apenable_auto_hotplug(bool state); +extern bool apget_enable_auto_hotplug(void); +static bool prev_apenable; + +struct runqueue_data { + unsigned int nr_run_avg; + unsigned int update_rate; + int64_t last_time; + int64_t total_time; + struct delayed_work work; + struct workqueue_struct *nr_run_wq; + spinlock_t lock; +}; + +static struct runqueue_data *rq_data; +static void rq_work_fn(struct work_struct *work); + +static void start_rq_work(void) +{ + rq_data->nr_run_avg = 0; + rq_data->last_time = 0; + rq_data->total_time = 0; + if (rq_data->nr_run_wq == NULL) + rq_data->nr_run_wq = + create_singlethread_workqueue("nr_run_avg"); + + queue_delayed_work(rq_data->nr_run_wq, &rq_data->work, + msecs_to_jiffies(rq_data->update_rate)); + return; +} + +static void stop_rq_work(void) +{ + if (rq_data->nr_run_wq) + cancel_delayed_work(&rq_data->work); + return; +} + +static int __init init_rq_avg(void) +{ + rq_data = kzalloc(sizeof(struct runqueue_data), GFP_KERNEL); + if (rq_data == NULL) { + pr_err("%s cannot allocate memory\n", __func__); + return -ENOMEM; + } + spin_lock_init(&rq_data->lock); + rq_data->update_rate = RQ_AVG_TIMER_RATE; + INIT_DELAYED_WORK_DEFERRABLE(&rq_data->work, rq_work_fn); + + return 0; +} + +static void rq_work_fn(struct work_struct *work) +{ + int64_t time_diff = 0; + int64_t nr_run = 0; + unsigned long flags = 0; + int64_t cur_time = ktime_to_ns(ktime_get()); + + spin_lock_irqsave(&rq_data->lock, flags); + + if (rq_data->last_time == 0) + rq_data->last_time = cur_time; + if (rq_data->nr_run_avg == 0) + rq_data->total_time = 0; + + nr_run = nr_running() * 100; + time_diff = cur_time - rq_data->last_time; + do_div(time_diff, 1000 * 1000); + + if (time_diff != 0 && rq_data->total_time != 0) { + nr_run = (nr_run * time_diff) + + (rq_data->nr_run_avg * rq_data->total_time); + do_div(nr_run, rq_data->total_time + time_diff); + } + rq_data->nr_run_avg = nr_run; + rq_data->total_time += time_diff; + rq_data->last_time = cur_time; + + if (rq_data->update_rate != 0) + queue_delayed_work(rq_data->nr_run_wq, &rq_data->work, + msecs_to_jiffies(rq_data->update_rate)); + + spin_unlock_irqrestore(&rq_data->lock, flags); +} + +static unsigned int get_nr_run_avg(void) +{ + unsigned int nr_run_avg; + unsigned long flags = 0; + + spin_lock_irqsave(&rq_data->lock, flags); + nr_run_avg = rq_data->nr_run_avg; + rq_data->nr_run_avg = 0; + spin_unlock_irqrestore(&rq_data->lock, flags); + + return nr_run_avg; +} + + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_SAMPLING_DOWN_FACTOR (2) +#define MAX_SAMPLING_DOWN_FACTOR (100000) +#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (5) +#define DEF_FREQUENCY_UP_THRESHOLD (85) +#define DEF_FREQUENCY_MIN_SAMPLE_RATE (10000) +#define MIN_FREQUENCY_UP_THRESHOLD (11) +#define MAX_FREQUENCY_UP_THRESHOLD (100) +#define DEF_SAMPLING_RATE (50000) +#define MIN_SAMPLING_RATE (10000) +#define MAX_HOTPLUG_RATE (40u) + +#define DEF_MAX_CPU_LOCK (0) +#define DEF_MIN_CPU_LOCK (0) +#define DEF_CPU_UP_FREQ (500000) +#define DEF_CPU_DOWN_FREQ (200000) +#define DEF_UP_NR_CPUS (1) +#define DEF_CPU_UP_RATE (10) +#define DEF_CPU_DOWN_RATE (20) +#define 
DEF_FREQ_STEP (40) +#define DEF_START_DELAY (0) + +#define UP_THRESHOLD_AT_MIN_FREQ (40) +#define FREQ_FOR_RESPONSIVENESS (500000) + +#define HOTPLUG_DOWN_INDEX (0) +#define HOTPLUG_UP_INDEX (1) + +#ifdef CONFIG_MACH_MIDAS +static int hotplug_rq[4][2] = { + {0, 100}, {100, 200}, {200, 300}, {300, 0} +}; + +static int hotplug_freq[4][2] = { + {0, 500000}, + {200000, 500000}, + {200000, 500000}, + {200000, 0} +}; +#else +static int hotplug_rq[4][2] = { + {0, 100}, {100, 200}, {200, 300}, {300, 0} +}; + +static int hotplug_freq[4][2] = { + {0, 500000}, + {200000, 500000}, + {200000, 500000}, + {200000, 0} +}; +#endif + +static unsigned int min_sampling_rate; + +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_PEGASUSQ +static +#endif +struct cpufreq_governor cpufreq_gov_pegasusq = { + .name = "slp", + .governor = cpufreq_governor_dbs, + .owner = THIS_MODULE, +}; + +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct work_struct up_work; + struct work_struct down_work; + struct cpufreq_frequency_table *freq_table; + unsigned int rate_mult; + int cpu; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); + +static struct workqueue_struct *dvfs_workqueue; + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects dbs_enable in governor start/stop. 
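+ * Holding it across the dbs_enable++/-- transitions also keeps the
+ * first-start path consistent: the global sysfs attribute group is
+ * created exactly once, when the first policy switches to this
+ * governor (dbs_enable goes from 0 to 1).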
+ */ +static DEFINE_MUTEX(dbs_mutex); + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int ignore_nice; + unsigned int sampling_down_factor; + unsigned int io_is_busy; + /* pegasusq tuners */ + unsigned int freq_step; + unsigned int cpu_up_rate; + unsigned int cpu_down_rate; + unsigned int cpu_up_freq; + unsigned int cpu_down_freq; + unsigned int up_nr_cpus; + unsigned int max_cpu_lock; + unsigned int min_cpu_lock; + atomic_t hotplug_lock; + unsigned int dvfs_debug; + unsigned int max_freq; + unsigned int min_freq; +#ifdef CONFIG_HAS_EARLYSUSPEND + int early_suspend; +#endif +} dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, + .ignore_nice = 0, + .freq_step = DEF_FREQ_STEP, + .cpu_up_rate = DEF_CPU_UP_RATE, + .cpu_down_rate = DEF_CPU_DOWN_RATE, + .cpu_up_freq = DEF_CPU_UP_FREQ, + .cpu_down_freq = DEF_CPU_DOWN_FREQ, + .up_nr_cpus = DEF_UP_NR_CPUS, + .max_cpu_lock = DEF_MAX_CPU_LOCK, + .min_cpu_lock = DEF_MIN_CPU_LOCK, + .hotplug_lock = ATOMIC_INIT(0), + .dvfs_debug = 0, +#ifdef CONFIG_HAS_EARLYSUSPEND + .early_suspend = -1, +#endif +}; + + +/* + * CPU hotplug lock interface + */ + +static atomic_t g_hotplug_count = ATOMIC_INIT(0); +static atomic_t g_hotplug_lock = ATOMIC_INIT(0); + +static void apply_hotplug_lock(void) +{ + int online, possible, lock, flag; + struct work_struct *work; + struct cpu_dbs_info_s *dbs_info; + + /* do turn_on/off cpus */ + dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ + online = num_online_cpus(); + possible = num_possible_cpus(); + lock = atomic_read(&g_hotplug_lock); + flag = lock - online; + + if (flag == 0) + return; + + work = flag > 0 ? 
&dbs_info->up_work : &dbs_info->down_work; + + pr_debug("%s online %d possible %d lock %d flag %d %d\n", + __func__, online, possible, lock, flag, (int)abs(flag)); + + queue_work_on(dbs_info->cpu, dvfs_workqueue, work); +} + +static int cpufreq_pegasusq_cpu_lock(int num_core) +{ + int prev_lock; + + if (num_core < 1 || num_core > num_possible_cpus()) + return -EINVAL; + + prev_lock = atomic_read(&g_hotplug_lock); + + if (prev_lock != 0 && prev_lock < num_core) + return -EINVAL; + else if (prev_lock == num_core) + atomic_inc(&g_hotplug_count); + + atomic_set(&g_hotplug_lock, num_core); + atomic_set(&g_hotplug_count, 1); + apply_hotplug_lock(); + + return 0; +} + +static int cpufreq_pegasusq_cpu_unlock(int num_core) +{ + int prev_lock = atomic_read(&g_hotplug_lock); + + if (prev_lock < num_core) + return 0; + else if (prev_lock == num_core) + atomic_dec(&g_hotplug_count); + + if (atomic_read(&g_hotplug_count) == 0) + atomic_set(&g_hotplug_lock, 0); + + return 0; +} + + +/* + * History of CPU usage + */ +struct cpu_usage { + unsigned int freq; + unsigned int load[NR_CPUS]; + unsigned int rq_avg; +}; + +struct cpu_usage_history { + struct cpu_usage usage[MAX_HOTPLUG_RATE]; + unsigned int num_hist; +}; + +static struct cpu_usage_history *hotplug_history; + +static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, + u64 *wall) +{ + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + + idle_time = cur_wall_time - busy_time; + if (wall) + *wall = jiffies_to_usecs(cur_wall_time); + + return jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, wall); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + + return idle_time; +} + +static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, + cputime64_t *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +define_one_global_ro(sampling_rate_min); + +/* cpufreq_pegasusq Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(up_threshold, up_threshold); +show_one(sampling_down_factor, sampling_down_factor); +show_one(ignore_nice_load, ignore_nice); +show_one(down_differential, down_differential); +show_one(freq_step, freq_step); +show_one(cpu_up_rate, cpu_up_rate); +show_one(cpu_down_rate, cpu_down_rate); +show_one(cpu_up_freq, cpu_up_freq); +show_one(cpu_down_freq, cpu_down_freq); +show_one(up_nr_cpus, up_nr_cpus); +show_one(max_cpu_lock, max_cpu_lock); +show_one(min_cpu_lock, min_cpu_lock); +show_one(dvfs_debug, dvfs_debug); +static ssize_t show_hotplug_lock(struct 
kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", atomic_read(&g_hotplug_lock)); +} + +#define show_hotplug_param(file_name, num_core, up_down) \ +static ssize_t show_##file_name##_##num_core##_##up_down \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", file_name[num_core - 1][up_down]); \ +} + +#define store_hotplug_param(file_name, num_core, up_down) \ +static ssize_t store_##file_name##_##num_core##_##up_down \ +(struct kobject *kobj, struct attribute *attr, \ + const char *buf, size_t count) \ +{ \ + unsigned int input; \ + int ret; \ + ret = sscanf(buf, "%u", &input); \ + if (ret != 1) \ + return -EINVAL; \ + file_name[num_core - 1][up_down] = input; \ + return count; \ +} + +show_hotplug_param(hotplug_freq, 1, 1); +show_hotplug_param(hotplug_freq, 2, 0); +show_hotplug_param(hotplug_freq, 2, 1); +show_hotplug_param(hotplug_freq, 3, 0); +show_hotplug_param(hotplug_freq, 3, 1); +show_hotplug_param(hotplug_freq, 4, 0); + +show_hotplug_param(hotplug_rq, 1, 1); +show_hotplug_param(hotplug_rq, 2, 0); +show_hotplug_param(hotplug_rq, 2, 1); +show_hotplug_param(hotplug_rq, 3, 0); +show_hotplug_param(hotplug_rq, 3, 1); +show_hotplug_param(hotplug_rq, 4, 0); + +store_hotplug_param(hotplug_freq, 1, 1); +store_hotplug_param(hotplug_freq, 2, 0); +store_hotplug_param(hotplug_freq, 2, 1); +store_hotplug_param(hotplug_freq, 3, 0); +store_hotplug_param(hotplug_freq, 3, 1); +store_hotplug_param(hotplug_freq, 4, 0); + +store_hotplug_param(hotplug_rq, 1, 1); +store_hotplug_param(hotplug_rq, 2, 0); +store_hotplug_param(hotplug_rq, 2, 1); +store_hotplug_param(hotplug_rq, 3, 0); +store_hotplug_param(hotplug_rq, 3, 1); +store_hotplug_param(hotplug_rq, 4, 0); + +define_one_global_rw(hotplug_freq_1_1); +define_one_global_rw(hotplug_freq_2_0); +define_one_global_rw(hotplug_freq_2_1); +define_one_global_rw(hotplug_freq_3_0); +define_one_global_rw(hotplug_freq_3_1); +define_one_global_rw(hotplug_freq_4_0); + +define_one_global_rw(hotplug_rq_1_1); +define_one_global_rw(hotplug_rq_2_0); +define_one_global_rw(hotplug_rq_2_1); +define_one_global_rw(hotplug_rq_3_0); +define_one_global_rw(hotplug_rq_3_1); +define_one_global_rw(hotplug_rq_4_0); + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + return count; +} + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + dbs_tuners_ins.io_is_busy = !!input; + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + dbs_tuners_ins.up_threshold = input; + return count; +} + +static ssize_t store_sampling_down_factor(struct kobject *a, + struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input, j; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + dbs_tuners_ins.sampling_down_factor = input; + + /* Reset down sampling multiplier in case it was active 
*/ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->rate_mult = 1; + } + return count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = + get_cpu_idle_time(j, &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + return count; +} + +static ssize_t store_down_differential(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.down_differential = min(input, 100u); + return count; +} + +static ssize_t store_freq_step(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.freq_step = min(input, 100u); + return count; +} + +static ssize_t store_cpu_up_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.cpu_up_rate = min(input, MAX_HOTPLUG_RATE); + return count; +} + +static ssize_t store_cpu_down_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.cpu_down_rate = min(input, MAX_HOTPLUG_RATE); + return count; +} + +static ssize_t store_cpu_up_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.cpu_up_freq = min(input, dbs_tuners_ins.max_freq); + return count; +} + +static ssize_t store_cpu_down_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.cpu_down_freq = max(input, dbs_tuners_ins.min_freq); + return count; +} + +static ssize_t store_up_nr_cpus(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.up_nr_cpus = min(input, num_possible_cpus()); + return count; +} + +static ssize_t store_max_cpu_lock(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.max_cpu_lock = min(input, num_possible_cpus()); + return count; +} + +static ssize_t store_min_cpu_lock(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.min_cpu_lock = min(input, num_possible_cpus()); 
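+ /*
+  * A non-zero min_cpu_lock makes check_up()/check_down() keep at
+  * least this many cores online; hotplug_lock and max_cpu_lock still
+  * take priority (see the priority comment in dbs_attributes[]).
+  */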
+ return count; +} + +static ssize_t store_hotplug_lock(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + int prev_lock; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + input = min(input, num_possible_cpus()); + prev_lock = atomic_read(&dbs_tuners_ins.hotplug_lock); + + if (prev_lock) + cpufreq_pegasusq_cpu_unlock(prev_lock); + + if (input == 0) { + atomic_set(&dbs_tuners_ins.hotplug_lock, 0); + return count; + } + + ret = cpufreq_pegasusq_cpu_lock(input); + if (ret) { + printk(KERN_ERR "[HOTPLUG] already locked with smaller value %d < %d\n", + atomic_read(&g_hotplug_lock), input); + return ret; + } + + atomic_set(&dbs_tuners_ins.hotplug_lock, input); + + return count; +} + +static ssize_t store_dvfs_debug(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.dvfs_debug = input > 0; + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(up_threshold); +define_one_global_rw(sampling_down_factor); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(down_differential); +define_one_global_rw(freq_step); +define_one_global_rw(cpu_up_rate); +define_one_global_rw(cpu_down_rate); +define_one_global_rw(cpu_up_freq); +define_one_global_rw(cpu_down_freq); +define_one_global_rw(up_nr_cpus); +define_one_global_rw(max_cpu_lock); +define_one_global_rw(min_cpu_lock); +define_one_global_rw(hotplug_lock); +define_one_global_rw(dvfs_debug); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_min.attr, + &sampling_rate.attr, + &up_threshold.attr, + &sampling_down_factor.attr, + &ignore_nice_load.attr, + &io_is_busy.attr, + &down_differential.attr, + &freq_step.attr, + &cpu_up_rate.attr, + &cpu_down_rate.attr, + &cpu_up_freq.attr, + &cpu_down_freq.attr, + &up_nr_cpus.attr, + /* priority: hotplug_lock > max_cpu_lock > min_cpu_lock + Exception: hotplug_lock on early_suspend uses min_cpu_lock */ + &max_cpu_lock.attr, + &min_cpu_lock.attr, + &hotplug_lock.attr, + &dvfs_debug.attr, + &hotplug_freq_1_1.attr, + &hotplug_freq_2_0.attr, + &hotplug_freq_2_1.attr, + &hotplug_freq_3_0.attr, + &hotplug_freq_3_1.attr, + &hotplug_freq_4_0.attr, + &hotplug_rq_1_1.attr, + &hotplug_rq_2_0.attr, + &hotplug_rq_2_1.attr, + &hotplug_rq_3_0.attr, + &hotplug_rq_3_1.attr, + &hotplug_rq_4_0.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "pegasusq", +}; + +/************************** sysfs end ************************/ + +static void __cpuinit cpu_up_work(struct work_struct *work) +{ + int cpu; + int online = num_online_cpus(); + int nr_up = dbs_tuners_ins.up_nr_cpus; + int hotplug_lock = atomic_read(&g_hotplug_lock); + if (hotplug_lock) + nr_up = hotplug_lock - online; + + if (online == 1) { + printk(KERN_ERR "CPU_UP 3\n"); + cpu_up(num_possible_cpus() - 1); + nr_up -= 1; + } + + for_each_cpu_not(cpu, cpu_online_mask) { + if (nr_up-- == 0) + break; + if (cpu == 0) + continue; + printk(KERN_ERR "CPU_UP %d\n", cpu); + cpu_up(cpu); + } +} + +static void cpu_down_work(struct work_struct *work) +{ + int cpu; + int online = num_online_cpus(); + int nr_down = 1; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock) + nr_down = online - hotplug_lock; + + for_each_online_cpu(cpu) { + if (cpu == 0) + continue; + printk(KERN_ERR "CPU_DOWN %d\n", cpu); + 
cpu_down(cpu); + if (--nr_down == 0) + break; + } +} + +static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) +{ +#ifndef CONFIG_ARCH_EXYNOS4 + if (p->cur == p->max) + return; +#endif + + __cpufreq_driver_target(p, freq, CPUFREQ_RELATION_L); +} + +/* + * print hotplug debugging info. + * which 1 : UP, 0 : DOWN + */ +static void debug_hotplug_check(int which, int rq_avg, int freq, + struct cpu_usage *usage) +{ + int cpu; + printk(KERN_ERR "CHECK %s rq %d.%02d freq %d [", which ? "up" : "down", + rq_avg / 100, rq_avg % 100, freq); + for_each_online_cpu(cpu) { + printk(KERN_ERR "(%d, %d), ", cpu, usage->load[cpu]); + } + printk(KERN_ERR "]\n"); +} + +static int check_up(void) +{ + int num_hist = hotplug_history->num_hist; + struct cpu_usage *usage; + int freq, rq_avg; + int i; + int up_rate = dbs_tuners_ins.cpu_up_rate; + int up_freq, up_rq; + int min_freq = INT_MAX; + int min_rq_avg = INT_MAX; + int online; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock > 0) + return 0; + + online = num_online_cpus(); + up_freq = hotplug_freq[online - 1][HOTPLUG_UP_INDEX]; + up_rq = hotplug_rq[online - 1][HOTPLUG_UP_INDEX]; + + if (online == num_possible_cpus()) + return 0; + + if (dbs_tuners_ins.max_cpu_lock != 0 + && online >= dbs_tuners_ins.max_cpu_lock) + return 0; + + if (dbs_tuners_ins.min_cpu_lock != 0 + && online < dbs_tuners_ins.min_cpu_lock) + return 1; + + if (num_hist == 0 || num_hist % up_rate) + return 0; + + for (i = num_hist - 1; i >= num_hist - up_rate; --i) { + usage = &hotplug_history->usage[i]; + + freq = usage->freq; + rq_avg = usage->rq_avg; + + min_freq = min(min_freq, freq); + min_rq_avg = min(min_rq_avg, rq_avg); + + if (dbs_tuners_ins.dvfs_debug) + debug_hotplug_check(1, rq_avg, freq, usage); + } + + if (min_freq >= up_freq && min_rq_avg > up_rq) { + printk(KERN_ERR "[HOTPLUG IN] %s %d>=%d && %d>%d\n", + __func__, min_freq, up_freq, min_rq_avg, up_rq); + hotplug_history->num_hist = 0; + return 1; + } + return 0; +} + +static int check_down(void) +{ + int num_hist = hotplug_history->num_hist; + struct cpu_usage *usage; + int freq, rq_avg; + int i; + int down_rate = dbs_tuners_ins.cpu_down_rate; + int down_freq, down_rq; + int max_freq = 0; + int max_rq_avg = 0; + int online; + int hotplug_lock = atomic_read(&g_hotplug_lock); + + if (hotplug_lock > 0) + return 0; + + online = num_online_cpus(); + down_freq = hotplug_freq[online - 1][HOTPLUG_DOWN_INDEX]; + down_rq = hotplug_rq[online - 1][HOTPLUG_DOWN_INDEX]; + + if (online == 1) + return 0; + + if (dbs_tuners_ins.max_cpu_lock != 0 + && online > dbs_tuners_ins.max_cpu_lock) + return 1; + + if (dbs_tuners_ins.min_cpu_lock != 0 + && online <= dbs_tuners_ins.min_cpu_lock) + return 0; + + if (num_hist == 0 || num_hist % down_rate) + return 0; + + for (i = num_hist - 1; i >= num_hist - down_rate; --i) { + usage = &hotplug_history->usage[i]; + + freq = usage->freq; + rq_avg = usage->rq_avg; + + max_freq = max(max_freq, freq); + max_rq_avg = max(max_rq_avg, rq_avg); + + if (dbs_tuners_ins.dvfs_debug) + debug_hotplug_check(0, rq_avg, freq, usage); + } + + if (max_freq <= down_freq && max_rq_avg <= down_rq) { + printk(KERN_ERR "[HOTPLUG OUT] %s %d<=%d && %d<%d\n", + __func__, max_freq, down_freq, max_rq_avg, down_rq); + hotplug_history->num_hist = 0; + return 1; + } + + return 0; +} + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + unsigned int max_load_freq; + + struct cpufreq_policy *policy; + unsigned int j; + int num_hist = hotplug_history->num_hist; + int max_hotplug_rate 
= max(dbs_tuners_ins.cpu_up_rate, + dbs_tuners_ins.cpu_down_rate); + int up_threshold = dbs_tuners_ins.up_threshold; + + policy = this_dbs_info->cur_policy; + + hotplug_history->usage[num_hist].freq = policy->cur; + hotplug_history->usage[num_hist].rq_avg = get_nr_run_avg(); + ++hotplug_history->num_hist; + + /* Get Absolute Load - in terms of freq */ + max_load_freq = 0; + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + cputime64_t prev_wall_time, prev_idle_time, prev_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + unsigned int load, load_freq; + int freq_avg; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + prev_wall_time = j_dbs_info->prev_cpu_wall; + prev_idle_time = j_dbs_info->prev_cpu_idle; + prev_iowait_time = j_dbs_info->prev_cpu_iowait; + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + prev_wall_time); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + prev_idle_time); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, + prev_iowait_time); + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) { + cputime64_t cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_dbs_info->prev_cpu_nice; + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + hotplug_history->usage[num_hist].load[j] = load; + + freq_avg = __cpufreq_driver_getavg(policy, j); + if (freq_avg <= 0) + freq_avg = policy->cur; + + load_freq = load * freq_avg; + if (load_freq > max_load_freq) + max_load_freq = load_freq; + } + + /* Check for CPU hotplug */ + if (check_up()) { + queue_work_on(this_dbs_info->cpu, dvfs_workqueue, + &this_dbs_info->up_work); + } else if (check_down()) { + queue_work_on(this_dbs_info->cpu, dvfs_workqueue, + &this_dbs_info->down_work); + } + if (hotplug_history->num_hist == max_hotplug_rate) + hotplug_history->num_hist = 0; + + /* Check for frequency increase */ + if (policy->cur < FREQ_FOR_RESPONSIVENESS) + up_threshold = UP_THRESHOLD_AT_MIN_FREQ; + + if (max_load_freq > up_threshold * policy->cur) { + int inc = (policy->max * dbs_tuners_ins.freq_step) / 100; + int target = min(policy->max, policy->cur + inc); + /* If switching to max speed, apply sampling_down_factor */ + if (policy->cur < policy->max && target == policy->max) + this_dbs_info->rate_mult = + dbs_tuners_ins.sampling_down_factor; + dbs_freq_increase(policy, target); + return; + } + + /* Check for frequency decrease */ +#ifndef CONFIG_ARCH_EXYNOS4 + /* if we cannot reduce the frequency anymore, break out early */ + if (policy->cur == policy->min) + return; +#endif + + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. 
To be safe, we focus DOWN_DIFFERENTIAL points under + * the threshold. + */ + if (max_load_freq < + (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * + policy->cur) { + unsigned int freq_next; + unsigned int down_thres; + + freq_next = max_load_freq / + (dbs_tuners_ins.up_threshold - + dbs_tuners_ins.down_differential); + + /* No longer fully busy, reset rate_mult */ + this_dbs_info->rate_mult = 1; + + if (freq_next < policy->min) + freq_next = policy->min; + + + down_thres = UP_THRESHOLD_AT_MIN_FREQ + - dbs_tuners_ins.down_differential; + + if (freq_next < FREQ_FOR_RESPONSIVENESS + && (max_load_freq / freq_next) > down_thres) + freq_next = FREQ_FOR_RESPONSIVENESS; + + if (policy->cur == freq_next) + return; + + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + int delay; + + mutex_lock(&dbs_info->timer_mutex); + + dbs_check_cpu(dbs_info); + /* We want all CPUs to do sampling nearly on + * same jiffy + */ + delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate + * dbs_info->rate_mult); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + queue_delayed_work_on(cpu, dvfs_workqueue, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(DEF_START_DELAY * 1000 * 1000 + + dbs_tuners_ins.sampling_rate); + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + + queue_delayed_work_on(dbs_info->cpu, dvfs_workqueue, + &dbs_info->work, delay + 2 * HZ); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); + cancel_work_sync(&dbs_info->up_work); + cancel_work_sync(&dbs_info->down_work); +} + +static int reboot_notifier_call(struct notifier_block *this, + unsigned long code, void *_cmd) +{ + atomic_set(&g_hotplug_lock, 1); + return NOTIFY_DONE; +} + +static struct notifier_block reboot_notifier = { + .notifier_call = reboot_notifier_call, +}; + +#ifdef CONFIG_HAS_EARLYSUSPEND +static struct early_suspend early_suspend; +unsigned int prev_freq_step_slp; +unsigned int prev_sampling_rate_slp; +static void cpufreq_pegasusq_early_suspend(struct early_suspend *h) +{ +#if EARLYSUSPEND_HOTPLUGLOCK + dbs_tuners_ins.early_suspend = + atomic_read(&g_hotplug_lock); +#endif + prev_freq_step_slp = dbs_tuners_ins.freq_step; + prev_sampling_rate_slp = dbs_tuners_ins.sampling_rate; + dbs_tuners_ins.freq_step = 20; + dbs_tuners_ins.sampling_rate *= 4; +#if EARLYSUSPEND_HOTPLUGLOCK + atomic_set(&g_hotplug_lock, + (dbs_tuners_ins.min_cpu_lock) ? 
dbs_tuners_ins.min_cpu_lock : 1); + apply_hotplug_lock(); + stop_rq_work(); +#endif +} +static void cpufreq_pegasusq_late_resume(struct early_suspend *h) +{ +#if EARLYSUSPEND_HOTPLUGLOCK + atomic_set(&g_hotplug_lock, dbs_tuners_ins.early_suspend); +#endif + dbs_tuners_ins.early_suspend = -1; + dbs_tuners_ins.freq_step = prev_freq_step_slp; + dbs_tuners_ins.sampling_rate = prev_sampling_rate_slp; +#if EARLYSUSPEND_HOTPLUGLOCK + apply_hotplug_lock(); + start_rq_work(); +#endif +} +#endif + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + prev_apenable = apget_enable_auto_hotplug(); + apenable_auto_hotplug(false); + + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + dbs_tuners_ins.max_freq = policy->max; + dbs_tuners_ins.min_freq = policy->min; + hotplug_history->num_hist = 0; + start_rq_work(); + + mutex_lock(&dbs_mutex); + + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + } + this_dbs_info->cpu = cpu; + this_dbs_info->rate_mult = 1; + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + min_sampling_rate = MIN_SAMPLING_RATE; + dbs_tuners_ins.sampling_rate = DEF_SAMPLING_RATE; + dbs_tuners_ins.io_is_busy = 0; + } + mutex_unlock(&dbs_mutex); + + register_reboot_notifier(&reboot_notifier); + + mutex_init(&this_dbs_info->timer_mutex); + dbs_timer_init(this_dbs_info); + +#if !EARLYSUSPEND_HOTPLUGLOCK + register_pm_notifier(&pm_notifier); +#endif +#ifdef CONFIG_HAS_EARLYSUSPEND + register_early_suspend(&early_suspend); +#endif + break; + + case CPUFREQ_GOV_STOP: + apenable_auto_hotplug(prev_apenable); + +#ifdef CONFIG_HAS_EARLYSUSPEND + unregister_early_suspend(&early_suspend); +#endif +#if !EARLYSUSPEND_HOTPLUGLOCK + unregister_pm_notifier(&pm_notifier); +#endif + + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + mutex_destroy(&this_dbs_info->timer_mutex); + + unregister_reboot_notifier(&reboot_notifier); + + dbs_enable--; + mutex_unlock(&dbs_mutex); + + stop_rq_work(); + + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, + CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, + CPUFREQ_RELATION_L); + + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +static int __init cpufreq_gov_dbs_init(void) +{ + int ret; + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, 0); + + ret = init_rq_avg(); + if (ret) + return ret; + + INIT_WORK(&dbs_info->up_work, cpu_up_work); + INIT_WORK(&dbs_info->down_work, cpu_down_work); + + hotplug_history = kzalloc(sizeof(struct cpu_usage_history), 
GFP_KERNEL); + if (!hotplug_history) { + pr_err("%s cannot create hotplug history array\n", __func__); + ret = -ENOMEM; + goto err_hist; + } + + dvfs_workqueue = create_workqueue("kpegasusq"); + if (!dvfs_workqueue) { + pr_err("%s cannot create workqueue\n", __func__); + ret = -ENOMEM; + goto err_queue; + } + + ret = cpufreq_register_governor(&cpufreq_gov_pegasusq); + if (ret) + goto err_reg; + +#ifdef CONFIG_HAS_EARLYSUSPEND + early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; + early_suspend.suspend = cpufreq_pegasusq_early_suspend; + early_suspend.resume = cpufreq_pegasusq_late_resume; +#endif + + return ret; + +err_reg: + destroy_workqueue(dvfs_workqueue); +err_queue: + kfree(hotplug_history); +err_hist: + kfree(rq_data); + return ret; +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_pegasusq); + destroy_workqueue(dvfs_workqueue); + kfree(hotplug_history); + kfree(rq_data); +} + +MODULE_AUTHOR("ByungChang Cha "); +MODULE_DESCRIPTION("'cpufreq_pegasusq' - A dynamic cpufreq/cpuhotplug governor"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PEGASUSQ +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_smartassH3.c b/drivers/cpufreq/cpufreq_smartassH3.c new file mode 100644 index 00000000..7e0891ed --- /dev/null +++ b/drivers/cpufreq/cpufreq_smartassH3.c @@ -0,0 +1,904 @@ +/* + * drivers/cpufreq/cpufreq_smartassH3.c + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Author: Erasmux + * + * Based on the interactive governor By Mike Chan (mike@android.com) + * which was adaptated to 2.6.29 kernel by Nadlabak (pavel@doshaska.net) + * + * SMP support based on mod by faux123 + * + * ZTE Skate specific tweaks by H3ROS @ MoDaCo, integrated by C3C0 @ MoDaCo + * + * For a general overview of smartassV2 see the relavent part in + * Documentation/cpu-freq/governors.txt + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/******************** Tunable parameters: ********************/ + +/* + * The "ideal" frequency to use when awake. The governor will ramp up faster + * towards the ideal frequency and slower after it has passed it. Similarly, + * lowering the frequency towards the ideal frequency is faster than below it. + */ +#define DEFAULT_AWAKE_IDEAL_FREQ 378000 +static unsigned int awake_ideal_freq; + +/* + * The "ideal" frequency to use when suspended. + * When set to 0, the governor will not track the suspended state (meaning + * that practically when sleep_ideal_freq==0 the awake_ideal_freq is used + * also when suspended). + */ +#define DEFAULT_SLEEP_IDEAL_FREQ 378000 +static unsigned int sleep_ideal_freq; + +/* + * Freqeuncy delta when ramping up above the ideal freqeuncy. + * Zero disables and causes to always jump straight to max frequency. + * When below the ideal freqeuncy we always ramp up to the ideal freq. 
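+ *
+ * For illustration with the defaults: a loaded CPU already at or above
+ * the 378000 kHz ideal frequency is raised one 80000 kHz step per
+ * sample (378000 -> 458000, requested with CPUFREQ_RELATION_H), while
+ * a CPU below the ideal jumps straight to 378000.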
+ */ +#define DEFAULT_RAMP_UP_STEP 80000 +static unsigned int ramp_up_step; + +/* + * Freqeuncy delta when ramping down below the ideal freqeuncy. + * Zero disables and will calculate ramp down according to load heuristic. + * When above the ideal freqeuncy we always ramp down to the ideal freq. + */ +#define DEFAULT_RAMP_DOWN_STEP 80000 +static unsigned int ramp_down_step; + +/* + * CPU freq will be increased if measured load > max_cpu_load; + */ +#define DEFAULT_MAX_CPU_LOAD 85 +static unsigned long max_cpu_load; + +/* + * CPU freq will be decreased if measured load < min_cpu_load; + */ +#define DEFAULT_MIN_CPU_LOAD 70 +static unsigned long min_cpu_load; + +/* + * The minimum amount of time to spend at a frequency before we can ramp up. + * Notice we ignore this when we are below the ideal frequency. + */ +#define DEFAULT_UP_RATE_US 48000; +static unsigned long up_rate_us; + +/* + * The minimum amount of time to spend at a frequency before we can ramp down. + * Notice we ignore this when we are above the ideal frequency. + */ +#define DEFAULT_DOWN_RATE_US 49000; +static unsigned long down_rate_us; + +/* + * The frequency to set when waking up from sleep. + * When sleep_ideal_freq=0 this will have no effect. + */ +#define DEFAULT_SLEEP_WAKEUP_FREQ 99999999 +static unsigned int sleep_wakeup_freq; + +/* + * Sampling rate, I highly recommend to leave it at 2. + */ +#define DEFAULT_SAMPLE_RATE_JIFFIES 2 +static unsigned int sample_rate_jiffies; + + +/*************** End of tunables ***************/ + + +static void (*pm_idle_old)(void); +static atomic_t active_count = ATOMIC_INIT(0); + +struct smartass_info_s { + struct cpufreq_policy *cur_policy; + struct cpufreq_frequency_table *freq_table; + struct timer_list timer; + u64 time_in_idle; + u64 idle_exit_time; + u64 freq_change_time; + u64 freq_change_time_in_idle; + int cur_cpu_load; + int old_freq; + int ramp_dir; + unsigned int enable; + int ideal_speed; +}; +static DEFINE_PER_CPU(struct smartass_info_s, smartass_info); + +/* Workqueues handle frequency scaling */ +static struct workqueue_struct *up_wq; +static struct workqueue_struct *down_wq; +static struct work_struct freq_scale_work; + +static cpumask_t work_cpumask; +static spinlock_t cpumask_lock; + +static unsigned int suspended; + +#define dprintk(flag,msg...) do { \ + if (debug_mask & flag) printk(KERN_DEBUG msg); \ + } while (0) + +enum { + SMARTASS_DEBUG_JUMPS=1, + SMARTASS_DEBUG_LOAD=2, + SMARTASS_DEBUG_ALG=4 +}; + +/* + * Combination of the above debug flags. + */ +static unsigned long debug_mask; + +static int cpufreq_governor_smartass_h3(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASSH3 +static +#endif +struct cpufreq_governor cpufreq_gov_smartass_h3 = { + .name = "smartassH3", + .governor = cpufreq_governor_smartass_h3, + .max_transition_latency = 9000000, + .owner = THIS_MODULE, +}; + +inline static void smartass_update_min_max(struct smartass_info_s *this_smartass, struct cpufreq_policy *policy, int suspend) { + if (suspend) { + this_smartass->ideal_speed = // sleep_ideal_freq; but make sure it obeys the policy min/max + policy->max > sleep_ideal_freq ? + (sleep_ideal_freq > policy->min ? sleep_ideal_freq : policy->min) : policy->max; + } else { + this_smartass->ideal_speed = // awake_ideal_freq; but make sure it obeys the policy min/max + policy->min < awake_ideal_freq ? + (awake_ideal_freq < policy->max ? 
awake_ideal_freq : policy->max) : policy->min; + } +} + +inline static void smartass_update_min_max_allcpus(void) { + unsigned int i; + for_each_online_cpu(i) { + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, i); + if (this_smartass->enable) + smartass_update_min_max(this_smartass,this_smartass->cur_policy,suspended); + } +} + +inline static unsigned int validate_freq(struct cpufreq_policy *policy, int freq) { + if (freq > (int)policy->max) + return policy->max; + if (freq < (int)policy->min) + return policy->min; + return freq; +} + +inline static void reset_timer(unsigned long cpu, struct smartass_info_s *this_smartass) { + this_smartass->time_in_idle = get_cpu_idle_time_us(cpu, &this_smartass->idle_exit_time); + mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies); +} + +inline static void work_cpumask_set(unsigned long cpu) { + unsigned long flags; + spin_lock_irqsave(&cpumask_lock, flags); + cpumask_set_cpu(cpu, &work_cpumask); + spin_unlock_irqrestore(&cpumask_lock, flags); +} + +inline static int work_cpumask_test_and_clear(unsigned long cpu) { + unsigned long flags; + int res = 0; + spin_lock_irqsave(&cpumask_lock, flags); + res = cpumask_test_and_clear_cpu(cpu, &work_cpumask); + spin_unlock_irqrestore(&cpumask_lock, flags); + return res; +} + +inline static int target_freq(struct cpufreq_policy *policy, struct smartass_info_s *this_smartass, + int new_freq, int old_freq, int prefered_relation) { + int index, target; + struct cpufreq_frequency_table *table = this_smartass->freq_table; + + if (new_freq == old_freq) + return 0; + new_freq = validate_freq(policy,new_freq); + if (new_freq == old_freq) + return 0; + + if (table && + !cpufreq_frequency_table_target(policy,table,new_freq,prefered_relation,&index)) + { + target = table[index].frequency; + if (target == old_freq) { + // if for example we are ramping up to *at most* current + ramp_up_step + // but there is no such frequency higher than the current, try also + // to ramp up to *at least* current + ramp_up_step. + if (new_freq > old_freq && prefered_relation==CPUFREQ_RELATION_H + && !cpufreq_frequency_table_target(policy,table,new_freq, + CPUFREQ_RELATION_L,&index)) + target = table[index].frequency; + // simlarly for ramping down: + else if (new_freq < old_freq && prefered_relation==CPUFREQ_RELATION_L + && !cpufreq_frequency_table_target(policy,table,new_freq, + CPUFREQ_RELATION_H,&index)) + target = table[index].frequency; + } + + if (target == old_freq) { + // We should not get here: + // If we got here we tried to change to a validated new_freq which is different + // from old_freq, so there is no reason for us to remain at same frequency. 
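+ // Returning 0 also tells the caller that no switch happened, so
+ // freq_change_time(_in_idle) is left untouched.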
+ printk(KERN_WARNING "Smartass: frequency change failed: %d to %d => %d\n", + old_freq,new_freq,target); + return 0; + } + } + else target = new_freq; + + __cpufreq_driver_target(policy, target, prefered_relation); + + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassQ: jumping from %d to %d => %d (%d)\n", + old_freq,new_freq,target,policy->cur); + + return target; +} + +static void cpufreq_smartass_timer(unsigned long cpu) +{ + u64 delta_idle; + u64 delta_time; + int cpu_load; + int old_freq; + u64 update_time; + u64 now_idle; + int queued_work = 0; + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); + struct cpufreq_policy *policy = this_smartass->cur_policy; + + now_idle = get_cpu_idle_time_us(cpu, &update_time); + old_freq = policy->cur; + + if (this_smartass->idle_exit_time == 0 || update_time == this_smartass->idle_exit_time) + return; + + delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle); + delta_time = cputime64_sub(update_time, this_smartass->idle_exit_time); + + // If timer ran less than 1ms after short-term sample started, retry. + if (delta_time < 1000) { + if (!timer_pending(&this_smartass->timer)) + reset_timer(cpu,this_smartass); + return; + } + + if (delta_idle > delta_time) + cpu_load = 0; + else + cpu_load = 100 * (unsigned int)(delta_time - delta_idle) / (unsigned int)delta_time; + + dprintk(SMARTASS_DEBUG_LOAD,"smartassT @ %d: load %d (delta_time %llu)\n", + old_freq,cpu_load,delta_time); + + this_smartass->cur_cpu_load = cpu_load; + this_smartass->old_freq = old_freq; + + // Scale up if load is above max or if there where no idle cycles since coming out of idle, + // additionally, if we are at or above the ideal_speed, verify we have been at this frequency + // for at least up_rate_us: + if (cpu_load > max_cpu_load || delta_idle == 0) + { + if (old_freq < policy->max && + (old_freq < this_smartass->ideal_speed || delta_idle == 0 || + cputime64_sub(update_time, this_smartass->freq_change_time) >= up_rate_us)) + { + dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp up: load %d (delta_idle %llu)\n", + old_freq,cpu_load,delta_idle); + this_smartass->ramp_dir = 1; + work_cpumask_set(cpu); + queue_work(up_wq, &freq_scale_work); + queued_work = 1; + } + else this_smartass->ramp_dir = 0; + } + // Similarly for scale down: load should be below min and if we are at or below ideal + // frequency we require that we have been at this frequency for at least down_rate_us: + else if (cpu_load < min_cpu_load && old_freq > policy->min && + (old_freq > this_smartass->ideal_speed || + cputime64_sub(update_time, this_smartass->freq_change_time) >= down_rate_us)) + { + dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp down: load %d (delta_idle %llu)\n", + old_freq,cpu_load,delta_idle); + this_smartass->ramp_dir = -1; + work_cpumask_set(cpu); + queue_work(down_wq, &freq_scale_work); + queued_work = 1; + } + else this_smartass->ramp_dir = 0; + + // To avoid unnecessary load when the CPU is already at high load, we don't + // reset ourselves if we are at max speed. If and when there are idle cycles, + // the idle loop will activate the timer. + // Additionally, if we queued some work, the work task will reset the timer + // after it has done its adjustments. 
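+ // (The work task applies the same rule: at the end of
+ // cpufreq_smartass_freq_change_time_work the timer is only re-armed
+ // while the resulting frequency is below policy->max.)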
+ if (!queued_work && old_freq < policy->max) + reset_timer(cpu,this_smartass); +} + +static void cpufreq_idle(void) +{ + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); + struct cpufreq_policy *policy = this_smartass->cur_policy; + + if (!this_smartass->enable) { + pm_idle_old(); + return; + } + + if (policy->cur == policy->min && timer_pending(&this_smartass->timer)) + del_timer(&this_smartass->timer); + + pm_idle_old(); + + if (!timer_pending(&this_smartass->timer)) + reset_timer(smp_processor_id(), this_smartass); +} + +static int cpufreq_idle_notifier(struct notifier_block *nb, + unsigned long val, void *data) { + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); + struct cpufreq_policy *policy = this_smartass->cur_policy; + + if (!this_smartass->enable) + return NOTIFY_DONE; + + if (val == IDLE_START) { + if (policy->cur == policy->max && !timer_pending(&this_smartass->timer)) { + reset_timer(smp_processor_id(), this_smartass); + } else if (policy->cur == policy->min) { + if (timer_pending(&this_smartass->timer)) + del_timer(&this_smartass->timer); + } + } else if (val == IDLE_END) { + if (policy->cur == policy->min && !timer_pending(&this_smartass->timer)) + reset_timer(smp_processor_id(), this_smartass); + } + + return NOTIFY_OK; +} +static struct notifier_block cpufreq_idle_nb = { + .notifier_call = cpufreq_idle_notifier, +}; + +/* We use the same work function to sale up and down */ +static void cpufreq_smartass_freq_change_time_work(struct work_struct *work) +{ + unsigned int cpu; + int new_freq; + int old_freq; + int ramp_dir; + struct smartass_info_s *this_smartass; + struct cpufreq_policy *policy; + unsigned int relation = CPUFREQ_RELATION_L; + for_each_possible_cpu(cpu) { + this_smartass = &per_cpu(smartass_info, cpu); + if (!work_cpumask_test_and_clear(cpu)) + continue; + + ramp_dir = this_smartass->ramp_dir; + this_smartass->ramp_dir = 0; + + old_freq = this_smartass->old_freq; + policy = this_smartass->cur_policy; + + if (old_freq != policy->cur) { + // frequency was changed by someone else? + printk(KERN_WARNING "Smartass: frequency changed by 3rd party: %d to %d\n", + old_freq,policy->cur); + new_freq = old_freq; + } + else if (ramp_dir > 0 && nr_running() > 1) { + // ramp up logic: + if (old_freq < this_smartass->ideal_speed) + new_freq = this_smartass->ideal_speed; + else if (ramp_up_step) { + new_freq = old_freq + ramp_up_step; + relation = CPUFREQ_RELATION_H; + } + else { + new_freq = policy->max; + relation = CPUFREQ_RELATION_H; + } + dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp up: ramp_dir=%d ideal=%d\n", + old_freq,ramp_dir,this_smartass->ideal_speed); + } + else if (ramp_dir < 0) { + // ramp down logic: + if (old_freq > this_smartass->ideal_speed) { + new_freq = this_smartass->ideal_speed; + relation = CPUFREQ_RELATION_H; + } + else if (ramp_down_step) + new_freq = old_freq - ramp_down_step; + else { + // Load heuristics: Adjust new_freq such that, assuming a linear + // scaling of load vs. frequency, the load in the new frequency + // will be max_cpu_load: + new_freq = old_freq * this_smartass->cur_cpu_load / max_cpu_load; + if (new_freq > old_freq) // min_cpu_load > max_cpu_load ?! + new_freq = old_freq -1; + } + dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp down: ramp_dir=%d ideal=%d\n", + old_freq,ramp_dir,this_smartass->ideal_speed); + } + else { // ramp_dir==0 ?! Could the timer change its mind about a queued ramp up/down + // before the work task gets to run? 
+ // This may also happen if we refused to ramp up because the nr_running()==1 + new_freq = old_freq; + dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d nothing: ramp_dir=%d nr_running=%lu\n", + old_freq,ramp_dir,nr_running()); + } + + // do actual ramp up (returns 0, if frequency change failed): + new_freq = target_freq(policy,this_smartass,new_freq,old_freq,relation); + if (new_freq) + this_smartass->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time); + + // reset timer: + if (new_freq < policy->max) + reset_timer(cpu,this_smartass); + // if we are maxed out, it is pointless to use the timer + // (idle cycles wake up the timer when the timer comes) + else if (timer_pending(&this_smartass->timer)) + del_timer(&this_smartass->timer); + + cpufreq_notify_utilization(policy, + (this_smartass->cur_cpu_load * policy->cur) / policy->max); + } +} + +static ssize_t show_debug_mask(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", debug_mask); +} + +static ssize_t store_debug_mask(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0) + debug_mask = input; + return res; +} + +static ssize_t show_up_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", up_rate_us); +} + +static ssize_t store_up_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0 && input <= 100000000) + up_rate_us = input; + return res; +} + +static ssize_t show_down_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", down_rate_us); +} + +static ssize_t store_down_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0 && input <= 100000000) + down_rate_us = input; + return res; +} + +static ssize_t show_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", sleep_ideal_freq); +} + +static ssize_t store_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) { + sleep_ideal_freq = input; + if (suspended) + smartass_update_min_max_allcpus(); + } + return res; +} + +static ssize_t show_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", sleep_wakeup_freq); +} + +static ssize_t store_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + sleep_wakeup_freq = input; + return res; +} + +static ssize_t show_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", awake_ideal_freq); +} + +static ssize_t store_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) { + awake_ideal_freq = input; + if (!suspended) + smartass_update_min_max_allcpus(); + } + return res; +} + +static ssize_t 
show_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", sample_rate_jiffies); +} + +static ssize_t store_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input <= 1000) + sample_rate_jiffies = input; + return res; +} + +static ssize_t show_ramp_up_step(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", ramp_up_step); +} + +static ssize_t store_ramp_up_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + ramp_up_step = input; + return res; +} + +static ssize_t show_ramp_down_step(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", ramp_down_step); +} + +static ssize_t store_ramp_down_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + ramp_down_step = input; + return res; +} + +static ssize_t show_max_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", max_cpu_load); +} + +static ssize_t store_max_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input <= 100) + max_cpu_load = input; + return res; +} + +static ssize_t show_min_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", min_cpu_load); +} + +static ssize_t store_min_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input < 100) + min_cpu_load = input; + return res; +} + +#define define_global_rw_attr(_name) \ +static struct global_attr _name##_attr = \ + __ATTR(_name, 0644, show_##_name, store_##_name) + +define_global_rw_attr(debug_mask); +define_global_rw_attr(up_rate_us); +define_global_rw_attr(down_rate_us); +define_global_rw_attr(sleep_ideal_freq); +define_global_rw_attr(sleep_wakeup_freq); +define_global_rw_attr(awake_ideal_freq); +define_global_rw_attr(sample_rate_jiffies); +define_global_rw_attr(ramp_up_step); +define_global_rw_attr(ramp_down_step); +define_global_rw_attr(max_cpu_load); +define_global_rw_attr(min_cpu_load); + +static struct attribute * smartass_attributes[] = { + &debug_mask_attr.attr, + &up_rate_us_attr.attr, + &down_rate_us_attr.attr, + &sleep_ideal_freq_attr.attr, + &sleep_wakeup_freq_attr.attr, + &awake_ideal_freq_attr.attr, + &sample_rate_jiffies_attr.attr, + &ramp_up_step_attr.attr, + &ramp_down_step_attr.attr, + &max_cpu_load_attr.attr, + &min_cpu_load_attr.attr, + NULL, +}; + +static struct attribute_group smartass_attr_group = { + .attrs = smartass_attributes, + .name = "smartassH3", +}; + +static int cpufreq_governor_smartass_h3(struct cpufreq_policy *new_policy, + unsigned int event) +{ + unsigned int cpu = new_policy->cpu; + int rc; + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!new_policy->cur)) + return -EINVAL; + + this_smartass->cur_policy 
= new_policy; + + this_smartass->enable = 1; + + smartass_update_min_max(this_smartass,new_policy,suspended); + + this_smartass->freq_table = cpufreq_frequency_get_table(cpu); + if (!this_smartass->freq_table) + printk(KERN_WARNING "Smartass: no frequency table for cpu %d?!\n",cpu); + + smp_wmb(); + + // Do not register the idle hook and create sysfs + // entries if we have already done so. + if (atomic_inc_return(&active_count) <= 1) { + rc = sysfs_create_group(cpufreq_global_kobject, + &smartass_attr_group); + if (rc) + return rc; + + pm_idle_old = pm_idle; + pm_idle = cpufreq_idle; + idle_notifier_register(&cpufreq_idle_nb); + } + + if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer)) + reset_timer(cpu,this_smartass); + + break; + + case CPUFREQ_GOV_LIMITS: + smartass_update_min_max(this_smartass,new_policy,suspended); + + if (this_smartass->cur_policy->cur > new_policy->max) { + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new max freq: %d\n",new_policy->max); + __cpufreq_driver_target(this_smartass->cur_policy, + new_policy->max, CPUFREQ_RELATION_H); + } + else if (this_smartass->cur_policy->cur < new_policy->min) { + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new min freq: %d\n",new_policy->min); + __cpufreq_driver_target(this_smartass->cur_policy, + new_policy->min, CPUFREQ_RELATION_L); + } + + if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer)) + reset_timer(cpu,this_smartass); + + break; + + case CPUFREQ_GOV_STOP: + this_smartass->enable = 0; + smp_wmb(); + del_timer(&this_smartass->timer); + flush_work(&freq_scale_work); + this_smartass->idle_exit_time = 0; + + if (atomic_dec_return(&active_count) <= 1) { + sysfs_remove_group(cpufreq_global_kobject, + &smartass_attr_group); + pm_idle = pm_idle_old; + idle_notifier_unregister(&cpufreq_idle_nb); + } + break; + } + + return 0; +} + +static void smartass_suspend(int cpu, int suspend) +{ + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); + struct cpufreq_policy *policy = this_smartass->cur_policy; + unsigned int new_freq; + + if (!this_smartass->enable) + return; + + smartass_update_min_max(this_smartass,policy,suspend); + if (!suspend) { // resume at max speed: + new_freq = validate_freq(policy,sleep_wakeup_freq); + + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: awaking at %d\n",new_freq); + + __cpufreq_driver_target(policy, new_freq, + CPUFREQ_RELATION_L); + } else { + // to avoid wakeup issues with quick sleep/wakeup don't change actual frequency when entering sleep + // to allow some time to settle down. Instead we just reset our statistics (and reset the timer). + // Eventually, the timer will adjust the frequency if necessary. 
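+ // (smartass_update_min_max() above has already folded
+ // sleep_ideal_freq into ideal_speed for the suspended state.)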
+ + this_smartass->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time); + + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: suspending at %d\n",policy->cur); + } + + reset_timer(smp_processor_id(),this_smartass); +} + +static void smartass_early_suspend(struct early_suspend *handler) { + int i; + if (suspended || sleep_ideal_freq==0) // disable behavior for sleep_ideal_freq==0 + return; + suspended = 1; + for_each_online_cpu(i) + smartass_suspend(i,1); +} + +static void smartass_late_resume(struct early_suspend *handler) { + int i; + if (!suspended) // already not suspended so nothing to do + return; + suspended = 0; + for_each_online_cpu(i) + smartass_suspend(i,0); +} + +static struct early_suspend smartass_power_suspend = { + .suspend = smartass_early_suspend, + .resume = smartass_late_resume, +#ifdef CONFIG_MACH_HERO + .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, +#endif +}; + +static int __init cpufreq_smartass_init(void) +{ + unsigned int i; + struct smartass_info_s *this_smartass; + debug_mask = 0; + up_rate_us = DEFAULT_UP_RATE_US; + down_rate_us = DEFAULT_DOWN_RATE_US; + sleep_ideal_freq = DEFAULT_SLEEP_IDEAL_FREQ; + sleep_wakeup_freq = DEFAULT_SLEEP_WAKEUP_FREQ; + awake_ideal_freq = DEFAULT_AWAKE_IDEAL_FREQ; + sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES; + ramp_up_step = DEFAULT_RAMP_UP_STEP; + ramp_down_step = DEFAULT_RAMP_DOWN_STEP; + max_cpu_load = DEFAULT_MAX_CPU_LOAD; + min_cpu_load = DEFAULT_MIN_CPU_LOAD; + + spin_lock_init(&cpumask_lock); + + suspended = 0; + + /* Initalize per-cpu data: */ + for_each_possible_cpu(i) { + this_smartass = &per_cpu(smartass_info, i); + this_smartass->enable = 0; + this_smartass->cur_policy = 0; + this_smartass->ramp_dir = 0; + this_smartass->time_in_idle = 0; + this_smartass->idle_exit_time = 0; + this_smartass->freq_change_time = 0; + this_smartass->freq_change_time_in_idle = 0; + this_smartass->cur_cpu_load = 0; + // intialize timer: + init_timer_deferrable(&this_smartass->timer); + this_smartass->timer.function = cpufreq_smartass_timer; + this_smartass->timer.data = i; + work_cpumask_test_and_clear(i); + } + + // Scale up is high priority + up_wq = create_workqueue("ksmartass_up"); + down_wq = create_workqueue("ksmartass_down"); + if (!up_wq || !down_wq) + return -ENOMEM; + + INIT_WORK(&freq_scale_work, cpufreq_smartass_freq_change_time_work); + + register_early_suspend(&smartass_power_suspend); + + return cpufreq_register_governor(&cpufreq_gov_smartass_h3); +} + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASSH3 +fs_initcall(cpufreq_smartass_init); +#else +module_init(cpufreq_smartass_init); +#endif + +static void __exit cpufreq_smartass_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_smartass_h3); + destroy_workqueue(up_wq); + destroy_workqueue(down_wq); +} + +module_exit(cpufreq_smartass_exit); + +MODULE_AUTHOR ("Erasmux, moded by H3ROS & C3C0"); +MODULE_DESCRIPTION ("'cpufreq_smartassH3' - A smart cpufreq governor"); +MODULE_LICENSE ("GPL"); + diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h index eb3defb3..d814401b 100644 --- a/include/asm-generic/cputime.h +++ b/include/asm-generic/cputime.h @@ -15,6 +15,7 @@ typedef u64 __nocast cputime64_t; #define cputime64_to_jiffies64(__ct) (__force u64)(__ct) #define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif) +#define cputime64_sub(__a, __b) ((__a) - (__b)) #define nsecs_to_cputime64(__ct) \ jiffies64_to_cputime64(nsecs_to_jiffies64(__ct)) From ee96b5802f18bd8a59283c0503c0bd9440cd2262 Mon Sep 17 00:00:00 2001 From: 
Lens-F Date: Wed, 7 Aug 2013 08:33:39 -0400 Subject: [PATCH 06/35] CPUFREQ: Remove unneccessary Governor that still has warning --- drivers/cpufreq/Kconfig | 35 - drivers/cpufreq/Makefile | 2 - drivers/cpufreq/cpufreq_pegasusq.c | 1636 ---------------------------- 3 files changed, 1673 deletions(-) delete mode 100644 drivers/cpufreq/cpufreq_pegasusq.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 64452292..2e07ae54 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -108,15 +108,6 @@ config CPU_FREQ_DEFAULT_GOV_DANCEDANCE select CPU_FREQ_GOV_DANCEDANCE help -config CPU_FREQ_DEFAULT_GOV_INTERACTIVE - bool "interactive" - select CPU_FREQ_GOV_INTERACTIVE - help - Use the CPUFreq governor 'interactive' as default. This allows - you to get a full dynamic cpu frequency capable system by simply - loading your cpufreq low-level hardware driver, using the - 'interactive' governor for latency-sensitive workloads. - config CPU_FREQ_DEFAULT_GOV_NIGHTMARE bool "nightmare" select CPU_FREQ_GOV_NIGHTMARE @@ -134,12 +125,6 @@ config CPU_FREQ_DEFAULT_GOV_ONDEMAND governor. If unsure have a look at the help section of the driver. Fallback governor will be the performance governor. -config CPU_FREQ_DEFAULT_GOV_PEGASUSQ - bool "pegasusq" - select CPU_FREQ_GOV_PEGASUSQ - help - Use the CPUFreq governor 'pegasusq' as default. - config CPU_FREQ_DEFAULT_GOV_SLP bool "slp" select CPU_FREQ_GOV_SLP @@ -272,23 +257,6 @@ config CPU_FREQ_GOV_DANCEDANCE tristate "'dancedance' cpufreq governor" depends on CPU_FREQ -config CPU_FREQ_GOV_INTERACTIVE - tristate "'interactive' cpufreq policy governor" - help - 'interactive' - This driver adds a dynamic cpufreq policy governor - designed for latency-sensitive workloads. - - This governor attempts to reduce the latency of clock - increases so that the system is more responsive to - interactive workloads. - - To compile this driver as a module, choose M here: the - module will be called cpufreq_interactive. - - For details, take a look at linux/Documentation/cpu-freq. - - If in doubt, say N. - config CPU_FREQ_GOV_NIGHTMARE tristate "'nightmare' cpufreq governor" depends on CPU_FREQ @@ -322,9 +290,6 @@ config CPU_FREQ_GOV_PERFORMANCE If in doubt, say Y. -config CPU_FREQ_GOV_PEGASUSQ - tristate "'pegasusq' cpufreq policy governor" - config CPU_FREQ_GOV_POWERSAVE tristate "'powersave' governor" help diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index c65736f3..be135afd 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -10,10 +10,8 @@ obj-$(CONFIG_CPU_FREQ_GOV_ASSWAX) += cpufreq_asswax.o obj-$(CONFIG_CPU_FREQ_GOV_BADASS) += cpufreq_badass.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o obj-$(CONFIG_CPU_FREQ_GOV_DANCEDANCE) += cpufreq_dancedance.o -obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o obj-$(CONFIG_CPU_FREQ_GOV_NIGHTMARE) += cpufreq_nightmare.o obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o -obj-$(CONFIG_CPU_FREQ_GOV_PEGASUSQ) += cpufreq_pegasusq.o obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o obj-$(CONFIG_CPU_FREQ_GOV_SLP) += cpufreq_slp.o diff --git a/drivers/cpufreq/cpufreq_pegasusq.c b/drivers/cpufreq/cpufreq_pegasusq.c deleted file mode 100644 index 230abf81..00000000 --- a/drivers/cpufreq/cpufreq_pegasusq.c +++ /dev/null @@ -1,1636 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_pegasusq.c - * - * Copyright (C) 2011 Samsung Electronics co. 
ltd - * ByungChang Cha - * - * Based on ondemand governor - * Copyright (C) 2001 Russell King - * (C) 2003 Venkatesh Pallipadi . - * Jun Nakajima - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef CONFIG_HAS_EARLYSUSPEND -#include -#endif -#define EARLYSUSPEND_HOTPLUGLOCK 1 - -/* - * runqueue average - */ - -#define RQ_AVG_TIMER_RATE 10 - -static bool boostpulse_relayf = false; -static unsigned int boostpulse_relay_sr = 0; -static unsigned int Lboostpulse_value = 1134000; - -extern void apenable_auto_hotplug(bool state); -extern bool apget_enable_auto_hotplug(void); -static bool prev_apenable; - -struct runqueue_data { - unsigned int nr_run_avg; - unsigned int update_rate; - int64_t last_time; - int64_t total_time; - struct delayed_work work; - struct workqueue_struct *nr_run_wq; - spinlock_t lock; -}; - -static struct runqueue_data *rq_data; -static void rq_work_fn(struct work_struct *work); - -static void start_rq_work(void) -{ - rq_data->nr_run_avg = 0; - rq_data->last_time = 0; - rq_data->total_time = 0; - if (rq_data->nr_run_wq == NULL) - rq_data->nr_run_wq = - create_singlethread_workqueue("nr_run_avg"); - - queue_delayed_work(rq_data->nr_run_wq, &rq_data->work, - msecs_to_jiffies(rq_data->update_rate)); - return; -} - -static void stop_rq_work(void) -{ - if (rq_data->nr_run_wq) - cancel_delayed_work(&rq_data->work); - return; -} - -static int __init init_rq_avg(void) -{ - rq_data = kzalloc(sizeof(struct runqueue_data), GFP_KERNEL); - if (rq_data == NULL) { - pr_err("%s cannot allocate memory\n", __func__); - return -ENOMEM; - } - spin_lock_init(&rq_data->lock); - rq_data->update_rate = RQ_AVG_TIMER_RATE; - INIT_DELAYED_WORK_DEFERRABLE(&rq_data->work, rq_work_fn); - - return 0; -} - -static void rq_work_fn(struct work_struct *work) -{ - int64_t time_diff = 0; - int64_t nr_run = 0; - unsigned long flags = 0; - int64_t cur_time = ktime_to_ns(ktime_get()); - - spin_lock_irqsave(&rq_data->lock, flags); - - if (rq_data->last_time == 0) - rq_data->last_time = cur_time; - if (rq_data->nr_run_avg == 0) - rq_data->total_time = 0; - - nr_run = nr_running() * 100; - time_diff = cur_time - rq_data->last_time; - do_div(time_diff, 1000 * 1000); - - if (time_diff != 0 && rq_data->total_time != 0) { - nr_run = (nr_run * time_diff) + - (rq_data->nr_run_avg * rq_data->total_time); - do_div(nr_run, rq_data->total_time + time_diff); - } - rq_data->nr_run_avg = nr_run; - rq_data->total_time += time_diff; - rq_data->last_time = cur_time; - - if (rq_data->update_rate != 0) - queue_delayed_work(rq_data->nr_run_wq, &rq_data->work, - msecs_to_jiffies(rq_data->update_rate)); - - spin_unlock_irqrestore(&rq_data->lock, flags); -} - -static unsigned int get_nr_run_avg(void) -{ - unsigned int nr_run_avg; - unsigned long flags = 0; - - spin_lock_irqsave(&rq_data->lock, flags); - nr_run_avg = rq_data->nr_run_avg; - rq_data->nr_run_avg = 0; - spin_unlock_irqrestore(&rq_data->lock, flags); - - return nr_run_avg; -} - - -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ - -#define DEF_SAMPLING_DOWN_FACTOR (3) -#define MAX_SAMPLING_DOWN_FACTOR (100000) -#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (14) -#define 
DEF_FREQUENCY_UP_THRESHOLD (95) - -/* for multiple freq_step */ -#define DEF_UP_THRESHOLD_DIFF (5) - -#define DEF_FREQUENCY_MIN_SAMPLE_RATE (10000) -#define MIN_FREQUENCY_UP_THRESHOLD (11) -#define MAX_FREQUENCY_UP_THRESHOLD (100) -#define DEF_SAMPLING_RATE (40000) -#define MIN_SAMPLING_RATE (10000) -#define MAX_HOTPLUG_RATE (40u) - -#define DEF_MAX_CPU_LOCK (0) -#define DEF_MIN_CPU_LOCK (0) -#define DEF_CPU_UP_FREQ (500000) -#define DEF_CPU_DOWN_FREQ (200000) -#define DEF_UP_NR_CPUS (1) -#define DEF_CPU_UP_RATE (9) -#define DEF_CPU_DOWN_RATE (3) -#define DEF_FREQ_STEP (30) -/* for multiple freq_step */ -#define DEF_FREQ_STEP_DEC (13) - -#define DEF_START_DELAY (0) - -#define UP_THRESHOLD_AT_MIN_FREQ (55) -#define FREQ_FOR_RESPONSIVENESS (400000) -/* for fast decrease */ -#define FREQ_FOR_FAST_DOWN (1200000) -#define UP_THRESHOLD_AT_FAST_DOWN (95) - -#define HOTPLUG_DOWN_INDEX (0) -#define HOTPLUG_UP_INDEX (1) - -#ifdef CONFIG_MACH_MIDAS -static int hotplug_rq[4][2] = { - {0, 100}, {100, 200}, {200, 300}, {300, 0} -}; - -static int hotplug_freq[4][2] = { - {0, 500000}, - {200000, 600000}, - {500000, 800000}, - {500000, 0} -}; -#else -static int hotplug_rq[4][2] = { - {0, 200}, {200, 200}, {200, 300}, {300, 0} -}; - -static int hotplug_freq[4][2] = { - {0, 800000}, - {500000, 500000}, - {200000, 500000}, - {200000, 0} -}; -#endif - -static unsigned int min_sampling_rate; - -static void do_dbs_timer(struct work_struct *work); -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_PEGASUSQ -static -#endif -struct cpufreq_governor cpufreq_gov_pegasusq = { - .name = "pegasusq", - .governor = cpufreq_governor_dbs, - .owner = THIS_MODULE, -}; - -/* Sampling types */ -enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; - -struct cpu_dbs_info_s { - cputime64_t prev_cpu_idle; - cputime64_t prev_cpu_iowait; - cputime64_t prev_cpu_wall; - cputime64_t prev_cpu_nice; - struct cpufreq_policy *cur_policy; - struct delayed_work work; - struct work_struct up_work; - struct work_struct down_work; - struct cpufreq_frequency_table *freq_table; - unsigned int rate_mult; - int cpu; - /* - * percpu mutex that serializes governor limit change with - * do_dbs_timer invocation. We do not want do_dbs_timer to run - * when user is changing the governor or limits. - */ - struct mutex timer_mutex; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); - -struct workqueue_struct *dvfs_workqueue; - -static unsigned int dbs_enable; /* number of CPUs using this policy */ - -/* - * dbs_mutex protects dbs_enable in governor start/stop. 
- */ -static DEFINE_MUTEX(dbs_mutex); - -static struct dbs_tuners { - unsigned int sampling_rate; - unsigned int up_threshold; - unsigned int down_differential; - unsigned int ignore_nice; - unsigned int sampling_down_factor; - unsigned int io_is_busy; - /* pegasusq tuners */ - unsigned int freq_step; - unsigned int cpu_up_rate; - unsigned int cpu_down_rate; - unsigned int cpu_up_freq; - unsigned int cpu_down_freq; - unsigned int up_nr_cpus; - unsigned int max_cpu_lock; - unsigned int min_cpu_lock; - atomic_t hotplug_lock; - unsigned int dvfs_debug; - unsigned int max_freq; - unsigned int min_freq; -#ifdef CONFIG_HAS_EARLYSUSPEND - int early_suspend; -#endif - unsigned int up_threshold_at_min_freq; - unsigned int freq_for_responsiveness; -} dbs_tuners_ins = { - .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, - .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, - .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, - .ignore_nice = 1, - .freq_step = DEF_FREQ_STEP, - .cpu_up_rate = DEF_CPU_UP_RATE, - .cpu_down_rate = DEF_CPU_DOWN_RATE, - .cpu_up_freq = DEF_CPU_UP_FREQ, - .cpu_down_freq = DEF_CPU_DOWN_FREQ, - .up_nr_cpus = DEF_UP_NR_CPUS, - .max_cpu_lock = DEF_MAX_CPU_LOCK, - .min_cpu_lock = DEF_MIN_CPU_LOCK, - .hotplug_lock = ATOMIC_INIT(0), - .dvfs_debug = 0, -#ifdef CONFIG_HAS_EARLYSUSPEND - .early_suspend = -1, -#endif - .up_threshold_at_min_freq = UP_THRESHOLD_AT_MIN_FREQ, - .freq_for_responsiveness = FREQ_FOR_RESPONSIVENESS, -}; - - -/* - * CPU hotplug lock interface - */ - -static atomic_t g_hotplug_count = ATOMIC_INIT(0); -static atomic_t g_hotplug_lock = ATOMIC_INIT(0); - -static void apply_hotplug_lock(void) -{ - int online, possible, lock, flag; - struct work_struct *work; - struct cpu_dbs_info_s *dbs_info; - - /* do turn_on/off cpus */ - dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ - online = num_online_cpus(); - possible = num_possible_cpus(); - lock = atomic_read(&g_hotplug_lock); - flag = lock - online; - - if (flag == 0) - return; - - work = flag > 0 ? 
&dbs_info->up_work : &dbs_info->down_work; - - pr_debug("%s online %d possible %d lock %d flag %d %d\n", - __func__, online, possible, lock, flag, (int)abs(flag)); - - queue_work_on(dbs_info->cpu, dvfs_workqueue, work); -} - -int cpufreq_pegasusq_cpu_lock(int num_core) -{ - int prev_lock; - - if (num_core < 1 || num_core > num_possible_cpus()) - return -EINVAL; - - prev_lock = atomic_read(&g_hotplug_lock); - - if (prev_lock != 0 && prev_lock < num_core) - return -EINVAL; - else if (prev_lock == num_core) - atomic_inc(&g_hotplug_count); - - atomic_set(&g_hotplug_lock, num_core); - atomic_set(&g_hotplug_count, 1); - apply_hotplug_lock(); - - return 0; -} - -int cpufreq_pegasusq_cpu_unlock(int num_core) -{ - int prev_lock = atomic_read(&g_hotplug_lock); - - if (prev_lock < num_core) - return 0; - else if (prev_lock == num_core) - atomic_dec(&g_hotplug_count); - - if (atomic_read(&g_hotplug_count) == 0) - atomic_set(&g_hotplug_lock, 0); - - return 0; -} - -void cpufreq_pegasusq_min_cpu_lock(unsigned int num_core) -{ - int online, flag; - struct cpu_dbs_info_s *dbs_info; - - dbs_tuners_ins.min_cpu_lock = min(num_core, num_possible_cpus()); - - dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ - online = num_online_cpus(); - flag = (int)num_core - online; - if (flag <= 0) - return; - queue_work_on(dbs_info->cpu, dvfs_workqueue, &dbs_info->up_work); -} - -void cpufreq_pegasusq_min_cpu_unlock(void) -{ - int online, lock, flag; - struct cpu_dbs_info_s *dbs_info; - - dbs_tuners_ins.min_cpu_lock = 0; - - dbs_info = &per_cpu(od_cpu_dbs_info, 0); /* from CPU0 */ - online = num_online_cpus(); - lock = atomic_read(&g_hotplug_lock); - if (lock == 0) - return; - flag = lock - online; - if (flag >= 0) - return; - queue_work_on(dbs_info->cpu, dvfs_workqueue, &dbs_info->down_work); -} - -/* - * History of CPU usage - */ -struct cpu_usage { - unsigned int freq; - unsigned int load[NR_CPUS]; - unsigned int rq_avg; - unsigned int avg_load; -}; - -struct cpu_usage_history { - struct cpu_usage usage[MAX_HOTPLUG_RATE]; - unsigned int num_hist; -}; - -struct cpu_usage_history *hotplug_history; - -static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, - u64 *wall) -{ - u64 idle_time; - u64 cur_wall_time; - u64 busy_time; - - cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); - - busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; - - idle_time = cur_wall_time - busy_time; - if (wall) - *wall = jiffies_to_usecs(cur_wall_time); - - return jiffies_to_usecs(idle_time); -} - -static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) -{ - u64 idle_time = get_cpu_idle_time_us(cpu, wall); - - if (idle_time == -1ULL) - return get_cpu_idle_time_jiffy(cpu, wall); - - return idle_time; -} - -static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, - cputime64_t *wall) -{ - u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); - - if (iowait_time == -1ULL) - return 0; - - return iowait_time; -} - -/************************** sysfs interface ************************/ - -static ssize_t show_sampling_rate_min(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", min_sampling_rate); -} - -static ssize_t show_boostpulse_value(struct kobject *kobj, - struct attribute 
*attr, char *buf) -{ - return sprintf(buf, "%u\n", Lboostpulse_value / 1000); -} - -define_one_global_ro(sampling_rate_min); - -/* cpufreq_pegasusq Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct kobject *kobj, struct attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ -} -show_one(sampling_rate, sampling_rate); -show_one(io_is_busy, io_is_busy); -show_one(up_threshold, up_threshold); -show_one(sampling_down_factor, sampling_down_factor); -show_one(ignore_nice_load, ignore_nice); -show_one(down_differential, down_differential); -show_one(freq_step, freq_step); -show_one(cpu_up_rate, cpu_up_rate); -show_one(cpu_down_rate, cpu_down_rate); -show_one(cpu_up_freq, cpu_up_freq); -show_one(cpu_down_freq, cpu_down_freq); -show_one(up_nr_cpus, up_nr_cpus); -show_one(max_cpu_lock, max_cpu_lock); -show_one(min_cpu_lock, min_cpu_lock); -show_one(dvfs_debug, dvfs_debug); -show_one(up_threshold_at_min_freq, up_threshold_at_min_freq); -show_one(freq_for_responsiveness, freq_for_responsiveness); -static ssize_t show_hotplug_lock(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", atomic_read(&g_hotplug_lock)); -} - -#define show_hotplug_param(file_name, num_core, up_down) \ -static ssize_t show_##file_name##_##num_core##_##up_down \ -(struct kobject *kobj, struct attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%u\n", file_name[num_core - 1][up_down]); \ -} - -#define store_hotplug_param(file_name, num_core, up_down) \ -static ssize_t store_##file_name##_##num_core##_##up_down \ -(struct kobject *kobj, struct attribute *attr, \ - const char *buf, size_t count) \ -{ \ - unsigned int input; \ - int ret; \ - ret = sscanf(buf, "%u", &input); \ - if (ret != 1) \ - return -EINVAL; \ - file_name[num_core - 1][up_down] = input; \ - return count; \ -} - -show_hotplug_param(hotplug_freq, 1, 1); -show_hotplug_param(hotplug_freq, 2, 0); -show_hotplug_param(hotplug_freq, 2, 1); -show_hotplug_param(hotplug_freq, 3, 0); -show_hotplug_param(hotplug_freq, 3, 1); -show_hotplug_param(hotplug_freq, 4, 0); - -show_hotplug_param(hotplug_rq, 1, 1); -show_hotplug_param(hotplug_rq, 2, 0); -show_hotplug_param(hotplug_rq, 2, 1); -show_hotplug_param(hotplug_rq, 3, 0); -show_hotplug_param(hotplug_rq, 3, 1); -show_hotplug_param(hotplug_rq, 4, 0); - -store_hotplug_param(hotplug_freq, 1, 1); -store_hotplug_param(hotplug_freq, 2, 0); -store_hotplug_param(hotplug_freq, 2, 1); -store_hotplug_param(hotplug_freq, 3, 0); -store_hotplug_param(hotplug_freq, 3, 1); -store_hotplug_param(hotplug_freq, 4, 0); - -store_hotplug_param(hotplug_rq, 1, 1); -store_hotplug_param(hotplug_rq, 2, 0); -store_hotplug_param(hotplug_rq, 2, 1); -store_hotplug_param(hotplug_rq, 3, 0); -store_hotplug_param(hotplug_rq, 3, 1); -store_hotplug_param(hotplug_rq, 4, 0); - -define_one_global_rw(hotplug_freq_1_1); -define_one_global_rw(hotplug_freq_2_0); -define_one_global_rw(hotplug_freq_2_1); -define_one_global_rw(hotplug_freq_3_0); -define_one_global_rw(hotplug_freq_3_1); -define_one_global_rw(hotplug_freq_4_0); - -define_one_global_rw(hotplug_rq_1_1); -define_one_global_rw(hotplug_rq_2_0); -define_one_global_rw(hotplug_rq_2_1); -define_one_global_rw(hotplug_rq_3_0); -define_one_global_rw(hotplug_rq_3_1); -define_one_global_rw(hotplug_rq_4_0); - -static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if 
(ret != 1) - return -EINVAL; - dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); - return count; -} - -static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - dbs_tuners_ins.io_is_busy = !!input; - return count; -} - -static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || - input < MIN_FREQUENCY_UP_THRESHOLD) { - return -EINVAL; - } - dbs_tuners_ins.up_threshold = input; - return count; -} - -static ssize_t store_sampling_down_factor(struct kobject *a, - struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input, j; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) - return -EINVAL; - dbs_tuners_ins.sampling_down_factor = input; - - /* Reset down sampling multiplier in case it was active */ - for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; - dbs_info = &per_cpu(od_cpu_dbs_info, j); - dbs_info->rate_mult = 1; - } - return count; -} - -static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - unsigned int j; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - if (input > 1) - input = 1; - - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - return count; - } - dbs_tuners_ins.ignore_nice = input; - - /* we need to re-evaluate prev_cpu_idle */ - for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; - dbs_info = &per_cpu(od_cpu_dbs_info, j); - dbs_info->prev_cpu_idle = - get_cpu_idle_time(j, &dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - } - return count; -} - -static ssize_t store_down_differential(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.down_differential = min(input, 100u); - return count; -} - -static ssize_t store_freq_step(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.freq_step = min(input, 100u); - return count; -} - -static ssize_t store_cpu_up_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.cpu_up_rate = min(input, MAX_HOTPLUG_RATE); - return count; -} - -static ssize_t store_cpu_down_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.cpu_down_rate = min(input, MAX_HOTPLUG_RATE); - return count; -} - -static ssize_t store_cpu_up_freq(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.cpu_up_freq = min(input, dbs_tuners_ins.max_freq); - return count; -} - -static ssize_t store_cpu_down_freq(struct 
kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.cpu_down_freq = max(input, dbs_tuners_ins.min_freq); - return count; -} - -static ssize_t store_up_nr_cpus(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.up_nr_cpus = min(input, num_possible_cpus()); - return count; -} - -static ssize_t store_max_cpu_lock(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.max_cpu_lock = min(input, num_possible_cpus()); - return count; -} - -static ssize_t store_min_cpu_lock(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - if (input == 0) - cpufreq_pegasusq_min_cpu_unlock(); - else - cpufreq_pegasusq_min_cpu_lock(input); - return count; -} - -static ssize_t store_hotplug_lock(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - int prev_lock; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - input = min(input, num_possible_cpus()); - prev_lock = atomic_read(&dbs_tuners_ins.hotplug_lock); - - if (prev_lock) - cpufreq_pegasusq_cpu_unlock(prev_lock); - - if (input == 0) { - atomic_set(&dbs_tuners_ins.hotplug_lock, 0); - return count; - } - - ret = cpufreq_pegasusq_cpu_lock(input); - if (ret) { - printk(KERN_ERR "[HOTPLUG] already locked with smaller value %d < %d\n", - atomic_read(&g_hotplug_lock), input); - return ret; - } - - atomic_set(&dbs_tuners_ins.hotplug_lock, input); - - return count; -} - -static ssize_t store_dvfs_debug(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.dvfs_debug = input > 0; - return count; -} - -static ssize_t store_up_threshold_at_min_freq(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || - input < MIN_FREQUENCY_UP_THRESHOLD) { - return -EINVAL; - } - dbs_tuners_ins.up_threshold_at_min_freq = input; - return count; -} - -static ssize_t store_freq_for_responsiveness(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.freq_for_responsiveness = input; - return count; -} - -static ssize_t store_boostpulse_value(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - if (input * 1000 > 2106000) - input = 2106000; - - Lboostpulse_value = input * 1000; - return count; -} - -define_one_global_rw(sampling_rate); -define_one_global_rw(io_is_busy); -define_one_global_rw(up_threshold); -define_one_global_rw(sampling_down_factor); -define_one_global_rw(ignore_nice_load); -define_one_global_rw(down_differential); -define_one_global_rw(freq_step); -define_one_global_rw(cpu_up_rate); 
-define_one_global_rw(cpu_down_rate); -define_one_global_rw(cpu_up_freq); -define_one_global_rw(cpu_down_freq); -define_one_global_rw(up_nr_cpus); -define_one_global_rw(max_cpu_lock); -define_one_global_rw(min_cpu_lock); -define_one_global_rw(hotplug_lock); -define_one_global_rw(dvfs_debug); -define_one_global_rw(up_threshold_at_min_freq); -define_one_global_rw(freq_for_responsiveness); -define_one_global_rw(boostpulse_value); - -static struct attribute *dbs_attributes[] = { - &sampling_rate_min.attr, - &sampling_rate.attr, - &up_threshold.attr, - &sampling_down_factor.attr, - &ignore_nice_load.attr, - &io_is_busy.attr, - &down_differential.attr, - &freq_step.attr, - &cpu_up_rate.attr, - &cpu_down_rate.attr, - &cpu_up_freq.attr, - &cpu_down_freq.attr, - &up_nr_cpus.attr, - /* priority: hotplug_lock > max_cpu_lock > min_cpu_lock - Exception: hotplug_lock on early_suspend uses min_cpu_lock */ - &max_cpu_lock.attr, - &min_cpu_lock.attr, - &hotplug_lock.attr, - &dvfs_debug.attr, - &hotplug_freq_1_1.attr, - &hotplug_freq_2_0.attr, - &hotplug_freq_2_1.attr, - &hotplug_freq_3_0.attr, - &hotplug_freq_3_1.attr, - &hotplug_freq_4_0.attr, - &hotplug_rq_1_1.attr, - &hotplug_rq_2_0.attr, - &hotplug_rq_2_1.attr, - &hotplug_rq_3_0.attr, - &hotplug_rq_3_1.attr, - &hotplug_rq_4_0.attr, - &up_threshold_at_min_freq.attr, - &freq_for_responsiveness.attr, - &boostpulse_value.attr, - NULL -}; - -static struct attribute_group dbs_attr_group = { - .attrs = dbs_attributes, - .name = "pegasusq", -}; - -/************************** sysfs end ************************/ - -static void __cpuinit cpu_up_work(struct work_struct *work) -{ - int cpu; - int online = num_online_cpus(); - int nr_up = dbs_tuners_ins.up_nr_cpus; - int min_cpu_lock = dbs_tuners_ins.min_cpu_lock; - int hotplug_lock = atomic_read(&g_hotplug_lock); - - if (hotplug_lock && min_cpu_lock) - nr_up = max(hotplug_lock, min_cpu_lock) - online; - else if (hotplug_lock) - nr_up = hotplug_lock - online; - else if (min_cpu_lock) - nr_up = max(nr_up, min_cpu_lock - online); - - if (online == 1) { - printk(KERN_ERR "CPU_UP 3\n"); - cpu_up(num_possible_cpus() - 1); - nr_up -= 1; - } - - for_each_cpu_not(cpu, cpu_online_mask) { - if (nr_up-- == 0) - break; - if (cpu == 0) - continue; - printk(KERN_ERR "CPU_UP %d\n", cpu); - cpu_up(cpu); - } -} - -static void cpu_down_work(struct work_struct *work) -{ - int cpu; - int online = num_online_cpus(); - int nr_down = 1; - int hotplug_lock = atomic_read(&g_hotplug_lock); - - if (hotplug_lock) - nr_down = online - hotplug_lock; - - for_each_online_cpu(cpu) { - if (cpu == 0) - continue; - printk(KERN_ERR "CPU_DOWN %d\n", cpu); - cpu_down(cpu); - if (--nr_down == 0) - break; - } -} - -static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) -{ -#ifndef CONFIG_ARCH_EXYNOS4 - if (p->cur == p->max) - return; -#endif - - __cpufreq_driver_target(p, freq, CPUFREQ_RELATION_L); -} - -/* - * print hotplug debugging info. - * which 1 : UP, 0 : DOWN - */ -static void debug_hotplug_check(int which, int rq_avg, int freq, - struct cpu_usage *usage) -{ - int cpu; - printk(KERN_ERR "CHECK %s rq %d.%02d freq %d [", which ? 
"up" : "down", - rq_avg / 100, rq_avg % 100, freq); - for_each_online_cpu(cpu) { - printk(KERN_ERR "(%d, %d), ", cpu, usage->load[cpu]); - } - printk(KERN_ERR "]\n"); -} - -static int check_up(void) -{ - int num_hist = hotplug_history->num_hist; - struct cpu_usage *usage; - int freq, rq_avg; - int avg_load; - int i; - int up_rate = dbs_tuners_ins.cpu_up_rate; - int up_freq, up_rq; - int min_freq = INT_MAX; - int min_rq_avg = INT_MAX; - int min_avg_load = INT_MAX; - int online; - int hotplug_lock = atomic_read(&g_hotplug_lock); - - if (hotplug_lock > 0) - return 0; - - online = num_online_cpus(); - up_freq = hotplug_freq[online - 1][HOTPLUG_UP_INDEX]; - up_rq = hotplug_rq[online - 1][HOTPLUG_UP_INDEX]; - - if (online == num_possible_cpus()) - return 0; - - if (dbs_tuners_ins.max_cpu_lock != 0 - && online >= dbs_tuners_ins.max_cpu_lock) - return 0; - - if (dbs_tuners_ins.min_cpu_lock != 0 - && online < dbs_tuners_ins.min_cpu_lock) - return 1; - - if (num_hist == 0 || num_hist % up_rate) - return 0; - - for (i = num_hist - 1; i >= num_hist - up_rate; --i) { - usage = &hotplug_history->usage[i]; - - freq = usage->freq; - rq_avg = usage->rq_avg; - avg_load = usage->avg_load; - - min_freq = min(min_freq, freq); - min_rq_avg = min(min_rq_avg, rq_avg); - min_avg_load = min(min_avg_load, avg_load); - - if (dbs_tuners_ins.dvfs_debug) - debug_hotplug_check(1, rq_avg, freq, usage); - } - - if (min_freq >= up_freq && min_rq_avg > up_rq) { - if (online >= 2) { - if (min_avg_load < 65) - return 0; - } - printk(KERN_ERR "[HOTPLUG IN] %s %d>=%d && %d>%d\n", - __func__, min_freq, up_freq, min_rq_avg, up_rq); - hotplug_history->num_hist = 0; - return 1; - } - return 0; -} - -static int check_down(void) -{ - int num_hist = hotplug_history->num_hist; - struct cpu_usage *usage; - int freq, rq_avg; - int avg_load; - int i; - int down_rate = dbs_tuners_ins.cpu_down_rate; - int down_freq, down_rq; - int max_freq = 0; - int max_rq_avg = 0; - int max_avg_load = 0; - int online; - int hotplug_lock = atomic_read(&g_hotplug_lock); - - if (hotplug_lock > 0) - return 0; - - online = num_online_cpus(); - down_freq = hotplug_freq[online - 1][HOTPLUG_DOWN_INDEX]; - down_rq = hotplug_rq[online - 1][HOTPLUG_DOWN_INDEX]; - - if (online == 1) - return 0; - - if (dbs_tuners_ins.max_cpu_lock != 0 - && online > dbs_tuners_ins.max_cpu_lock) - return 1; - - if (dbs_tuners_ins.min_cpu_lock != 0 - && online <= dbs_tuners_ins.min_cpu_lock) - return 0; - - if (num_hist == 0 || num_hist % down_rate) - return 0; - - for (i = num_hist - 1; i >= num_hist - down_rate; --i) { - usage = &hotplug_history->usage[i]; - - freq = usage->freq; - rq_avg = usage->rq_avg; - avg_load = usage->avg_load; - - max_freq = max(max_freq, freq); - max_rq_avg = max(max_rq_avg, rq_avg); - max_avg_load = max(max_avg_load, avg_load); - - if (dbs_tuners_ins.dvfs_debug) - debug_hotplug_check(0, rq_avg, freq, usage); - } - - if ((max_freq <= down_freq && max_rq_avg <= down_rq) - || (online >= 3 && max_avg_load < 30)) { - printk(KERN_ERR "[HOTPLUG OUT] %s %d<=%d && %d<%d\n", - __func__, max_freq, down_freq, max_rq_avg, down_rq); - hotplug_history->num_hist = 0; - return 1; - } - - return 0; -} - -static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) -{ - unsigned int max_load_freq; - - struct cpufreq_policy *policy; - unsigned int j; - int num_hist = hotplug_history->num_hist; - int max_hotplug_rate = max(dbs_tuners_ins.cpu_up_rate, - dbs_tuners_ins.cpu_down_rate); - int up_threshold = dbs_tuners_ins.up_threshold; - - /* add total_load, avg_load to get 
average load */ - unsigned int total_load = 0; - unsigned int avg_load = 0; - int load_each[4] = {-1, -1, -1, -1}; - int rq_avg = 0; - policy = this_dbs_info->cur_policy; - - if (boostpulse_relayf) - { - if (boostpulse_relay_sr != 0) - dbs_tuners_ins.sampling_rate = boostpulse_relay_sr; - boostpulse_relayf = false; - if (policy->cur > Lboostpulse_value) - return; - - __cpufreq_driver_target(policy, Lboostpulse_value, - CPUFREQ_RELATION_H); - return; - } - - hotplug_history->usage[num_hist].freq = policy->cur; - hotplug_history->usage[num_hist].rq_avg = get_nr_run_avg(); - - /* add total_load, avg_load to get average load */ - rq_avg = hotplug_history->usage[num_hist].rq_avg; - - ++hotplug_history->num_hist; - - /* Get Absolute Load - in terms of freq */ - max_load_freq = 0; - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; - cputime64_t prev_wall_time, prev_idle_time, prev_iowait_time; - unsigned int idle_time, wall_time, iowait_time; - unsigned int load, load_freq; - int freq_avg; - - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - prev_wall_time = j_dbs_info->prev_cpu_wall; - prev_idle_time = j_dbs_info->prev_cpu_idle; - prev_iowait_time = j_dbs_info->prev_cpu_iowait; - - cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); - cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); - - wall_time = (unsigned int) cputime64_sub(cur_wall_time, - prev_wall_time); - j_dbs_info->prev_cpu_wall = cur_wall_time; - - idle_time = (unsigned int) cputime64_sub(cur_idle_time, - prev_idle_time); - j_dbs_info->prev_cpu_idle = cur_idle_time; - - iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, - prev_iowait_time); - j_dbs_info->prev_cpu_iowait = cur_iowait_time; - - if (dbs_tuners_ins.ignore_nice) { - cputime64_t cur_nice; - unsigned long cur_nice_jiffies; - - cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - - j_dbs_info->prev_cpu_nice; - /* - * Assumption: nice time between sampling periods will - * be less than 2^32 jiffies for 32 bit sys - */ - cur_nice_jiffies = (unsigned long) - cputime64_to_jiffies64(cur_nice); - - j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - idle_time += jiffies_to_usecs(cur_nice_jiffies); - } - - if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) - idle_time -= iowait_time; - - if (unlikely(!wall_time || wall_time < idle_time)) - continue; - - load = 100 * (wall_time - idle_time) / wall_time; - - /* keep load of each CPUs and combined load across all CPUs */ - if (cpu_online(j)) - load_each[j] = load; - total_load += load; - - hotplug_history->usage[num_hist].load[j] = load; - - freq_avg = __cpufreq_driver_getavg(policy, j); - if (freq_avg <= 0) - freq_avg = policy->cur; - - load_freq = load * freq_avg; - if (load_freq > max_load_freq) - max_load_freq = load_freq; - } - /* calculate the average load across all related CPUs */ - avg_load = total_load / num_online_cpus(); - hotplug_history->usage[num_hist].avg_load = avg_load; - //pr_info("LOAD_TIMER - %d - %d - %d - %d", max_load_freq/1000, total_load, avg_load, num_online_cpus()); - - /* Check for CPU hotplug */ - if (check_up()) { - queue_work_on(this_dbs_info->cpu, dvfs_workqueue, - &this_dbs_info->up_work); - } else if (check_down()) { - queue_work_on(this_dbs_info->cpu, dvfs_workqueue, - &this_dbs_info->down_work); - } - if (hotplug_history->num_hist == max_hotplug_rate) - hotplug_history->num_hist = 0; - - /* Check for frequency increase */ - if (policy->cur < dbs_tuners_ins.freq_for_responsiveness) 
- up_threshold = dbs_tuners_ins.up_threshold_at_min_freq; - /* for fast frequency decrease */ - else - up_threshold = dbs_tuners_ins.up_threshold; - - if (max_load_freq > up_threshold * policy->cur) { - /* for multiple freq_step */ - int inc = policy->max * (dbs_tuners_ins.freq_step - - DEF_FREQ_STEP_DEC * 2) / 100; - int target = 0; - - /* for multiple freq_step */ - if (max_load_freq > (up_threshold + DEF_UP_THRESHOLD_DIFF * 2) - * policy->cur) - inc = policy->max * dbs_tuners_ins.freq_step / 100; - else if (max_load_freq > (up_threshold + DEF_UP_THRESHOLD_DIFF) - * policy->cur) - inc = policy->max * (dbs_tuners_ins.freq_step - - DEF_FREQ_STEP_DEC) / 100; - - target = min(policy->max, policy->cur + inc); - - /* If switching to max speed, apply sampling_down_factor */ - if (policy->cur < policy->max && target == policy->max) - this_dbs_info->rate_mult = - dbs_tuners_ins.sampling_down_factor; - dbs_freq_increase(policy, target); - return; - } - - /* Check for frequency decrease */ -#ifndef CONFIG_ARCH_EXYNOS4 - /* if we cannot reduce the frequency anymore, break out early */ - if (policy->cur == policy->min) - return; -#endif - - /* - * The optimal frequency is the frequency that is the lowest that - * can support the current CPU usage without triggering the up - * policy. To be safe, we focus DOWN_DIFFERENTIAL points under - * the threshold. - */ - if (max_load_freq < - (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * - policy->cur) { - unsigned int freq_next; - unsigned int down_thres; - - freq_next = max_load_freq / - (dbs_tuners_ins.up_threshold - - dbs_tuners_ins.down_differential); - - /* No longer fully busy, reset rate_mult */ - this_dbs_info->rate_mult = 1; - - if (freq_next < policy->min) - freq_next = policy->min; - - - down_thres = dbs_tuners_ins.up_threshold_at_min_freq - - dbs_tuners_ins.down_differential; - - if (freq_next < dbs_tuners_ins.freq_for_responsiveness - && (max_load_freq / freq_next) > down_thres) - freq_next = dbs_tuners_ins.freq_for_responsiveness; - - if (policy->cur == freq_next) - return; - - __cpufreq_driver_target(policy, freq_next, - CPUFREQ_RELATION_L); - } -} - -/*extern void pegasusq_is_active(bool val); - -void boostpulse_relay_pq(void) -{ - if (Lboostpulse_value > 0) - { - //pr_info("BOOST_PULSE_FROM_INTERACTIVE"); - if (dbs_tuners_ins.sampling_rate != min_sampling_rate) - boostpulse_relay_sr = dbs_tuners_ins.sampling_rate; - boostpulse_relayf = true; - dbs_tuners_ins.sampling_rate = min_sampling_rate; - } -}*/ - -static void do_dbs_timer(struct work_struct *work) -{ - struct cpu_dbs_info_s *dbs_info = - container_of(work, struct cpu_dbs_info_s, work.work); - unsigned int cpu = dbs_info->cpu; - int delay; - - mutex_lock(&dbs_info->timer_mutex); - - dbs_check_cpu(dbs_info); - /* We want all CPUs to do sampling nearly on - * same jiffy - */ - delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate - * dbs_info->rate_mult); - - if (num_online_cpus() > 1) - delay -= jiffies % delay; - - queue_delayed_work_on(cpu, dvfs_workqueue, &dbs_info->work, delay); - mutex_unlock(&dbs_info->timer_mutex); -} - -static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) -{ - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(DEF_START_DELAY * 1000 * 1000 - + dbs_tuners_ins.sampling_rate); - if (num_online_cpus() > 1) - delay -= jiffies % delay; - - INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); - - queue_delayed_work_on(dbs_info->cpu, dvfs_workqueue, - &dbs_info->work, delay + 2 * HZ); -} - 
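(Editor's aside, not part of the patch.) The step-down rule in dbs_check_cpu() above can be hard to read out of the diff: the target is max_load_freq divided by (up_threshold - down_differential), clamped to policy->min. A minimal stand-alone C sketch follows; the 95/14 values match the DEF_FREQUENCY_UP_THRESHOLD / DEF_FREQUENCY_DOWN_DIFFERENTIAL defaults defined earlier in this file, while the 60% load sample is invented for illustration.

#include <stdio.h>

/*
 * Sketch of the ondemand-style down-scaling rule used by pegasusq's
 * dbs_check_cpu(): pick the lowest frequency that can carry the observed
 * load without immediately re-crossing the up threshold, keeping a
 * down_differential safety margin below it. In the real governor,
 * max_load_freq is the maximum of (load * freq_avg) across the policy's
 * CPUs; here it is just passed in directly.
 */
static unsigned int pick_freq_next(unsigned int max_load_freq,
                                   unsigned int up_threshold,
                                   unsigned int down_differential,
                                   unsigned int policy_min)
{
        unsigned int freq_next = max_load_freq /
                                 (up_threshold - down_differential);

        if (freq_next < policy_min)
                freq_next = policy_min;
        return freq_next;
}

int main(void)
{
        /* 60% load while running at 1,000,000 kHz -> max_load_freq = 60 * 1000000 */
        unsigned int next = pick_freq_next(60u * 1000000u, 95u, 14u, 200000u);

        /* 60000000 / (95 - 14) = 740740, so the governor would request the
         * lowest supported frequency at or above roughly 740 MHz. */
        printf("freq_next = %u kHz\n", next);
        return 0;
}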
-static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) -{ - cancel_delayed_work_sync(&dbs_info->work); - cancel_work_sync(&dbs_info->up_work); - cancel_work_sync(&dbs_info->down_work); -} - -static int reboot_notifier_call(struct notifier_block *this, - unsigned long code, void *_cmd) -{ - atomic_set(&g_hotplug_lock, 1); - return NOTIFY_DONE; -} - -static struct notifier_block reboot_notifier = { - .notifier_call = reboot_notifier_call, -}; - -#ifdef CONFIG_HAS_EARLYSUSPEND -static struct early_suspend early_suspend; -unsigned int prev_freq_step; -unsigned int prev_sampling_rate; -static void cpufreq_pegasusq_early_suspend(struct early_suspend *h) -{ -#if EARLYSUSPEND_HOTPLUGLOCK - dbs_tuners_ins.early_suspend = - atomic_read(&g_hotplug_lock); -#endif - prev_freq_step = dbs_tuners_ins.freq_step; - prev_sampling_rate = dbs_tuners_ins.sampling_rate; - dbs_tuners_ins.freq_step = 10; - dbs_tuners_ins.sampling_rate = 200000; -#if EARLYSUSPEND_HOTPLUGLOCK - atomic_set(&g_hotplug_lock, - (dbs_tuners_ins.min_cpu_lock) ? dbs_tuners_ins.min_cpu_lock : 1); - apply_hotplug_lock(); - stop_rq_work(); -#endif -} -static void cpufreq_pegasusq_late_resume(struct early_suspend *h) -{ -#if EARLYSUSPEND_HOTPLUGLOCK - atomic_set(&g_hotplug_lock, dbs_tuners_ins.early_suspend); -#endif - dbs_tuners_ins.early_suspend = -1; - dbs_tuners_ins.freq_step = prev_freq_step; - dbs_tuners_ins.sampling_rate = prev_sampling_rate; -#if EARLYSUSPEND_HOTPLUGLOCK - apply_hotplug_lock(); - start_rq_work(); -#endif -} -#endif - -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int j; - int rc; - - this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - //pegasusq_is_active(true); - - prev_apenable = apget_enable_auto_hotplug(); - apenable_auto_hotplug(false); - - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - dbs_tuners_ins.max_freq = policy->max; - dbs_tuners_ins.min_freq = policy->min; - hotplug_history->num_hist = 0; - start_rq_work(); - - mutex_lock(&dbs_mutex); - - dbs_enable++; - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) { - j_dbs_info->prev_cpu_nice = - kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - } - } - this_dbs_info->cpu = cpu; - this_dbs_info->rate_mult = 1; - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 1) { - rc = sysfs_create_group(cpufreq_global_kobject, - &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - min_sampling_rate = MIN_SAMPLING_RATE; - dbs_tuners_ins.sampling_rate = DEF_SAMPLING_RATE; - dbs_tuners_ins.io_is_busy = 0; - } - mutex_unlock(&dbs_mutex); - - register_reboot_notifier(&reboot_notifier); - - mutex_init(&this_dbs_info->timer_mutex); - dbs_timer_init(this_dbs_info); - -#if !EARLYSUSPEND_HOTPLUGLOCK - register_pm_notifier(&pm_notifier); -#endif -#ifdef CONFIG_HAS_EARLYSUSPEND - register_early_suspend(&early_suspend); -#endif - break; - - case CPUFREQ_GOV_STOP: - //pegasusq_is_active(false); - - apenable_auto_hotplug(prev_apenable); - -#ifdef CONFIG_HAS_EARLYSUSPEND - unregister_early_suspend(&early_suspend); -#endif -#if !EARLYSUSPEND_HOTPLUGLOCK - unregister_pm_notifier(&pm_notifier); -#endif - - 
dbs_timer_exit(this_dbs_info); - - mutex_lock(&dbs_mutex); - mutex_destroy(&this_dbs_info->timer_mutex); - - unregister_reboot_notifier(&reboot_notifier); - - dbs_enable--; - mutex_unlock(&dbs_mutex); - - stop_rq_work(); - - if (!dbs_enable) - sysfs_remove_group(cpufreq_global_kobject, - &dbs_attr_group); - - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&this_dbs_info->timer_mutex); - - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->max, - CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->min, - CPUFREQ_RELATION_L); - - mutex_unlock(&this_dbs_info->timer_mutex); - break; - } - return 0; -} - -static int __init cpufreq_gov_dbs_init(void) -{ - int ret; - struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, 0); - - ret = init_rq_avg(); - if (ret) - return ret; - - INIT_WORK(&dbs_info->up_work, cpu_up_work); - INIT_WORK(&dbs_info->down_work, cpu_down_work); - - hotplug_history = kzalloc(sizeof(struct cpu_usage_history), GFP_KERNEL); - if (!hotplug_history) { - pr_err("%s cannot create hotplug history array\n", __func__); - ret = -ENOMEM; - goto err_hist; - } - - dvfs_workqueue = create_workqueue("kpegasusq"); - if (!dvfs_workqueue) { - pr_err("%s cannot create workqueue\n", __func__); - ret = -ENOMEM; - goto err_queue; - } - - ret = cpufreq_register_governor(&cpufreq_gov_pegasusq); - if (ret) - goto err_reg; - -#ifdef CONFIG_HAS_EARLYSUSPEND - early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; - early_suspend.suspend = cpufreq_pegasusq_early_suspend; - early_suspend.resume = cpufreq_pegasusq_late_resume; -#endif - - return ret; - -err_reg: - destroy_workqueue(dvfs_workqueue); -err_queue: - kfree(hotplug_history); -err_hist: - kfree(rq_data); - return ret; -} - -static void __exit cpufreq_gov_dbs_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_pegasusq); - destroy_workqueue(dvfs_workqueue); - kfree(hotplug_history); - kfree(rq_data); -} - -MODULE_AUTHOR("ByungChang Cha "); -MODULE_DESCRIPTION("'cpufreq_pegasusq' - A dynamic cpufreq/cpuhotplug governor"); -MODULE_LICENSE("GPL"); - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PEGASUSQ -fs_initcall(cpufreq_gov_dbs_init); -#else -module_init(cpufreq_gov_dbs_init); -#endif -module_exit(cpufreq_gov_dbs_exit); From 7e0f70e6304a15249e64bd4d1fb5da7f5fdef83f Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 09:21:53 -0400 Subject: [PATCH 07/35] Added LulzactiveV2,Lagfree,Smartass,SmartassV2,Minmax,InteractiveX Governor Conflicts: drivers/cpufreq/Kconfig drivers/cpufreq/Makefile include/linux/cpufreq.h --- drivers/cpufreq/Kconfig | 116 ++- drivers/cpufreq/Makefile | 24 +- drivers/cpufreq/cpufreq_interactivex.c | 381 ++++++++ drivers/cpufreq/cpufreq_lagfree.c | 662 ++++++++++++++ drivers/cpufreq/cpufreq_lulzactive.c | 1143 ++++++++++++++++++++++++ drivers/cpufreq/cpufreq_minmax.c | 575 ++++++++++++ drivers/cpufreq/cpufreq_smartass.c | 642 +++++++++++++ drivers/cpufreq/cpufreq_smartass2.c | 868 ++++++++++++++++++ include/linux/cpufreq.h | 63 +- 9 files changed, 4458 insertions(+), 16 deletions(-) create mode 100644 drivers/cpufreq/cpufreq_interactivex.c create mode 100644 drivers/cpufreq/cpufreq_lagfree.c create mode 100644 drivers/cpufreq/cpufreq_lulzactive.c create mode 100644 drivers/cpufreq/cpufreq_minmax.c create mode 100644 drivers/cpufreq/cpufreq_smartass.c create mode 100644 drivers/cpufreq/cpufreq_smartass2.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig 
index 2e07ae54..d794fc4d 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -230,6 +230,14 @@ config CPU_FREQ_GOV_BADASS module will be called cpufreq_badass. If in doubt, say N. +config CPU_FREQ_DEFAULT_GOV_MINMAX + bool "minmax" + select CPU_FREQ_GOV_MINMAX + select CPU_FREQ_GOV_PERFORMANCE + Use the CPUFreq governor 'minmax' as default. This minimizes the + frequency jumps does by the governor. This is aimed at maximizing + both perfomance and battery life. + config CPU_FREQ_GOV_CONSERVATIVE tristate "'conservative' cpufreq governor" depends on CPU_FREQ @@ -249,10 +257,51 @@ config CPU_FREQ_GOV_CONSERVATIVE To compile this driver as a module, choose M here: the module will be called cpufreq_conservative. - For details, take a look at linux/Documentation/cpu-freq. + For details, take a look at linux/Documentation/cpu-freq. + +config CPU_FREQ_DEFAULT_GOV_SMARTASS2 + bool "smartass2" + select CPU_FREQ_GOV_SMARTASS2 + Use the CPUFreq governor 'smartassV2' as default. + +config CPU_FREQ_DEFAULT_GOV_LAGFREE + bool "lagfree" + select CPU_FREQ_GOV_LAGFREE + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lagfree' as default. This allows + you to get a full dynamic frequency capable system by simply + loading your cpufreq low-level hardware driver. + Be aware that not all cpufreq drivers support the lagfree + governor. If unsure have a look at the help section of the + driver. Fallback governor will be the performance governor. +config CPU_FREQ_DEFAULT_GOV_INTERACTIVEX + bool "interactiveX" + select CPU_FREQ_GOV_INTERACTIVEX + help + Use the CPUFreq governor 'interactivex' as default. This allows + you to get a full dynamic cpu frequency capable system by simply + loading your cpufreq low-level hardware driver, using the + 'interactivex' governor for latency-sensitive workloads. If in doubt, say N. +config CPU_FREQ_DEFAULT_GOV_LULZACTIVE + bool "lulzactive" + select CPU_FREQ_GOV_LULZACTIVE + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lulzactive' as default. + +config CPU_FREQ_DEFAULT_GOV_SMARTASS + bool "smartass" + select CPU_FREQ_GOV_SMARTASS + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'smartass' as default. + +endchoice + config CPU_FREQ_GOV_DANCEDANCE tristate "'dancedance' cpufreq governor" depends on CPU_FREQ @@ -325,10 +374,68 @@ config CPU_FREQ_GOV_USERSPACE If in doubt, say Y. +config CPU_FREQ_GOV_LULZACTIVE + tristate "'lulzactive' cpufreq governor" + depends on CPU_FREQ + help + 'lulzactive' - a new interactive governor by Tegrak! + config CPU_FREQ_GOV_WHEATLEY tristate "'wheatley' cpufreq governor" depends on CPU_FREQ + If in doubt, say N. + +config CPU_FREQ_GOV_SMARTASS + tristate "'smartass' cpufreq governor" + depends on CPU_FREQ + help + 'smartass' - a "smart" optimized governor for the hero! + + If in doubt, say N. + +config CPU_FREQ_GOV_MINMAX + tristate "'minmax' cpufreq governor" + depends on CPU_FREQ + help + 'minmax' - this driver tries to minimize the frequency jumps by limiting + the the selected frequencies to either the min or the max frequency of + the policy. The frequency is selected according to the load. + +config CPU_FREQ_GOV_SMARTASS2 + tristate "'smartassV2' cpufreq governor" + depends on CPU_FREQ + help + 'smartassV2' - a "smart" optimized governor for the hero! + +config CPU_FREQ_GOV_INTERACTIVEX +tristate "'interactiveX' cpufreq policy governor" + help + 'interactiveX' - Modified version of interactive with sleep+wake code. 
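(Editor's aside, not part of the patch.) The 'minmax' help text above is terse, so here is a minimal stand-alone sketch of the idea it describes: the governor only ever requests policy->min or policy->max, chosen from the measured load, so it never dwells on intermediate steps. The thresholds below are invented for illustration and are not the tunables of the actual cpufreq_minmax.c added by this patch.

#include <stdio.h>

/* Hypothetical thresholds -- purely illustrative, not the driver's defaults. */
#define SKETCH_UP_LOAD   75u    /* at or above this load, jump to policy->max */
#define SKETCH_DOWN_LOAD 35u    /* at or below this load, drop to policy->min */

/*
 * Pick either the minimum or the maximum frequency of the policy, which is
 * the behaviour the Kconfig entry describes: the frequency is selected from
 * the load, but only the two endpoints are ever used.
 */
static unsigned int minmax_pick(unsigned int load, unsigned int cur,
                                unsigned int min, unsigned int max)
{
        if (load >= SKETCH_UP_LOAD)
                return max;
        if (load <= SKETCH_DOWN_LOAD)
                return min;
        return cur;     /* in between: stay put (cur is already min or max) */
}

int main(void)
{
        /* 80% load while sitting at the minimum -> ask for the maximum */
        printf("%u kHz\n", minmax_pick(80, 192000, 192000, 1512000));
        /* 20% load while sitting at the maximum -> ask for the minimum */
        printf("%u kHz\n", minmax_pick(20, 1512000, 192000, 1512000));
        return 0;
}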
+ +config CPU_FREQ_GOV_LAGFREE + tristate "'lagfree' cpufreq governor" + depends on CPU_FREQ + help + 'lagfree' - this driver is rather similar to the 'ondemand' + governor both in its source code and its purpose, the difference is + its optimisation for better suitability in a battery powered + environment. The frequency is gracefully increased and decreased + rather than jumping to 100% when speed is required. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_lagfree. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +config CPU_FREQ_MIN_TICKS + int "Ticks between governor polling interval." + default 10 + help + Minimum number of ticks between polling interval for governors. + config SEC_DVFS bool "DVFS job" default n @@ -339,6 +446,13 @@ config SEC_DVFS_BOOSTER default y depends on SEC_DVFS +config CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER + int "Sampling rate multiplier for governors." + default 1000 + help + Sampling latency rate multiplied by the cpu switch latency. + Affects governor polling. + menu "x86 CPU frequency scaling drivers" depends on X86 source "drivers/cpufreq/Kconfig.x86" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index be135afd..d9355369 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -20,6 +20,14 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o +obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o +obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o +obj-$(CONFIG_CPU_FREQ_GOV_MINMAX) += cpufreq_minmax.o +obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS2) += cpufreq_smartass2.o +obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS) += cpufreq_smartass.o +obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVEX) += cpufreq_interactivex.o +obj-$(CONFIG_CPU_FREQ_GOV_LAGFREE) += cpufreq_lagfree.o +obj-$(CONFIG_CPU_FREQ_GOV_LULZACTIVE) += cpufreq_lulzactive.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o @@ -48,19 +56,7 @@ obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o -################################################################################## +##################################################################################d + # ARM SoC drivers obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o -obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o -obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o -obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o -obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o -obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o -obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o -obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o -obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o - -################################################################################## -# PowerPC platform drivers -obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o -obj-$(CONFIG_MSM_DCVS) += cpufreq_gov_msm.o diff --git a/drivers/cpufreq/cpufreq_interactivex.c b/drivers/cpufreq/cpufreq_interactivex.c new file mode 100644 index 00000000..72ca6291 --- /dev/null +++ b/drivers/cpufreq/cpufreq_interactivex.c @@ -0,0 +1,381 @@ +/* +* drivers/cpufreq/cpufreq_interactivex.c +* +* Copyright (C) 2010 
Google, Inc. +* +* This software is licensed under the terms of the GNU General Public +* License version 2, as published by the Free Software Foundation, and +* may be copied, distributed, and modified under those terms. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* Author: Mike Chan (mike@android.com) - modified for suspend/wake by imoseyon +* +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static void (*pm_idle_old)(void); +static atomic_t active_count = ATOMIC_INIT(0); + +static DEFINE_PER_CPU(struct timer_list, cpu_timer); + +static DEFINE_PER_CPU(u64, time_in_idle); +static DEFINE_PER_CPU(u64, idle_exit_time); + +static struct cpufreq_policy *policy; +static unsigned int target_freq; + +/* Workqueues handle frequency scaling */ +static struct workqueue_struct *up_wq; +static struct workqueue_struct *down_wq; +static struct work_struct freq_scale_work; + +static u64 freq_change_time; +static u64 freq_change_time_in_idle; + +static cpumask_t work_cpumask; + +static unsigned int suspended = 0; +static unsigned int enabled = 0; + +/* +* The minimum ammount of time to spend at a frequency before we can ramp down, +* default is 50ms. +*/ +#define DEFAULT_MIN_SAMPLE_TIME 50000; +static unsigned long min_sample_time; + +#define FREQ_THRESHOLD 998400; +#define RESUME_SPEED 998400; + +static int cpufreq_governor_interactivex(struct cpufreq_policy *policy, +unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX +static +#endif +struct cpufreq_governor cpufreq_gov_interactivex = { +.name = "interactiveX", +.governor = cpufreq_governor_interactivex, +#if defined(CONFIG_ARCH_MSM_SCORPION) +.max_transition_latency = 8000000, +#else +.max_transition_latency = 10000000, +#endif +.owner = THIS_MODULE, +}; + +static void cpufreq_interactivex_timer(unsigned long data) +{ +u64 delta_idle; +u64 update_time; +u64 *cpu_time_in_idle; +u64 *cpu_idle_exit_time; +struct timer_list *t; + +u64 now_idle = get_cpu_idle_time_us(data, +&update_time); + + +cpu_time_in_idle = &per_cpu(time_in_idle, data); +cpu_idle_exit_time = &per_cpu(idle_exit_time, data); + +if (update_time == *cpu_idle_exit_time) +return; + +delta_idle = cputime64_sub(now_idle, *cpu_time_in_idle); + +/* Scale up if there were no idle cycles since coming out of idle */ +if (delta_idle == 0) { +if (policy->cur == policy->max) +return; + +if (nr_running() < 1) +return; + +target_freq = policy->max; + +cpumask_set_cpu(data, &work_cpumask); +queue_work(up_wq, &freq_scale_work); +return; +} + +/* +* There is a window where if the cpu utlization can go from low to high +* between the timer expiring, delta_idle will be > 0 and the cpu will +* be 100% busy, preventing idle from running, and this timer from +* firing. So setup another timer to fire to check cpu utlization. +* Do not setup the timer if there is no scheduled work. +*/ +t = &per_cpu(cpu_timer, data); +if (!timer_pending(t) && nr_running() > 0) { +*cpu_time_in_idle = get_cpu_idle_time_us( +data, cpu_idle_exit_time); +mod_timer(t, jiffies + 2); +} + +if (policy->cur == policy->min) +return; + +/* +* Do not scale down unless we have been at this frequency for the +* minimum sample time. 
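+* min_sample_time can also be tuned at run time through the
+* min_sample_time attribute this governor creates under
+* /sys/devices/system/cpu/cpufreq/interactiveX/.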
+*/
+	if (cputime64_sub(update_time, freq_change_time) < min_sample_time)
+		return;
+
+	target_freq = policy->min;
+	cpumask_set_cpu(data, &work_cpumask);
+	queue_work(down_wq, &freq_scale_work);
+}
+
+static void cpufreq_idle(void)
+{
+	struct timer_list *t;
+	u64 *cpu_time_in_idle;
+	u64 *cpu_idle_exit_time;
+
+	pm_idle_old();
+
+	if (!cpumask_test_cpu(smp_processor_id(), policy->cpus))
+		return;
+
+	/* Timer to fire in 1-2 ticks, jiffy aligned. */
+	t = &per_cpu(cpu_timer, smp_processor_id());
+	cpu_idle_exit_time = &per_cpu(idle_exit_time, smp_processor_id());
+	cpu_time_in_idle = &per_cpu(time_in_idle, smp_processor_id());
+
+	if (timer_pending(t) == 0) {
+		*cpu_time_in_idle = get_cpu_idle_time_us(
+				smp_processor_id(), cpu_idle_exit_time);
+		mod_timer(t, jiffies + 2);
+	}
+}
+
+/*
+* Choose the cpu frequency based on the load. For now choose the minimum
+* frequency that will satisfy the load, which is not always the lowest power.
+*/
+static unsigned int cpufreq_interactivex_calc_freq(unsigned int cpu)
+{
+	unsigned int delta_time;
+	unsigned int idle_time;
+	unsigned int cpu_load;
+	unsigned int newfreq;
+	u64 current_wall_time;
+	u64 current_idle_time;
+
+	current_idle_time = get_cpu_idle_time_us(cpu, &current_wall_time);
+
+	idle_time = (unsigned int) current_idle_time - freq_change_time_in_idle;
+	delta_time = (unsigned int) current_wall_time - freq_change_time;
+
+	cpu_load = 100 * (delta_time - idle_time) / delta_time;
+
+	if (cpu_load > 98)
+		newfreq = policy->max;
+	else
+		newfreq = policy->cur * cpu_load / 100;
+
+	return newfreq;
+}
+
+/* We use the same work function to scale up and down */
+static void cpufreq_interactivex_freq_change_time_work(struct work_struct *work)
+{
+	unsigned int cpu;
+	unsigned int newtarget;
+	cpumask_t tmp_mask = work_cpumask;
+	newtarget = FREQ_THRESHOLD;
+
+	for_each_cpu(cpu, &tmp_mask) {
+		if (!suspended) {
+			if (target_freq == policy->max) {
+				if (nr_running() == 1) {
+					cpumask_clear_cpu(cpu, &work_cpumask);
+					return;
+				}
+//				__cpufreq_driver_target(policy, target_freq, CPUFREQ_RELATION_H);
+				__cpufreq_driver_target(policy, newtarget, CPUFREQ_RELATION_H);
+			} else {
+				target_freq = cpufreq_interactivex_calc_freq(cpu);
+				__cpufreq_driver_target(policy, target_freq,
+						CPUFREQ_RELATION_L);
+			}
+		}
+		freq_change_time_in_idle = get_cpu_idle_time_us(cpu, &freq_change_time);
+		cpumask_clear_cpu(cpu, &work_cpumask);
+	}
+}
+
+static ssize_t show_min_sample_time(struct kobject *kobj,
+		struct attribute *attr, char *buf)
+{
+	return sprintf(buf, "%lu\n", min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct kobject *kobj,
+		struct attribute *attr, const char *buf, size_t count)
+{
+	return strict_strtoul(buf, 0, &min_sample_time);
+}
+
+static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
+		show_min_sample_time, store_min_sample_time);
+
+static struct attribute *interactivex_attributes[] = {
+	&min_sample_time_attr.attr,
+	NULL,
+};
+
+static struct attribute_group interactivex_attr_group = {
+	.attrs = interactivex_attributes,
+	.name = "interactiveX",
+};
+
+static void interactivex_suspend(int suspend)
+{
+	unsigned int max_speed;
+
+	max_speed = RESUME_SPEED;
+
+	if (!enabled)
+		return;
+	if (!suspend) { // resume at max speed:
+		suspended = 0;
+		__cpufreq_driver_target(policy, max_speed, CPUFREQ_RELATION_L);
+		pr_info("[imoseyon] interactiveX awake at %d\n", policy->cur);
+	} else {
+		suspended = 1;
+		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+		pr_info("[imoseyon] interactiveX suspended at %d\n", policy->cur);
+	}
+}
+
+static void
interactivex_early_suspend(struct early_suspend *handler) { + interactivex_suspend(1); +} + +static void interactivex_late_resume(struct early_suspend *handler) { + interactivex_suspend(0); +} + +static struct early_suspend interactivex_power_suspend = { + .suspend = interactivex_early_suspend, + .resume = interactivex_late_resume, + .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, +}; + +static int cpufreq_governor_interactivex(struct cpufreq_policy *new_policy, +unsigned int event) +{ +int rc; +switch (event) { +case CPUFREQ_GOV_START: +if (!cpu_online(new_policy->cpu)) +return -EINVAL; + +/* +* Do not register the idle hook and create sysfs +* entries if we have already done so. +*/ +if (atomic_inc_return(&active_count) > 1) +return 0; + +rc = sysfs_create_group(cpufreq_global_kobject, +&interactivex_attr_group); +if (rc) +return rc; + +pm_idle_old = pm_idle; +pm_idle = cpufreq_idle; +policy = new_policy; +enabled = 1; + register_early_suspend(&interactivex_power_suspend); + pr_info("[imoseyon] interactiveX active\n"); +break; + +case CPUFREQ_GOV_STOP: +if (atomic_dec_return(&active_count) > 1) +return 0; + +sysfs_remove_group(cpufreq_global_kobject, +&interactivex_attr_group); + +pm_idle = pm_idle_old; +del_timer(&per_cpu(cpu_timer, new_policy->cpu)); +enabled = 0; + unregister_early_suspend(&interactivex_power_suspend); + pr_info("[imoseyon] interactiveX inactive\n"); +break; + +case CPUFREQ_GOV_LIMITS: +if (new_policy->max < new_policy->cur) +__cpufreq_driver_target(new_policy, +new_policy->max, CPUFREQ_RELATION_H); +else if (new_policy->min > new_policy->cur) +__cpufreq_driver_target(new_policy, +new_policy->min, CPUFREQ_RELATION_L); +break; +} +return 0; +} + +static int __init cpufreq_interactivex_init(void) +{ +unsigned int i; +struct timer_list *t; +min_sample_time = DEFAULT_MIN_SAMPLE_TIME; + +/* Initalize per-cpu timers */ +for_each_possible_cpu(i) { +t = &per_cpu(cpu_timer, i); +init_timer_deferrable(t); +t->function = cpufreq_interactivex_timer; +t->data = i; +} + +/* Scale up is high priority */ +up_wq = create_workqueue("kinteractive_up"); +down_wq = create_workqueue("knteractive_down"); + +INIT_WORK(&freq_scale_work, cpufreq_interactivex_freq_change_time_work); + + pr_info("[imoseyon] interactiveX enter\n"); +return cpufreq_register_governor(&cpufreq_gov_interactivex); +} + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX +fs_initcall(cpufreq_interactivex_init); +#else +module_init(cpufreq_interactivex_init); +#endif + +static void __exit cpufreq_interactivex_exit(void) +{ + pr_info("[imoseyon] interactiveX exit\n"); +cpufreq_unregister_governor(&cpufreq_gov_interactivex); +destroy_workqueue(up_wq); +destroy_workqueue(down_wq); +} + +module_exit(cpufreq_interactivex_exit); + +MODULE_AUTHOR("Mike Chan "); +MODULE_DESCRIPTION("'cpufreq_interactiveX' - A cpufreq governor for " +"Latency sensitive workloads"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cpufreq_lagfree.c b/drivers/cpufreq/cpufreq_lagfree.c new file mode 100644 index 00000000..bf274a11 --- /dev/null +++ b/drivers/cpufreq/cpufreq_lagfree.c @@ -0,0 +1,662 @@ +/* + * drivers/cpufreq/cpufreq_lagfree.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * (C) 2004 Alexander Clouter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_UP_THRESHOLD (50) +#define DEF_FREQUENCY_DOWN_THRESHOLD (15) +#define FREQ_STEP_DOWN (160000) +#define FREQ_SLEEP_MAX (320000) +#define FREQ_AWAKE_MIN (480000) +#define FREQ_STEP_UP_SLEEP_PERCENT (20) + +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers + * with CPUFREQ_ETERNAL), this governor will not work. + * All times here are in uS. + */ +static unsigned int def_sampling_rate; +unsigned int suspended = 0; +#define MIN_SAMPLING_RATE_RATIO (2) +/* for correct statistics, we need at least 10 ticks between each measure */ +#define MIN_STAT_SAMPLING_RATE \ + (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(CONFIG_CPU_FREQ_MIN_TICKS)) +#define MIN_SAMPLING_RATE \ + (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) +#define MAX_SAMPLING_RATE (500 * def_sampling_rate) +#define DEF_SAMPLING_DOWN_FACTOR (4) +#define MAX_SAMPLING_DOWN_FACTOR (10) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +static void do_dbs_timer(struct work_struct *work); + +struct cpu_dbs_info_s { + struct cpufreq_policy *cur_policy; + unsigned int prev_cpu_idle_up; + unsigned int prev_cpu_idle_down; + unsigned int enable; + unsigned int down_skip; + unsigned int requested_freq; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug + * lock and dbs_mutex. cpu_hotplug lock should always be held before + * dbs_mutex. If any function that can potentially take cpu_hotplug lock + * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then + * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock + * is recursive for the same process. 
-Venki + */ +static DEFINE_MUTEX (dbs_mutex); +static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer); + +struct dbs_tuners { + unsigned int sampling_rate; + unsigned int sampling_down_factor; + unsigned int up_threshold; + unsigned int down_threshold; + unsigned int ignore_nice; + //unsigned int freq_step; +}; + +static struct dbs_tuners dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .ignore_nice = 1, + //.freq_step = 5, +}; + +static inline unsigned int get_cpu_idle_time(unsigned int cpu) +{ + unsigned int add_nice = 0, ret; + + if (dbs_tuners_ins.ignore_nice) + add_nice = kstat_cpu(cpu).cpustat.nice; + + ret = kstat_cpu(cpu).cpustat.idle + + kstat_cpu(cpu).cpustat.iowait + + add_nice; + + return ret; +} + +/* keep track of frequency transitions */ +static int +dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_freqs *freq = data; + struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, + freq->cpu); + + if (!this_dbs_info->enable) + return 0; + + this_dbs_info->requested_freq = freq->new; + + return 0; +} + +static struct notifier_block dbs_cpufreq_notifier_block = { + .notifier_call = dbs_cpufreq_notifier +}; + +/************************** sysfs interface ************************/ +static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) +{ + return sprintf (buf, "%u\n", MAX_SAMPLING_RATE); +} + +static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) +{ + return sprintf (buf, "%u\n", MIN_SAMPLING_RATE); +} + +#define define_one_ro(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +define_one_ro(sampling_rate_max); +define_one_ro(sampling_rate_min); + +/* cpufreq_lagfree Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct cpufreq_policy *unused, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(sampling_down_factor, sampling_down_factor); +show_one(up_threshold, up_threshold); +show_one(down_threshold, down_threshold); +show_one(ignore_nice_load, ignore_nice); +//show_one(freq_step, freq_step); + +static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf (buf, "%u", &input); + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.sampling_down_factor = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_sampling_rate(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf (buf, "%u", &input); + + mutex_lock(&dbs_mutex); + if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) { + mutex_unlock(&dbs_mutex); + return -EINVAL; + } + + dbs_tuners_ins.sampling_rate = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_up_threshold(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf (buf, "%u", &input); + + mutex_lock(&dbs_mutex); + if (ret != 1 || input > 100 || input <= dbs_tuners_ins.down_threshold) { + mutex_unlock(&dbs_mutex); + return -EINVAL; + } + + dbs_tuners_ins.up_threshold = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static 
ssize_t store_down_threshold(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf (buf, "%u", &input); + + mutex_lock(&dbs_mutex); + if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) { + mutex_unlock(&dbs_mutex); + return -EINVAL; + } + + dbs_tuners_ins.down_threshold = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + mutex_lock(&dbs_mutex); + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + mutex_unlock(&dbs_mutex); + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(cpu_dbs_info, j); + j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j); + j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up; + } + mutex_unlock(&dbs_mutex); + + return count; +} + +/*static ssize_t store_freq_step(struct cpufreq_policy *policy, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input > 100) + input = 100; + + / * no need to test here if freq_step is zero as the user might actually + * want this, they would be crazy though :) * / + mutex_lock(&dbs_mutex); + dbs_tuners_ins.freq_step = input; + mutex_unlock(&dbs_mutex); + + return count; +}*/ + +#define define_one_rw(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0644, show_##_name, store_##_name) + +define_one_rw(sampling_rate); +define_one_rw(sampling_down_factor); +define_one_rw(up_threshold); +define_one_rw(down_threshold); +define_one_rw(ignore_nice_load); +//define_one_rw(freq_step); + +static struct attribute * dbs_attributes[] = { + &sampling_rate_max.attr, + &sampling_rate_min.attr, + &sampling_rate.attr, + &sampling_down_factor.attr, + &up_threshold.attr, + &down_threshold.attr, + &ignore_nice_load.attr, + //&freq_step.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "lagfree", +}; + +/************************** sysfs end ************************/ + +static void dbs_check_cpu(int cpu) +{ + unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; + unsigned int tmp_idle_ticks, total_idle_ticks; + unsigned int freq_target; + unsigned int freq_down_sampling_rate; + struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu); + struct cpufreq_policy *policy; + + if (!this_dbs_info->enable) + return; + + policy = this_dbs_info->cur_policy; + + /* + * The default safe range is 20% to 80% + * Every sampling_rate, we check + * - If current idle time is less than 20%, then we try to + * increase frequency + * Every sampling_rate*sampling_down_factor, we check + * - If current idle time is more than 80%, then we try to + * decrease frequency + * + * Any frequency increase takes it to the maximum frequency. 
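+	 * In this lagfree variant the increase is additionally clamped: while
+	 * the screen is off only FREQ_STEP_UP_SLEEP_PERCENT of the max
+	 * frequency is added per sample and the result is capped at
+	 * FREQ_SLEEP_MAX, while an awake system is never left below
+	 * FREQ_AWAKE_MIN.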
+ * Frequency reduction happens at minimum steps of + * 5% (default) of max_frequency + */ + + /* Check for frequency increase */ + idle_ticks = UINT_MAX; + + /* Check for frequency increase */ + total_idle_ticks = get_cpu_idle_time(cpu); + tmp_idle_ticks = total_idle_ticks - + this_dbs_info->prev_cpu_idle_up; + this_dbs_info->prev_cpu_idle_up = total_idle_ticks; + + if (tmp_idle_ticks < idle_ticks) + idle_ticks = tmp_idle_ticks; + + /* Scale idle ticks by 100 and compare with up and down ticks */ + idle_ticks *= 100; + up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) * + usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + if (idle_ticks < up_idle_ticks) { + this_dbs_info->down_skip = 0; + this_dbs_info->prev_cpu_idle_down = + this_dbs_info->prev_cpu_idle_up; + + /* if we are already at full speed then break out early */ + if (this_dbs_info->requested_freq == policy->max && !suspended) + return; + + //freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; + if (suspended) + freq_target = (FREQ_STEP_UP_SLEEP_PERCENT * policy->max) / 100; + else + freq_target = policy->max; + + /* max freq cannot be less than 100. But who knows.... */ + if (unlikely(freq_target == 0)) + freq_target = 5; + + this_dbs_info->requested_freq += freq_target; + if (this_dbs_info->requested_freq > policy->max) + this_dbs_info->requested_freq = policy->max; + + //Screen off mode + if (suspended && this_dbs_info->requested_freq > FREQ_SLEEP_MAX) + this_dbs_info->requested_freq = FREQ_SLEEP_MAX; + + //Screen off mode + if (!suspended && this_dbs_info->requested_freq < FREQ_AWAKE_MIN) + this_dbs_info->requested_freq = FREQ_AWAKE_MIN; + + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, + CPUFREQ_RELATION_H); + return; + } + + /* Check for frequency decrease */ + this_dbs_info->down_skip++; + if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor) + return; + + /* Check for frequency decrease */ + total_idle_ticks = this_dbs_info->prev_cpu_idle_up; + tmp_idle_ticks = total_idle_ticks - + this_dbs_info->prev_cpu_idle_down; + this_dbs_info->prev_cpu_idle_down = total_idle_ticks; + + if (tmp_idle_ticks < idle_ticks) + idle_ticks = tmp_idle_ticks; + + /* Scale idle ticks by 100 and compare with up and down ticks */ + idle_ticks *= 100; + this_dbs_info->down_skip = 0; + + freq_down_sampling_rate = dbs_tuners_ins.sampling_rate * + dbs_tuners_ins.sampling_down_factor; + down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * + usecs_to_jiffies(freq_down_sampling_rate); + + if (idle_ticks > down_idle_ticks) { + /* + * if we are already at the lowest speed then break out early + * or if we 'cannot' reduce the speed as the user might want + * freq_target to be zero + */ + if (this_dbs_info->requested_freq == policy->min && suspended + /*|| dbs_tuners_ins.freq_step == 0*/) + return; + + //freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; + freq_target = FREQ_STEP_DOWN; //policy->max; + + /* max freq cannot be less than 100. But who knows.... 
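+	 * (A leftover from the conservative governor, where freq_target was a
+	 * percentage of the max frequency; with the fixed FREQ_STEP_DOWN used
+	 * here this guard should never trigger.)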
*/ + if (unlikely(freq_target == 0)) + freq_target = 5; + + // prevent going under 0 + if(freq_target > this_dbs_info->requested_freq) + this_dbs_info->requested_freq = policy->min; + else + this_dbs_info->requested_freq -= freq_target; + + if (this_dbs_info->requested_freq < policy->min) + this_dbs_info->requested_freq = policy->min; + + //Screen on mode + if (!suspended && this_dbs_info->requested_freq < FREQ_AWAKE_MIN) + this_dbs_info->requested_freq = FREQ_AWAKE_MIN; + + //Screen off mode + if (suspended && this_dbs_info->requested_freq > FREQ_SLEEP_MAX) + this_dbs_info->requested_freq = FREQ_SLEEP_MAX; + + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, + CPUFREQ_RELATION_H); + return; + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + int i; + mutex_lock(&dbs_mutex); + for_each_online_cpu(i) + dbs_check_cpu(i); + schedule_delayed_work(&dbs_work, + usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); + mutex_unlock(&dbs_mutex); +} + +static inline void dbs_timer_init(void) +{ + init_timer_deferrable(&dbs_work.timer); + schedule_delayed_work(&dbs_work, + usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); + return; +} + +static inline void dbs_timer_exit(void) +{ + cancel_delayed_work(&dbs_work); + return; +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + if (this_dbs_info->enable) /* Already enabled */ + break; + + mutex_lock(&dbs_mutex); + + rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu); + j_dbs_info->prev_cpu_idle_down + = j_dbs_info->prev_cpu_idle_up; + } + this_dbs_info->enable = 1; + this_dbs_info->down_skip = 0; + this_dbs_info->requested_freq = policy->cur; + + dbs_enable++; + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + /* policy latency is in nS. 
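+			 * (the sampling rate derived below is
+			 * 10 * latency_us * CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER
+			 * and is never allowed to drop under MIN_STAT_SAMPLING_RATE).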
Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + + def_sampling_rate = 10 * latency * + CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER; + + if (def_sampling_rate < MIN_STAT_SAMPLING_RATE) + def_sampling_rate = MIN_STAT_SAMPLING_RATE; + + dbs_tuners_ins.sampling_rate = def_sampling_rate; + + dbs_timer_init(); + cpufreq_register_notifier( + &dbs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + + mutex_unlock(&dbs_mutex); + break; + + case CPUFREQ_GOV_STOP: + mutex_lock(&dbs_mutex); + this_dbs_info->enable = 0; + sysfs_remove_group(&policy->kobj, &dbs_attr_group); + dbs_enable--; + /* + * Stop the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 0) { + dbs_timer_exit(); + cpufreq_unregister_notifier( + &dbs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + + mutex_unlock(&dbs_mutex); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&dbs_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target( + this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target( + this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&dbs_mutex); + break; + } + return 0; +} + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_LAGFREE +static +#endif +struct cpufreq_governor cpufreq_gov_lagfree = { + .name = "lagfree", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +static void lagfree_early_suspend(struct early_suspend *handler) { + suspended = 1; +} + +static void lagfree_late_resume(struct early_suspend *handler) { + suspended = 0; +} + +static struct early_suspend lagfree_power_suspend = { + .suspend = lagfree_early_suspend, + .resume = lagfree_late_resume, + .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, +}; + +static int __init cpufreq_gov_dbs_init(void) +{ + register_early_suspend(&lagfree_power_suspend); + return cpufreq_register_governor(&cpufreq_gov_lagfree); +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + /* Make sure that the scheduled work is indeed not running */ + flush_scheduled_work(); + + unregister_early_suspend(&lagfree_power_suspend); + cpufreq_unregister_governor(&cpufreq_gov_lagfree); +} + + +MODULE_AUTHOR ("Emilio López "); +MODULE_DESCRIPTION ("'cpufreq_lagfree' - A dynamic cpufreq governor for " + "Low Latency Frequency Transition capable processors " + "optimised for use in a battery environment" + "Based on conservative by Alexander Clouter"); +MODULE_LICENSE ("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_LAGFREE +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_lulzactive.c b/drivers/cpufreq/cpufreq_lulzactive.c new file mode 100644 index 00000000..ab5506a6 --- /dev/null +++ b/drivers/cpufreq/cpufreq_lulzactive.c @@ -0,0 +1,1143 @@ +/* + * drivers/cpufreq/cpufreq_lulzactive.c + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * Author: Mike Chan (mike@android.com) + * Edited: Tegrak (luciferanna@gmail.com) + * + * Driver values in /sys/devices/system/cpu/cpufreq/lulzactive + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define LULZACTIVE_VERSION (2) +#define LULZACTIVE_AUTHOR "tegrak" + +// if you changed some codes for optimization, just write your name here. +#define LULZACTIVE_TUNER "simone201" + +#define LOGI(fmt...) printk(KERN_INFO "[lulzactive] " fmt) +#define LOGW(fmt...) printk(KERN_WARNING "[lulzactive] " fmt) +#define LOGD(fmt...) printk(KERN_DEBUG "[lulzactive] " fmt) + +static void (*pm_idle_old)(void); +static atomic_t active_count = ATOMIC_INIT(0); + +struct cpufreq_lulzactive_cpuinfo { + struct timer_list cpu_timer; + int timer_idlecancel; + u64 time_in_idle; + u64 idle_exit_time; + u64 timer_run_time; + int idling; + u64 freq_change_time; + u64 freq_change_time_in_idle; + struct cpufreq_policy *policy; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_table_size; + unsigned int target_freq; + int governor_enabled; +}; + +static DEFINE_PER_CPU(struct cpufreq_lulzactive_cpuinfo, cpuinfo); + +/* Workqueues handle frequency scaling */ +static struct task_struct *up_task; +static struct workqueue_struct *down_wq; +static struct work_struct freq_scale_down_work; +static cpumask_t up_cpumask; +static spinlock_t up_cpumask_lock; +static cpumask_t down_cpumask; +static spinlock_t down_cpumask_lock; + +/* + * The minimum amount of time to spend at a frequency before we can step up. + */ +#define DEFAULT_UP_SAMPLE_TIME 20000 +static unsigned long up_sample_time; + +/* + * The minimum amount of time to spend at a frequency before we can step down. + */ +#define DEFAULT_DOWN_SAMPLE_TIME 40000 +static unsigned long down_sample_time; + +/* + * DEBUG print flags + */ +static unsigned long debug_mode; +enum { + LULZACTIVE_DEBUG_EARLY_SUSPEND=1, + LULZACTIVE_DEBUG_START_STOP=2, + LULZACTIVE_DEBUG_LOAD=4, + LULZACTIVE_DEBUG_SUSPEND=8, +}; +//#define DEFAULT_DEBUG_MODE (LULZACTIVE_DEBUG_EARLY_SUSPEND | LULZACTIVE_DEBUG_START_STOP | LULZACTIVE_DEBUG_SUSPEND) +#define DEFAULT_DEBUG_MODE (0) + +/* + * CPU freq will be increased if measured load > inc_cpu_load; + */ +#define DEFAULT_INC_CPU_LOAD 75 +static unsigned long inc_cpu_load; + +/* + * CPU freq will be decreased if measured load < dec_cpu_load; + * not implemented yet. + */ +#define DEFAULT_DEC_CPU_LOAD 30 +static unsigned long dec_cpu_load; + +/* + * Increasing frequency table index + * zero disables and causes to always jump straight to max frequency. + */ +#define DEFAULT_PUMP_UP_STEP 1 +static unsigned long pump_up_step; + +/* + * Decreasing frequency table index + * zero disables and will calculate frequency according to load heuristic. + */ +#define DEFAULT_PUMP_DOWN_STEP 1 +static unsigned long pump_down_step; + +/* + * Use minimum frequency while suspended. 
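+ * 'suspending' is set around a full suspend by the PM notifier and
+ * 'early_suspended' while the screen is off; with the screen off the
+ * chosen frequency is additionally capped to the screen_off_min_step
+ * entry of the frequency table (see adjust_screen_off_freq()).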
+ */ +static unsigned int suspending; +static unsigned int early_suspended; + +#define SCREEN_OFF_LOWEST_STEP (0xffffffff) +#define DEFAULT_SCREEN_OFF_MIN_STEP (SCREEN_OFF_LOWEST_STEP) +static unsigned long screen_off_min_step; + +#define DEBUG 0 +#define BUFSZ 128 + +#if DEBUG +#include + +struct dbgln { + int cpu; + unsigned long jiffy; + unsigned long run; + char buf[BUFSZ]; +}; + +#define NDBGLNS 256 + +static struct dbgln dbgbuf[NDBGLNS]; +static int dbgbufs; +static int dbgbufe; +static struct proc_dir_entry *dbg_proc; +static spinlock_t dbgpr_lock; + +static u64 up_request_time; +static unsigned int up_max_latency; + +static void dbgpr(char *fmt, ...) +{ + va_list args; + int n; + unsigned long flags; + + spin_lock_irqsave(&dbgpr_lock, flags); + n = dbgbufe; + va_start(args, fmt); + vsnprintf(dbgbuf[n].buf, BUFSZ, fmt, args); + va_end(args); + dbgbuf[n].cpu = smp_processor_id(); + dbgbuf[n].run = nr_running(); + dbgbuf[n].jiffy = jiffies; + + if (++dbgbufe >= NDBGLNS) + dbgbufe = 0; + + if (dbgbufe == dbgbufs) + if (++dbgbufs >= NDBGLNS) + dbgbufs = 0; + + spin_unlock_irqrestore(&dbgpr_lock, flags); +} + +static void dbgdump(void) +{ + int i, j; + unsigned long flags; + static struct dbgln prbuf[NDBGLNS]; + + spin_lock_irqsave(&dbgpr_lock, flags); + i = dbgbufs; + j = dbgbufe; + memcpy(prbuf, dbgbuf, sizeof(dbgbuf)); + dbgbufs = 0; + dbgbufe = 0; + spin_unlock_irqrestore(&dbgpr_lock, flags); + + while (i != j) + { + printk("%lu %d %lu %s", + prbuf[i].jiffy, prbuf[i].cpu, prbuf[i].run, + prbuf[i].buf); + if (++i == NDBGLNS) + i = 0; + } +} + +static int dbg_proc_read(char *buffer, char **start, off_t offset, + int count, int *peof, void *dat) +{ + printk("max up_task latency=%uus\n", up_max_latency); + dbgdump(); + *peof = 1; + return 0; +} + + +#else +#define dbgpr(...) 
do {} while (0) +#endif + +static int cpufreq_governor_lulzactive(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_LULZACTIVE +static +#endif +struct cpufreq_governor cpufreq_gov_lulzactive = { + .name = "lulzactive", + .governor = cpufreq_governor_lulzactive, + .max_transition_latency = 9000000, + .owner = THIS_MODULE, +}; + +static unsigned int get_freq_table_size(struct cpufreq_frequency_table *freq_table) { + unsigned int size = 0; + while (freq_table[++size].frequency != CPUFREQ_TABLE_END); + return size; +} + +static inline void fix_screen_off_min_step(struct cpufreq_lulzactive_cpuinfo *pcpu) { + if (pcpu->freq_table_size <= 0) { + screen_off_min_step = 0; + return; + } + + if (DEFAULT_SCREEN_OFF_MIN_STEP == screen_off_min_step) + screen_off_min_step = pcpu->freq_table_size - 3; + + if (screen_off_min_step >= pcpu->freq_table_size) + screen_off_min_step = pcpu->freq_table_size - 3; +} + +static inline unsigned int adjust_screen_off_freq( + struct cpufreq_lulzactive_cpuinfo *pcpu, unsigned int freq) { + + if (early_suspended && freq > pcpu->freq_table[screen_off_min_step].frequency) { + freq = pcpu->freq_table[screen_off_min_step].frequency; + pcpu->target_freq = pcpu->policy->cur; + + if (freq > pcpu->policy->max) + freq = pcpu->policy->max; + if (freq < pcpu->policy->min) + freq = pcpu->policy->min; + } + + return freq; +} + +static void cpufreq_lulzactive_timer(unsigned long data) +{ + unsigned int delta_idle; + unsigned int delta_time; + int cpu_load; + int load_since_change; + u64 time_in_idle; + u64 idle_exit_time; + struct cpufreq_lulzactive_cpuinfo *pcpu = + &per_cpu(cpuinfo, data); + u64 now_idle; + unsigned int new_freq; + int index; + int ret; + + /* + * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time, + * this lets idle exit know the current idle time sample has + * been processed, and idle exit can generate a new sample and + * re-arm the timer. This prevents a concurrent idle + * exit on that CPU from writing a new set of info at the same time + * the timer function runs (the timer function can't use that info + * until more time passes). + */ + time_in_idle = pcpu->time_in_idle; + idle_exit_time = pcpu->idle_exit_time; + now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time); + smp_wmb(); + + /* If we raced with cancelling a timer, skip. */ + if (!idle_exit_time) { + dbgpr("timer %d: no valid idle exit sample\n", (int) data); + goto exit; + } + + /* let it be when s5pv310 contorl the suspending by tegrak */ + //if (suspending) { + // goto rearm; + //} + +#if DEBUG + if ((int) jiffies - (int) pcpu->cpu_timer.expires >= 10) + dbgpr("timer %d: late by %d ticks\n", + (int) data, jiffies - pcpu->cpu_timer.expires); +#endif + + delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle); + delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time, + idle_exit_time); + + /* + * If timer ran less than 1ms after short-term sample started, retry. 
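+	 * A window that short makes the idle/wall-time deltas too noisy to
+	 * derive a meaningful load figure.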
+ */ + if (delta_time < 1000) { + dbgpr("timer %d: time delta %u too short exit=%llu now=%llu\n", (int) data, + delta_time, idle_exit_time, pcpu->timer_run_time); + goto rearm; + } + + if (delta_idle > delta_time) + cpu_load = 0; + else + cpu_load = 100 * (delta_time - delta_idle) / delta_time; + + delta_idle = (unsigned int) cputime64_sub(now_idle, + pcpu->freq_change_time_in_idle); + delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time, + pcpu->freq_change_time); + + if (delta_idle > delta_time) + load_since_change = 0; + else + load_since_change = + 100 * (delta_time - delta_idle) / delta_time; + + /* + * Choose greater of short-term load (since last idle timer + * started or timer function re-armed itself) or long-term load + * (since last frequency change). + */ + if (load_since_change > cpu_load) + cpu_load = load_since_change; + + /* + * START lulzactive algorithm section + */ + if (cpu_load >= inc_cpu_load) { + if (pump_up_step && pcpu->policy->cur < pcpu->policy->max) { + ret = cpufreq_frequency_table_target( + pcpu->policy, pcpu->freq_table, + pcpu->policy->cur, CPUFREQ_RELATION_H, + &index); + if (ret < 0) { + goto rearm; + } + + // apply pump_up_step by tegrak + index -= pump_up_step; + if (index < 0) + index = 0; + + new_freq = pcpu->freq_table[index].frequency; + } + else { + new_freq = pcpu->policy->max; + } + } + else { + if (pump_down_step) { + ret = cpufreq_frequency_table_target( + pcpu->policy, pcpu->freq_table, + pcpu->policy->cur, CPUFREQ_RELATION_H, + &index); + if (ret < 0) { + goto rearm; + } + + // apply pump_down_step by tegrak + index += pump_down_step; + if (index >= pcpu->freq_table_size) { + index = pcpu->freq_table_size - 1; + } + + new_freq = (pcpu->policy->cur > pcpu->policy->min) ? + (pcpu->freq_table[index].frequency) : + (pcpu->policy->min); + } + else { + new_freq = pcpu->policy->max * cpu_load / 100; + ret = cpufreq_frequency_table_target( + pcpu->policy, pcpu->freq_table, + new_freq, CPUFREQ_RELATION_H, + &index); + if (ret < 0) { + goto rearm; + } + new_freq = pcpu->freq_table[index].frequency; + } + } + + // adjust freq when screen off + new_freq = adjust_screen_off_freq(pcpu, new_freq); + + if (pcpu->target_freq == new_freq) + { + dbgpr("timer %d: load=%d, already at %d\n", (int) data, cpu_load, new_freq); + goto rearm_if_notmax; + } + + /* + * Do not scale down unless we have been at this frequency for the + * minimum sample time. 
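+	 * Ramping down is gated by down_sample_time (default 40 ms); the
+	 * mirrored check below gates ramping up with up_sample_time
+	 * (default 20 ms).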
+ */ + if (new_freq < pcpu->target_freq) { + if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) < + down_sample_time) { + dbgpr("timer %d: load=%d cur=%d tgt=%d not yet\n", (int) data, cpu_load, pcpu->target_freq, new_freq); + goto rearm; + } + } + else { + if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) < + up_sample_time) { + dbgpr("timer %d: load=%d cur=%d tgt=%d not yet\n", (int) data, cpu_load, pcpu->target_freq, new_freq); + /* don't reset timer */ + goto rearm; + } + } + + if (suspending && debug_mode & LULZACTIVE_DEBUG_SUSPEND) { + LOGI("suspending: cpu_load=%d%% new_freq=%u ppcpu->policy->cur=%u\n", + cpu_load, new_freq, pcpu->policy->cur); + } + if (early_suspended && !suspending && debug_mode & LULZACTIVE_DEBUG_LOAD) { + LOGI("early_suspended: cpu_load=%d%% new_freq=%u ppcpu->policy->cur=%u\n", + cpu_load, new_freq, pcpu->policy->cur); + } + if (debug_mode & LULZACTIVE_DEBUG_LOAD && !early_suspended && !suspending) { + LOGI("cpu_load=%d%% new_freq=%u pcpu->target_freq=%u pcpu->policy->cur=%u\n", + cpu_load, new_freq, pcpu->target_freq, pcpu->policy->cur); + } + + dbgpr("timer %d: load=%d cur=%d tgt=%d queue\n", (int) data, cpu_load, pcpu->target_freq, new_freq); + + if (new_freq < pcpu->target_freq) { + pcpu->target_freq = new_freq; + spin_lock(&down_cpumask_lock); + cpumask_set_cpu(data, &down_cpumask); + spin_unlock(&down_cpumask_lock); + queue_work(down_wq, &freq_scale_down_work); + } else { + pcpu->target_freq = new_freq; +#if DEBUG + up_request_time = ktime_to_us(ktime_get()); +#endif + spin_lock(&up_cpumask_lock); + cpumask_set_cpu(data, &up_cpumask); + spin_unlock(&up_cpumask_lock); + wake_up_process(up_task); + } + +rearm_if_notmax: + /* + * Already set max speed and don't see a need to change that, + * wait until next idle to re-evaluate, don't need timer. + */ + if (pcpu->target_freq == pcpu->policy->max) + goto exit; + +rearm: + if (!timer_pending(&pcpu->cpu_timer)) { + /* + * If already at min: if that CPU is idle, don't set timer. + * Else cancel the timer if that CPU goes idle. We don't + * need to re-evaluate speed until the next idle exit. + */ + if (pcpu->target_freq == pcpu->policy->min) { + smp_rmb(); + + if (pcpu->idling) { + dbgpr("timer %d: cpu idle, don't re-arm\n", (int) data); + goto exit; + } + + pcpu->timer_idlecancel = 1; + } + + pcpu->time_in_idle = get_cpu_idle_time_us( + data, &pcpu->idle_exit_time); + mod_timer(&pcpu->cpu_timer, jiffies + 2); + dbgpr("timer %d: set timer for %lu exit=%llu\n", (int) data, pcpu->cpu_timer.expires, pcpu->idle_exit_time); + } + +exit: + return; +} + +static void cpufreq_lulzactive_idle(void) +{ + struct cpufreq_lulzactive_cpuinfo *pcpu = + &per_cpu(cpuinfo, smp_processor_id()); + int pending; + + if (!pcpu->governor_enabled) { + pm_idle_old(); + return; + } + + pcpu->idling = 1; + smp_wmb(); + pending = timer_pending(&pcpu->cpu_timer); + + if (pcpu->target_freq != pcpu->policy->min) { +#ifdef CONFIG_SMP + /* + * Entering idle while not at lowest speed. On some + * platforms this can hold the other CPU(s) at that speed + * even though the CPU is idle. Set a timer to re-evaluate + * speed so this idle CPU doesn't hold the other CPUs above + * min indefinitely. This should probably be a quirk of + * the CPUFreq driver. 
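+	 * The timer armed here fires again within a couple of jiffies, so an
+	 * idle CPU left above the minimum re-evaluates its speed promptly.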
+ */ + if (!pending) { + pcpu->time_in_idle = get_cpu_idle_time_us( + smp_processor_id(), &pcpu->idle_exit_time); + pcpu->timer_idlecancel = 0; + mod_timer(&pcpu->cpu_timer, jiffies + 2); + dbgpr("idle: enter at %d, set timer for %lu exit=%llu\n", + pcpu->target_freq, pcpu->cpu_timer.expires, + pcpu->idle_exit_time); + } +#endif + } else { + /* + * If at min speed and entering idle after load has + * already been evaluated, and a timer has been set just in + * case the CPU suddenly goes busy, cancel that timer. The + * CPU didn't go busy; we'll recheck things upon idle exit. + */ + if (pending && pcpu->timer_idlecancel) { + dbgpr("idle: cancel timer for %lu\n", pcpu->cpu_timer.expires); + del_timer(&pcpu->cpu_timer); + /* + * Ensure last timer run time is after current idle + * sample start time, so next idle exit will always + * start a new idle sampling period. + */ + pcpu->idle_exit_time = 0; + pcpu->timer_idlecancel = 0; + } + } + + pm_idle_old(); + pcpu->idling = 0; + smp_wmb(); + + /* + * Arm the timer for 1-2 ticks later if not already, and if the timer + * function has already processed the previous load sampling + * interval. (If the timer is not pending but has not processed + * the previous interval, it is probably racing with us on another + * CPU. Let it compute load based on the previous sample and then + * re-arm the timer for another interval when it's done, rather + * than updating the interval start time to be "now", which doesn't + * give the timer function enough time to make a decision on this + * run.) + */ + if (timer_pending(&pcpu->cpu_timer) == 0 && + pcpu->timer_run_time >= pcpu->idle_exit_time) { + pcpu->time_in_idle = + get_cpu_idle_time_us(smp_processor_id(), + &pcpu->idle_exit_time); + pcpu->timer_idlecancel = 0; + mod_timer(&pcpu->cpu_timer, jiffies + 2); + dbgpr("idle: exit, set timer for %lu exit=%llu\n", pcpu->cpu_timer.expires, pcpu->idle_exit_time); +#if DEBUG + } else if (timer_pending(&pcpu->cpu_timer) == 0 && + pcpu->timer_run_time < pcpu->idle_exit_time) { + dbgpr("idle: timer not run yet: exit=%llu tmrrun=%llu\n", + pcpu->idle_exit_time, pcpu->timer_run_time); +#endif + } + +} + +static int cpufreq_lulzactive_up_task(void *data) +{ + unsigned int cpu; + cpumask_t tmp_mask; + struct cpufreq_lulzactive_cpuinfo *pcpu; + +#if DEBUG + u64 now; + u64 then; + unsigned int lat; +#endif + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + spin_lock(&up_cpumask_lock); + + if (cpumask_empty(&up_cpumask)) { + spin_unlock(&up_cpumask_lock); + schedule(); + + if (kthread_should_stop()) + break; + + spin_lock(&up_cpumask_lock); + } + + set_current_state(TASK_RUNNING); + +#if DEBUG + then = up_request_time; + now = ktime_to_us(ktime_get()); + + if (now > then) { + lat = ktime_to_us(ktime_get()) - then; + + if (lat > up_max_latency) + up_max_latency = lat; + } +#endif + + tmp_mask = up_cpumask; + cpumask_clear(&up_cpumask); + spin_unlock(&up_cpumask_lock); + + for_each_cpu(cpu, &tmp_mask) { + pcpu = &per_cpu(cpuinfo, cpu); + + if (nr_running() == 1) { + dbgpr("up %d: tgt=%d nothing else running\n", cpu, + pcpu->target_freq); + } + + __cpufreq_driver_target(pcpu->policy, + pcpu->target_freq, + CPUFREQ_RELATION_H); + pcpu->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu, + &pcpu->freq_change_time); + dbgpr("up %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur); + } + } + + return 0; +} + +static void cpufreq_lulzactive_freq_down(struct work_struct *work) +{ + unsigned int cpu; + cpumask_t tmp_mask; + struct cpufreq_lulzactive_cpuinfo 
*pcpu; + + spin_lock(&down_cpumask_lock); + tmp_mask = down_cpumask; + cpumask_clear(&down_cpumask); + spin_unlock(&down_cpumask_lock); + + for_each_cpu(cpu, &tmp_mask) { + pcpu = &per_cpu(cpuinfo, cpu); + __cpufreq_driver_target(pcpu->policy, + pcpu->target_freq, + CPUFREQ_RELATION_H); + pcpu->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu, + &pcpu->freq_change_time); + dbgpr("down %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur); + } +} + +// inc_cpu_load +static ssize_t show_inc_cpu_load(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", inc_cpu_load); +} + +static ssize_t store_inc_cpu_load(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + ssize_t ret; + if(strict_strtoul(buf, 0, &inc_cpu_load)==-EINVAL) return -EINVAL; + + if (inc_cpu_load > 100) { + inc_cpu_load = 100; + } + else if (inc_cpu_load < 10) { + inc_cpu_load = 10; + } + return count; +} + +static struct global_attr inc_cpu_load_attr = __ATTR(inc_cpu_load, 0666, + show_inc_cpu_load, store_inc_cpu_load); + +// down_sample_time +static ssize_t show_down_sample_time(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", down_sample_time); +} + +static ssize_t store_down_sample_time(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + if(strict_strtoul(buf, 0, &down_sample_time)==-EINVAL) return -EINVAL; + return count; +} + +static struct global_attr down_sample_time_attr = __ATTR(down_sample_time, 0666, + show_down_sample_time, store_down_sample_time); + +// up_sample_time +static ssize_t show_up_sample_time(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", up_sample_time); +} + +static ssize_t store_up_sample_time(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + if(strict_strtoul(buf, 0, &up_sample_time)==-EINVAL) return -EINVAL; + return count; +} + +static struct global_attr up_sample_time_attr = __ATTR(up_sample_time, 0666, + show_up_sample_time, store_up_sample_time); + +// debug_mode +static ssize_t show_debug_mode(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", debug_mode); +} + +static ssize_t store_debug_mode(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + if(strict_strtoul(buf, 0, &debug_mode)==-EINVAL) return -EINVAL; + return count; +} + +static struct global_attr debug_mode_attr = __ATTR(debug_mode, 0666, + show_debug_mode, store_debug_mode); + +// pump_up_step +static ssize_t show_pump_up_step(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", pump_up_step); +} + +static ssize_t store_pump_up_step(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + if(strict_strtoul(buf, 0, &pump_up_step)==-EINVAL) return -EINVAL; + return count; +} + +static struct global_attr pump_up_step_attr = __ATTR(pump_up_step, 0666, + show_pump_up_step, store_pump_up_step); + +// pump_down_step +static ssize_t show_pump_down_step(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", pump_down_step); +} + +static ssize_t store_pump_down_step(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + ssize_t ret; + struct cpufreq_lulzactive_cpuinfo *pcpu; + + if(strict_strtoul(buf, 0, &pump_down_step)==-EINVAL) return -EINVAL; + + pcpu = &per_cpu(cpuinfo, 0); + // fix out of 
bound + if (pcpu->freq_table_size <= pump_down_step) { + pump_down_step = pcpu->freq_table_size - 1; + } + return count; +} + +static struct global_attr pump_down_step_attr = __ATTR(pump_down_step, 0666, + show_pump_down_step, store_pump_down_step); + +// screen_off_min_step +static ssize_t show_screen_off_min_step(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct cpufreq_lulzactive_cpuinfo *pcpu; + + pcpu = &per_cpu(cpuinfo, 0); + fix_screen_off_min_step(pcpu); + + return sprintf(buf, "%lu\n", screen_off_min_step); +} + +static ssize_t store_screen_off_min_step(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + struct cpufreq_lulzactive_cpuinfo *pcpu; + ssize_t ret; + + if(strict_strtoul(buf, 0, &screen_off_min_step)==-EINVAL) return -EINVAL; + + pcpu = &per_cpu(cpuinfo, 0); + fix_screen_off_min_step(pcpu); + + return count; +} + +static struct global_attr screen_off_min_step_attr = __ATTR(screen_off_min_step, 0666, + show_screen_off_min_step, store_screen_off_min_step); + +// author +static ssize_t show_author(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%s\n", LULZACTIVE_AUTHOR); +} + +static struct global_attr author_attr = __ATTR(author, 0444, + show_author, NULL); + +// tuner +static ssize_t show_tuner(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%s\n", LULZACTIVE_TUNER); +} + +static struct global_attr tuner_attr = __ATTR(tuner, 0444, + show_tuner, NULL); + +// version +static ssize_t show_version(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", LULZACTIVE_VERSION); +} + +static struct global_attr version_attr = __ATTR(version, 0444, + show_version, NULL); + +// freq_table +static ssize_t show_freq_table(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct cpufreq_lulzactive_cpuinfo *pcpu; + char temp[64]; + int i; + + pcpu = &per_cpu(cpuinfo, 0); + + for (i = 0; i < pcpu->freq_table_size; i++) { + sprintf(temp, "%u\n", pcpu->freq_table[i].frequency); + strcat(buf, temp); + } + + return strlen(buf); +} + +static struct global_attr freq_table_attr = __ATTR(freq_table, 0444, + show_freq_table, NULL); + +static struct attribute *lulzactive_attributes[] = { + &inc_cpu_load_attr.attr, + &up_sample_time_attr.attr, + &down_sample_time_attr.attr, + &pump_up_step_attr.attr, + &pump_down_step_attr.attr, + &screen_off_min_step_attr.attr, + &debug_mode_attr.attr, + &author_attr.attr, + &tuner_attr.attr, + &version_attr.attr, + &freq_table_attr.attr, + NULL, +}; + +static struct attribute_group lulzactive_attr_group = { + .attrs = lulzactive_attributes, + .name = "lulzactive", +}; + +static int cpufreq_governor_lulzactive(struct cpufreq_policy *new_policy, + unsigned int event) +{ + int rc; + struct cpufreq_lulzactive_cpuinfo *pcpu = + &per_cpu(cpuinfo, new_policy->cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if (debug_mode & LULZACTIVE_DEBUG_START_STOP) { + LOGI("CPUFREQ_GOV_START\n"); + } + if (!cpu_online(new_policy->cpu)) + return -EINVAL; + + pcpu->policy = new_policy; + pcpu->freq_table = cpufreq_frequency_get_table(new_policy->cpu); + pcpu->target_freq = new_policy->cur; + pcpu->freq_change_time_in_idle = + get_cpu_idle_time_us(new_policy->cpu, + &pcpu->freq_change_time); + pcpu->governor_enabled = 1; + pcpu->freq_table_size = get_freq_table_size(pcpu->freq_table); + + // fix invalid screen_off_min_step + fix_screen_off_min_step(pcpu); + + /* + * Do not register the idle hook and create 
sysfs + * entries if we have already done so. + */ + if (atomic_inc_return(&active_count) > 1) + return 0; + + rc = sysfs_create_group(cpufreq_global_kobject, + &lulzactive_attr_group); + if (rc) + return rc; + + pm_idle_old = pm_idle; + pm_idle = cpufreq_lulzactive_idle; + break; + + case CPUFREQ_GOV_STOP: + if (debug_mode & LULZACTIVE_DEBUG_START_STOP) { + LOGI("CPUFREQ_GOV_STOP\n"); + } + pcpu->governor_enabled = 0; + + if (atomic_dec_return(&active_count) > 0) + return 0; + + sysfs_remove_group(cpufreq_global_kobject, + &lulzactive_attr_group); + + pm_idle = pm_idle_old; + del_timer(&pcpu->cpu_timer); + break; + + case CPUFREQ_GOV_LIMITS: + if (new_policy->max < new_policy->cur) + __cpufreq_driver_target(new_policy, + new_policy->max, CPUFREQ_RELATION_H); + else if (new_policy->min > new_policy->cur) + __cpufreq_driver_target(new_policy, + new_policy->min, CPUFREQ_RELATION_L); + break; + } + return 0; +} + +static void lulzactive_early_suspend(struct early_suspend *handler) { + struct cpufreq_lulzactive_cpuinfo *pcpu; + unsigned int min_freq, max_freq; + + early_suspended = 1; + + if (debug_mode & LULZACTIVE_DEBUG_EARLY_SUSPEND) { + LOGI("%s\n", __func__); + + pcpu = &per_cpu(cpuinfo, 0); + + min_freq = pcpu->policy->min; + + max_freq = min(pcpu->policy->max, pcpu->freq_table[screen_off_min_step].frequency); + max_freq = max(max_freq, min_freq); + + LOGI("lock @%u~@%uMHz\n", min_freq / 1000, max_freq / 1000); + } +} + +static void lulzactive_late_resume(struct early_suspend *handler) { + early_suspended = 0; + if (debug_mode & LULZACTIVE_DEBUG_EARLY_SUSPEND) { + LOGI("%s\n", __func__); + } +} + +static struct early_suspend lulzactive_power_suspend = { + .suspend = lulzactive_early_suspend, + .resume = lulzactive_late_resume, + .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, +}; + +static int lulzactive_pm_notifier_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct cpufreq_policy* policy; + + switch (event) { + case PM_SUSPEND_PREPARE: + suspending = 1; + if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { + LOGI("PM_SUSPEND_PREPARE"); + policy = cpufreq_cpu_get(0); + if (policy) { + LOGI("PM_SUSPEND_PREPARE using @%uMHz\n", policy->cur); + } + } + break; + case PM_POST_SUSPEND: + suspending = 0; + if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { + LOGI("PM_POST_SUSPEND"); + policy = cpufreq_cpu_get(0); + if (policy) { + LOGI("PM_POST_SUSPEND using @%uMHz\n", policy->cur); + } + } + break; + case PM_RESTORE_PREPARE: + if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { + LOGI("PM_RESTORE_PREPARE"); + } + break; + case PM_POST_RESTORE: + if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { + LOGI("PM_POST_RESTORE"); + } + break; + case PM_HIBERNATION_PREPARE: + if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { + LOGI("PM_HIBERNATION_PREPARE"); + } + break; + case PM_POST_HIBERNATION: + if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { + LOGI("PM_POST_HIBERNATION"); + } + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block lulzactive_pm_notifier = { + .notifier_call = lulzactive_pm_notifier_event, +}; + +static int __init cpufreq_lulzactive_init(void) +{ + unsigned int i; + struct cpufreq_lulzactive_cpuinfo *pcpu; + struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; + + up_sample_time = DEFAULT_UP_SAMPLE_TIME; + down_sample_time = DEFAULT_DOWN_SAMPLE_TIME; + debug_mode = DEFAULT_DEBUG_MODE; + inc_cpu_load = DEFAULT_INC_CPU_LOAD; + dec_cpu_load = DEFAULT_DEC_CPU_LOAD; + pump_up_step = DEFAULT_PUMP_UP_STEP; + pump_down_step = DEFAULT_PUMP_DOWN_STEP; + early_suspended = 
0; + suspending = 0; + screen_off_min_step = DEFAULT_SCREEN_OFF_MIN_STEP; + + /* Initalize per-cpu timers */ + for_each_possible_cpu(i) { + pcpu = &per_cpu(cpuinfo, i); + init_timer(&pcpu->cpu_timer); + pcpu->cpu_timer.function = cpufreq_lulzactive_timer; + pcpu->cpu_timer.data = i; + } + + up_task = kthread_create(cpufreq_lulzactive_up_task, NULL, + "klulzactiveup"); + if (IS_ERR(up_task)) + return PTR_ERR(up_task); + + sched_setscheduler_nocheck(up_task, SCHED_FIFO, ¶m); + get_task_struct(up_task); + + /* No rescuer thread, bind to CPU queuing the work for possibly + warm cache (probably doesn't matter much). */ + down_wq = create_workqueue("klulzactive_down"); + + if (! down_wq) + goto err_freeuptask; + + INIT_WORK(&freq_scale_down_work, + cpufreq_lulzactive_freq_down); + +#if DEBUG + spin_lock_init(&dbgpr_lock); + dbg_proc = create_proc_entry("igov", S_IWUSR | S_IRUGO, NULL); + dbg_proc->read_proc = dbg_proc_read; +#endif + spin_lock_init(&down_cpumask_lock); + spin_lock_init(&up_cpumask_lock); + + register_pm_notifier(&lulzactive_pm_notifier); + register_early_suspend(&lulzactive_power_suspend); + + return cpufreq_register_governor(&cpufreq_gov_lulzactive); + +err_freeuptask: + put_task_struct(up_task); + return -ENOMEM; +} + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_LULZACTIVE +fs_initcall(cpufreq_lulzactive_init); +#else +module_init(cpufreq_lulzactive_init); +#endif + +static void __exit cpufreq_lulzactive_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_lulzactive); + unregister_early_suspend(&lulzactive_power_suspend); + unregister_pm_notifier(&lulzactive_pm_notifier); + kthread_stop(up_task); + put_task_struct(up_task); + destroy_workqueue(down_wq); +} + +module_exit(cpufreq_lulzactive_exit); + +MODULE_AUTHOR("Tegrak "); +MODULE_DESCRIPTION("'lulzactive' - improved interactive governor inspired by smartass"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cpufreq_minmax.c b/drivers/cpufreq/cpufreq_minmax.c new file mode 100644 index 00000000..09dba0d2 --- /dev/null +++ b/drivers/cpufreq/cpufreq_minmax.c @@ -0,0 +1,575 @@ +/* + * drivers/cpufreq/cpufreq_minmax.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * (C) 2004 Alexander Clouter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This governor is an adapatation of the conservative governor. + * See the Documentation/cpu-freq/governors.txt for more information. + * + * Adapatation from conservative by Erasmux. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_UP_THRESHOLD (92) +#define DEF_FREQUENCY_DOWN_THRESHOLD (27) + +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers + * with CPUFREQ_ETERNAL), this governor will not work. + * All times here are in uS. 
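+ * Note that this file #defines its own values for
+ * CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER (500) and
+ * CONFIG_CPU_FREQ_MIN_TICKS (2) below, independent of the Kconfig options
+ * of the same name used by the other governors.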
+ */ +static unsigned int def_sampling_rate; +#define MIN_SAMPLING_RATE_RATIO (2) +/* for correct statistics, we need at least 10 ticks between each measure */ +#define MIN_STAT_SAMPLING_RATE \ + (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(CONFIG_CPU_FREQ_MIN_TICKS)) +#define MIN_SAMPLING_RATE \ + (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) +#define MAX_SAMPLING_RATE (500 * def_sampling_rate) +#define DEF_SAMPLING_DOWN_FACTOR (10) +#define MAX_SAMPLING_DOWN_FACTOR (100) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) +#define CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER (500) +#define CONFIG_CPU_FREQ_MIN_TICKS (2) + +static void do_dbs_timer(struct work_struct *work); + +struct cpu_dbs_info_s { + struct cpufreq_policy *cur_policy; + unsigned int prev_cpu_idle_up; + unsigned int prev_cpu_idle_down; + unsigned int enable; + unsigned int down_skip; + unsigned int requested_freq; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug + * lock and dbs_mutex. cpu_hotplug lock should always be held before + * dbs_mutex. If any function that can potentially take cpu_hotplug lock + * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then + * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock + * is recursive for the same process. -Venki + */ +static DEFINE_MUTEX (dbs_mutex); +static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer); + +struct dbs_tuners { + unsigned int sampling_rate; + unsigned int sampling_down_factor; + unsigned int up_threshold; + unsigned int down_threshold; + unsigned int ignore_nice; +}; + +static struct dbs_tuners dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .ignore_nice = 0, +}; + +static inline unsigned int get_cpu_idle_time(unsigned int cpu) +{ + unsigned int add_nice = 0, ret; + + if (dbs_tuners_ins.ignore_nice) + add_nice = kstat_cpu(cpu).cpustat.nice; + + ret = kstat_cpu(cpu).cpustat.idle + + kstat_cpu(cpu).cpustat.iowait + + add_nice; + + return ret; +} + +/* keep track of frequency transitions */ +static int +dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_freqs *freq = data; + struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, + freq->cpu); + + if (!this_dbs_info->enable) + return 0; + + this_dbs_info->requested_freq = freq->new; + + return 0; +} + +static struct notifier_block dbs_cpufreq_notifier_block = { + .notifier_call = dbs_cpufreq_notifier +}; + +/************************** sysfs interface ************************/ +static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) +{ + return sprintf (buf, "%u\n", MAX_SAMPLING_RATE); +} + +static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) +{ + return sprintf (buf, "%u\n", MIN_SAMPLING_RATE); +} + +#define define_one_ro(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +define_one_ro(sampling_rate_max); +define_one_ro(sampling_rate_min); + +/* cpufreq_minmax Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct cpufreq_policy *unused, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(sampling_down_factor, 
sampling_down_factor); +show_one(up_threshold, up_threshold); +show_one(down_threshold, down_threshold); +show_one(ignore_nice_load, ignore_nice); + +static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf (buf, "%u", &input); + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.sampling_down_factor = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_sampling_rate(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf (buf, "%u", &input); + + mutex_lock(&dbs_mutex); + if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) { + mutex_unlock(&dbs_mutex); + return -EINVAL; + } + + dbs_tuners_ins.sampling_rate = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_up_threshold(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf (buf, "%u", &input); + + mutex_lock(&dbs_mutex); + if (ret != 1 || input > 100 || input <= dbs_tuners_ins.down_threshold) { + mutex_unlock(&dbs_mutex); + return -EINVAL; + } + + dbs_tuners_ins.up_threshold = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_down_threshold(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf (buf, "%u", &input); + + mutex_lock(&dbs_mutex); + if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) { + mutex_unlock(&dbs_mutex); + return -EINVAL; + } + + dbs_tuners_ins.down_threshold = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + mutex_lock(&dbs_mutex); + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + mutex_unlock(&dbs_mutex); + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(cpu_dbs_info, j); + j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j); + j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up; + } + mutex_unlock(&dbs_mutex); + + return count; +} + +#define define_one_rw(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0644, show_##_name, store_##_name) + +define_one_rw(sampling_rate); +define_one_rw(sampling_down_factor); +define_one_rw(up_threshold); +define_one_rw(down_threshold); +define_one_rw(ignore_nice_load); + +static struct attribute * dbs_attributes[] = { + &sampling_rate_max.attr, + &sampling_rate_min.attr, + &sampling_rate.attr, + &sampling_down_factor.attr, + &up_threshold.attr, + &down_threshold.attr, + &ignore_nice_load.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "minmax", +}; + +/************************** sysfs end ************************/ + +static void dbs_check_cpu(int cpu) +{ + unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; + unsigned int tmp_idle_ticks, total_idle_ticks; + //unsigned int freq_target; + unsigned int freq_down_sampling_rate; + struct cpu_dbs_info_s *this_dbs_info = 
&per_cpu(cpu_dbs_info, cpu); + struct cpufreq_policy *policy; + + if (!this_dbs_info->enable) + return; + + policy = this_dbs_info->cur_policy; + + /* + * The default safe range is 20% to 80% + * Every sampling_rate, we check + * - If current idle time is less than 20%, then we try to + * increase frequency + * Every sampling_rate*sampling_down_factor, we check + * - If current idle time is more than 80%, then we try to + * decrease frequency + * + */ + + this_dbs_info->down_skip++; + + /* Check for frequency increase */ + idle_ticks = UINT_MAX; + + /* Check for frequency increase */ + total_idle_ticks = get_cpu_idle_time(cpu); + tmp_idle_ticks = total_idle_ticks - + this_dbs_info->prev_cpu_idle_up; + this_dbs_info->prev_cpu_idle_up = total_idle_ticks; + + if (tmp_idle_ticks < idle_ticks) + idle_ticks = tmp_idle_ticks; + + /* Scale idle ticks by 100 and compare with up and down ticks */ + idle_ticks *= 100; + up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) * + usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + if (idle_ticks < up_idle_ticks) { + this_dbs_info->down_skip = 0; + this_dbs_info->prev_cpu_idle_down = + this_dbs_info->prev_cpu_idle_up; + + /* if we are already at full speed then break out early */ + if (this_dbs_info->requested_freq == policy->max) + return; + + this_dbs_info->requested_freq = policy->max; + + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, + CPUFREQ_RELATION_H); + return; + } + + /* Check for frequency decrease */ + if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor) + return; + else this_dbs_info->down_skip--; /* just to prevent overflow */ + + + /* Check for frequency decrease */ + total_idle_ticks = this_dbs_info->prev_cpu_idle_up; + tmp_idle_ticks = total_idle_ticks - + this_dbs_info->prev_cpu_idle_down; + this_dbs_info->prev_cpu_idle_down = total_idle_ticks; + + if (tmp_idle_ticks < idle_ticks) + idle_ticks = tmp_idle_ticks; + + /* Scale idle ticks by 100 and compare with up and down ticks */ + idle_ticks *= 100; + + freq_down_sampling_rate = dbs_tuners_ins.sampling_rate * + dbs_tuners_ins.sampling_down_factor; + down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * + usecs_to_jiffies(freq_down_sampling_rate); + + if (idle_ticks > down_idle_ticks) { + /* + * if we are already at the lowest speed then break out early + * or if we 'cannot' reduce the speed as the user might want + * freq_target to be zero + */ + if (this_dbs_info->requested_freq == policy->min) + return; + + this_dbs_info->requested_freq = policy->min; + + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, + CPUFREQ_RELATION_H); + return; + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + int i; + + mutex_lock(&dbs_mutex); + for_each_online_cpu(i) + dbs_check_cpu(i); + schedule_delayed_work(&dbs_work, + usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); + mutex_unlock(&dbs_mutex); +} + +static inline void dbs_timer_init(void) +{ + init_timer_deferrable(&dbs_work.timer); + schedule_delayed_work(&dbs_work, + usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); + return; +} + +static inline void dbs_timer_exit(void) +{ + cancel_delayed_work(&dbs_work); + return; +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + if 
(this_dbs_info->enable) /* Already enabled */ + break; + + mutex_lock(&dbs_mutex); + + rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu); + j_dbs_info->prev_cpu_idle_down + = j_dbs_info->prev_cpu_idle_up; + } + this_dbs_info->enable = 1; + this_dbs_info->down_skip = 0; + this_dbs_info->requested_freq = policy->cur; + + dbs_enable++; + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + /* policy latency is in nS. Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + + def_sampling_rate = 10 * latency * + CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER; + + if (def_sampling_rate < MIN_STAT_SAMPLING_RATE) + def_sampling_rate = MIN_STAT_SAMPLING_RATE; + + dbs_tuners_ins.sampling_rate = def_sampling_rate; + + dbs_timer_init(); + cpufreq_register_notifier( + &dbs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + + mutex_unlock(&dbs_mutex); + break; + + case CPUFREQ_GOV_STOP: + mutex_lock(&dbs_mutex); + this_dbs_info->enable = 0; + sysfs_remove_group(&policy->kobj, &dbs_attr_group); + dbs_enable--; + /* + * Stop the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 0) { + dbs_timer_exit(); + cpufreq_unregister_notifier( + &dbs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + + mutex_unlock(&dbs_mutex); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&dbs_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target( + this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target( + this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&dbs_mutex); + break; + } + return 0; +} + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_MINMAX +static +#endif +struct cpufreq_governor cpufreq_gov_minmax = { + .name = "minmax", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +static int __init cpufreq_gov_dbs_init(void) +{ + return cpufreq_register_governor(&cpufreq_gov_minmax); +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + /* Make sure that the scheduled work is indeed not running */ + flush_scheduled_work(); + + cpufreq_unregister_governor(&cpufreq_gov_minmax); +} + +MODULE_AUTHOR ("Erasmux"); +MODULE_DESCRIPTION ("'cpufreq_minmax' - A dynamic cpufreq governor which " + "minimizes the frequecy jumps by always selecting either " + "the minimal or maximal frequency"); +MODULE_LICENSE ("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_MINMAX +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_smartass.c b/drivers/cpufreq/cpufreq_smartass.c new file mode 100644 index 00000000..0ba3ee61 --- /dev/null +++ b/drivers/cpufreq/cpufreq_smartass.c @@ -0,0 +1,642 @@ +/* + * drivers/cpufreq/cpufreq_smartass.c + * + * Copyright (C) 2010 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Author: Erasmux + * + * Based on the interactive governor By Mike Chan (mike@android.com) + * which was adaptated to 2.6.29 kernel by Nadlabak (pavel@doshaska.net) + * + * requires to add + * EXPORT_SYMBOL_GPL(nr_running); + * at the end of kernel/sched.c + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void (*pm_idle_old)(void); +static atomic_t active_count = ATOMIC_INIT(0); + +struct smartass_info_s { + struct cpufreq_policy *cur_policy; + struct timer_list timer; + u64 time_in_idle; + u64 idle_exit_time; + unsigned int force_ramp_up; + unsigned int enable; +}; +static DEFINE_PER_CPU(struct smartass_info_s, smartass_info); + +/* Workqueues handle frequency scaling */ +static struct workqueue_struct *up_wq; +static struct workqueue_struct *down_wq; +static struct work_struct freq_scale_work; + +static u64 freq_change_time; +static u64 freq_change_time_in_idle; + +static cpumask_t work_cpumask; +static unsigned int suspended; + + +/* + * The minimum amount of time to spend at a frequency before we can ramp down, + * default is 45ms. + */ +#define DEFAULT_RAMP_DOWN_RATE_NS 45000; +static unsigned long ramp_down_rate_ns; + +/* + * When ramping up frequency jump to at least this frequency. + */ + +#define DEFAULT_UP_MIN_FREQ (800*1000) +static unsigned int up_min_freq; + +/* + * When sleep_max_freq>0 the frequency when suspended will be capped + * by this frequency. Also will wake up at max frequency of policy + * to minimize wakeup issues. + * Set sleep_max_freq=0 to disable this behavior. + */ +#define DEFAULT_SLEEP_MAX_FREQ (400*1000) +static unsigned int sleep_max_freq; + +/* + * Sampling rate, I highly recommend to leave it at 2. + */ +#define DEFAULT_SAMPLE_RATE_JIFFIES 2 +static unsigned int sample_rate_jiffies; + +/* + * Max freqeuncy delta when ramping up. + */ + +#define DEFAULT_MAX_RAMP_UP (300 * 1000) +static unsigned int max_ramp_up; + +/* + * CPU freq will be increased if measured load > max_cpu_load; + */ +#define DEFAULT_MAX_CPU_LOAD 60 +static unsigned long max_cpu_load; + +/* + * CPU freq will be decreased if measured load < min_cpu_load; + */ +#define DEFAULT_MIN_CPU_LOAD 30 +static unsigned long min_cpu_load; + +//Leave this zero by default, people can tweak it if they so wish. 
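+
+/*
+ * Illustrative sketch (editor's addition, not part of the original patch):
+ * the load figure that cpufreq_smartass_calc_freq() below compares against
+ * min_cpu_load/max_cpu_load is simply the busy percentage of the wall time
+ * elapsed since the last frequency change. A minimal, self-contained
+ * version of that calculation, using a hypothetical helper name:
+ *
+ *	static inline unsigned int smartass_example_load_pct(unsigned int idle_us,
+ *							      unsigned int wall_us)
+ *	{
+ *		if (wall_us == 0 || idle_us >= wall_us)
+ *			return 0;	// treat an empty or fully idle window as 0% load
+ *		return (100 * (wall_us - idle_us)) / wall_us;
+ *	}
+ */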
+#define DEFAULT_RAMP_UP_RATE_NS 0 +static unsigned long ramp_up_rate_ns; + + +static int cpufreq_governor_smartass(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS +static +#endif +struct cpufreq_governor cpufreq_gov_smartass = { + .name = "smartass", + .governor = cpufreq_governor_smartass, + .max_transition_latency = 9000000, + .owner = THIS_MODULE, +}; + +static void cpufreq_smartass_timer(unsigned long data) +{ + u64 delta_idle; + u64 update_time; + u64 now_idle; + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, data); + struct cpufreq_policy *policy = this_smartass->cur_policy; + + now_idle = get_cpu_idle_time_us(data, &update_time); + + if (update_time == this_smartass->idle_exit_time) + return; + + delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle); + //printk(KERN_INFO "smartass: t=%llu i=%llu\n",cputime64_sub(update_time,this_smartass->idle_exit_time),delta_idle); + + /* Scale up if there were no idle cycles since coming out of idle */ + if (delta_idle == 0 && cputime64_sub(update_time, freq_change_time) > ramp_up_rate_ns) { + if (policy->cur == policy->max) + return; + + if (nr_running() < 1) + return; + + this_smartass->force_ramp_up = 1; + cpumask_set_cpu(data, &work_cpumask); + queue_work(up_wq, &freq_scale_work); + return; + } + + /* + * There is a window where if the cpu utlization can go from low to high + * between the timer expiring, delta_idle will be > 0 and the cpu will + * be 100% busy, preventing idle from running, and this timer from + * firing. So setup another timer to fire to check cpu utlization. + * Do not setup the timer if there is no scheduled work. + */ + if (!timer_pending(&this_smartass->timer) && nr_running() > 0) { + this_smartass->time_in_idle = get_cpu_idle_time_us( + data, &this_smartass->idle_exit_time); + mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies); + } + + if (policy->cur == policy->min) + return; + + /* + * Do not scale down unless we have been at this frequency for the + * minimum sample time. + */ + if (cputime64_sub(update_time, freq_change_time) < ramp_down_rate_ns) + return; + + + cpumask_set_cpu(data, &work_cpumask); + queue_work(down_wq, &freq_scale_work); +} + +static void cpufreq_idle(void) +{ + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); + struct cpufreq_policy *policy = this_smartass->cur_policy; + + pm_idle_old(); + + if (!cpumask_test_cpu(smp_processor_id(), policy->cpus)) + return; + + /* Timer to fire in 1-2 ticks, jiffie aligned. */ + if (timer_pending(&this_smartass->timer) == 0) { + this_smartass->time_in_idle = get_cpu_idle_time_us( + smp_processor_id(), &this_smartass->idle_exit_time); + mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies); + } +} + +/* + * Choose the cpu frequency based off the load. For now choose the minimum + * frequency that will satisfy the load, which is no +t always the lower power. 
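+ *
+ * (Editor's note, not part of the original patch: the step sizes assumed in
+ * the code below are hardware specific. Under 1.2 GHz the frequency table
+ * is walked in 200 MHz steps, with a single 400 MHz hole between 400 and
+ * 800 MHz; above 1.2 GHz it is walked in 100 MHz steps. So, for example,
+ * an underloaded CPU sitting at 800 MHz is stepped straight down to
+ * 400 MHz.)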
+ */
+static unsigned int cpufreq_smartass_calc_freq(unsigned int cpu, struct cpufreq_policy *policy)
+{
+	unsigned int delta_time;
+	unsigned int idle_time;
+	unsigned int cpu_load;
+	unsigned int new_freq;
+	u64 current_wall_time;
+	u64 current_idle_time;
+
+
+	current_idle_time = get_cpu_idle_time_us(cpu, &current_wall_time);
+
+	idle_time = (unsigned int)( current_idle_time - freq_change_time_in_idle );
+	delta_time = (unsigned int)( current_wall_time - freq_change_time );
+
+	cpu_load = 100 * (delta_time - idle_time) / delta_time;
+	if (cpu_load < min_cpu_load) {
+		//if the current frequency is below 1.2ghz, everything is 200mhz steps
+		if(policy->cur <= 1200000 && policy->cur >= 400000) {
+/* catch the extra 200mhz gap between 400 and 800 when scaling down -netarchy */
+			if(policy->cur == 800000) {
+				new_freq = policy->cur - 400000;
+				return new_freq;
+			}
+			else {
+				new_freq = policy->cur - 200000;
+				return new_freq;
+			}
+		}
+		//above 1.2ghz though, everything is 100mhz steps
+		else {
+			new_freq = policy->cur - 100000;
+			return new_freq;
+		}
+	}
+	if (cpu_load > max_cpu_load) {
+		if(policy->cur < 1200000 && policy->cur > 100000) {
+/* catch the gap between 400 and 800 when scaling up -netarchy */
+			if(policy->cur == 400000) {
+				new_freq = policy->cur + 400000;
+				return new_freq;
+			}
+			else {
+				new_freq = policy->cur + 200000;
+				return new_freq;
+			}
+		}
+		else {
+			new_freq = policy->cur + 100000;
+			return new_freq;
+		}
+	}
+	return policy->cur;
+}
+
+/* We use the same work function to scale up and down */
+static void cpufreq_smartass_freq_change_time_work(struct work_struct *work)
+{
+	unsigned int cpu;
+	unsigned int new_freq;
+	struct smartass_info_s *this_smartass;
+	struct cpufreq_policy *policy;
+	cpumask_t tmp_mask = work_cpumask;
+	for_each_cpu(cpu, tmp_mask) {
+		this_smartass = &per_cpu(smartass_info, cpu);
+		policy = this_smartass->cur_policy;
+
+		if (this_smartass->force_ramp_up) {
+			this_smartass->force_ramp_up = 0;
+
+			if (nr_running() == 1) {
+				cpumask_clear_cpu(cpu, &work_cpumask);
+				return;
+			}
+
+			if (policy->cur == policy->max)
+				return;
+
+			new_freq = policy->cur + max_ramp_up;
+
+			if (suspended && sleep_max_freq) {
+				if (new_freq > sleep_max_freq)
+					new_freq = sleep_max_freq;
+			} else {
+				if (new_freq < up_min_freq)
+					new_freq = up_min_freq;
+			}
+
+		} else {
+			new_freq = cpufreq_smartass_calc_freq(cpu,policy);
+
+			// in suspend limit to sleep_max_freq and
+			// jump straight to sleep_max_freq to avoid wakeup problems
+			if (suspended && sleep_max_freq &&
+			    (new_freq > sleep_max_freq || new_freq > policy->cur))
+				new_freq = sleep_max_freq;
+		}
+
+		if (new_freq > policy->max)
+			new_freq = policy->max;
+
+		if (new_freq < policy->min)
+			new_freq = policy->min;
+
+		__cpufreq_driver_target(policy, new_freq,
+					CPUFREQ_RELATION_L);
+
+		freq_change_time_in_idle = get_cpu_idle_time_us(cpu,
+								&freq_change_time);
+
+		cpumask_clear_cpu(cpu, &work_cpumask);
+
+	}
+
+
+}
+
+static ssize_t show_ramp_up_rate_ns(struct cpufreq_policy *policy, char *buf) {
+	return sprintf(buf, "%lu\n", ramp_up_rate_ns);
+}
+
+static ssize_t store_ramp_up_rate_ns(struct cpufreq_policy *policy, const char *buf, size_t count) {
+	ssize_t ret;
+	unsigned long input;
+	ret = strict_strtoul(buf, 0, &input);
+	if (ret >= 0 && input >= 0 && input <= 100000000)
+		ramp_up_rate_ns = input;
+	return ret;
+}
+
+static struct freq_attr ramp_up_rate_ns_attr = __ATTR(ramp_up_rate_ns, 0644,
+	show_ramp_up_rate_ns, store_ramp_up_rate_ns);
+
+static ssize_t show_ramp_down_rate_ns(struct cpufreq_policy *policy, char *buf)
+{
+
return sprintf(buf, "%lu\n", ramp_down_rate_ns); +} + +static ssize_t store_ramp_down_rate_ns(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 1000 && input <= 100000000) + ramp_down_rate_ns = input; + return res; +} + +static struct freq_attr ramp_down_rate_ns_attr = __ATTR(ramp_down_rate_ns, 0644, + show_ramp_down_rate_ns, store_ramp_down_rate_ns); + +static ssize_t show_up_min_freq(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", up_min_freq); +} + +static ssize_t store_up_min_freq(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + up_min_freq = input; + return res; +} + +static struct freq_attr up_min_freq_attr = __ATTR(up_min_freq, 0644, + show_up_min_freq, store_up_min_freq); + +static ssize_t show_sleep_max_freq(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", sleep_max_freq); +} + +static ssize_t store_sleep_max_freq(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + sleep_max_freq = input; + return res; +} + +static struct freq_attr sleep_max_freq_attr = __ATTR(sleep_max_freq, 0644, + show_sleep_max_freq, store_sleep_max_freq); + +static ssize_t show_sample_rate_jiffies(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", sample_rate_jiffies); +} + +static ssize_t store_sample_rate_jiffies(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input <= 1000) + sample_rate_jiffies = input; + return res; +} + +static struct freq_attr sample_rate_jiffies_attr = __ATTR(sample_rate_jiffies, 0644, + show_sample_rate_jiffies, store_sample_rate_jiffies); + +static ssize_t show_max_ramp_up(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", max_ramp_up); +} + +static ssize_t store_max_ramp_up(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 10000) + max_ramp_up = input; + return res; +} + +static struct freq_attr max_ramp_up_attr = __ATTR(max_ramp_up, 0644, + show_max_ramp_up, store_max_ramp_up); + +static ssize_t show_max_cpu_load(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%lu\n", max_cpu_load); +} + +static ssize_t store_max_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input <= 100) + max_cpu_load = input; + return res; +} + +static struct freq_attr max_cpu_load_attr = __ATTR(max_cpu_load, 0644, + show_max_cpu_load, store_max_cpu_load); + +static ssize_t show_min_cpu_load(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%lu\n", min_cpu_load); +} + +static ssize_t store_min_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input < 100) + min_cpu_load = input; + return res; +} + +static struct freq_attr min_cpu_load_attr = __ATTR(min_cpu_load, 0644, + show_min_cpu_load, 
store_min_cpu_load); + +static struct attribute * smartass_attributes[] = { + &ramp_down_rate_ns_attr.attr, + &up_min_freq_attr.attr, + &sleep_max_freq_attr.attr, + &sample_rate_jiffies_attr.attr, + &max_ramp_up_attr.attr, + &max_cpu_load_attr.attr, + &min_cpu_load_attr.attr, + &ramp_up_rate_ns_attr.attr, + NULL, +}; + +static struct attribute_group smartass_attr_group = { + .attrs = smartass_attributes, + .name = "smartass", +}; + +static int cpufreq_governor_smartass(struct cpufreq_policy *new_policy, + unsigned int event) +{ + unsigned int cpu = new_policy->cpu; + int rc; + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!new_policy->cur)) + return -EINVAL; + + if (this_smartass->enable) /* Already enabled */ + break; + + /* + * Do not register the idle hook and create sysfs + * entries if we have already done so. + */ + if (atomic_inc_return(&active_count) > 1) + return 0; + + rc = sysfs_create_group(&new_policy->kobj, &smartass_attr_group); + if (rc) + return rc; + pm_idle_old = pm_idle; + pm_idle = cpufreq_idle; + + this_smartass->cur_policy = new_policy; + this_smartass->enable = 1; + + // notice no break here! + + case CPUFREQ_GOV_LIMITS: + if (this_smartass->cur_policy->cur != new_policy->max) + __cpufreq_driver_target(new_policy, new_policy->max, CPUFREQ_RELATION_H); + + break; + + case CPUFREQ_GOV_STOP: + this_smartass->enable = 0; + + if (atomic_dec_return(&active_count) > 1) + return 0; + sysfs_remove_group(&new_policy->kobj, + &smartass_attr_group); + + pm_idle = pm_idle_old; + del_timer(&this_smartass->timer); + break; + } + + return 0; +} + +static void smartass_suspend(int cpu, int suspend) +{ + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); + struct cpufreq_policy *policy = this_smartass->cur_policy; + unsigned int new_freq; + + if (!this_smartass->enable || sleep_max_freq==0) // disable behavior for sleep_max_freq==0 + return; + + if (suspend) { + if (policy->cur > sleep_max_freq) { + new_freq = sleep_max_freq; + if (new_freq > policy->max) + new_freq = policy->max; + if (new_freq < policy->min) + new_freq = policy->min; + __cpufreq_driver_target(policy, new_freq, + CPUFREQ_RELATION_H); + } + } else { // resume at max speed: + __cpufreq_driver_target(policy, policy->max, + CPUFREQ_RELATION_H); + } + +} + +static void smartass_early_suspend(struct early_suspend *handler) { + int i; + suspended = 1; + for_each_online_cpu(i) + smartass_suspend(i,1); +} + +static void smartass_late_resume(struct early_suspend *handler) { + int i; + suspended = 0; + for_each_online_cpu(i) + smartass_suspend(i,0); +} + +static struct early_suspend smartass_power_suspend = { + .suspend = smartass_early_suspend, + .resume = smartass_late_resume, +}; + +static int __init cpufreq_smartass_init(void) +{ + unsigned int i; + struct smartass_info_s *this_smartass; + ramp_down_rate_ns = DEFAULT_RAMP_DOWN_RATE_NS; + up_min_freq = DEFAULT_UP_MIN_FREQ; + sleep_max_freq = DEFAULT_SLEEP_MAX_FREQ; + sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES; + max_ramp_up = DEFAULT_MAX_RAMP_UP; + max_cpu_load = DEFAULT_MAX_CPU_LOAD; + min_cpu_load = DEFAULT_MIN_CPU_LOAD; + ramp_up_rate_ns = DEFAULT_RAMP_UP_RATE_NS; + + suspended = 0; + + /* Initalize per-cpu data: */ + for_each_possible_cpu(i) { + this_smartass = &per_cpu(smartass_info, i); + this_smartass->enable = 0; + this_smartass->force_ramp_up = 0; + this_smartass->time_in_idle = 0; + this_smartass->idle_exit_time = 0; + // 
initialize timer:
+		init_timer_deferrable(&this_smartass->timer);
+		this_smartass->timer.function = cpufreq_smartass_timer;
+		this_smartass->timer.data = i;
+	}
+
+	/* Scale up is high priority */
+	up_wq = create_workqueue("ksmartass_up");
+	down_wq = create_workqueue("ksmartass_down");
+
+	INIT_WORK(&freq_scale_work, cpufreq_smartass_freq_change_time_work);
+
+	register_early_suspend(&smartass_power_suspend);
+
+	return cpufreq_register_governor(&cpufreq_gov_smartass);
+}
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS
+pure_initcall(cpufreq_smartass_init);
+#else
+module_init(cpufreq_smartass_init);
+#endif
+
+static void __exit cpufreq_smartass_exit(void)
+{
+	cpufreq_unregister_governor(&cpufreq_gov_smartass);
+	destroy_workqueue(up_wq);
+	destroy_workqueue(down_wq);
+}
+
+module_exit(cpufreq_smartass_exit);
+
+MODULE_AUTHOR ("Erasmux");
+MODULE_DESCRIPTION ("'cpufreq_smartass' - A smart cpufreq governor optimized for the hero!");
+MODULE_LICENSE ("GPL");
+
diff --git a/drivers/cpufreq/cpufreq_smartass2.c b/drivers/cpufreq/cpufreq_smartass2.c
new file mode 100644
index 00000000..05c39ded
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_smartass2.c
@@ -0,0 +1,868 @@
+/*
+ * drivers/cpufreq/cpufreq_smartass2.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Erasmux
+ *
+ * Based on the interactive governor by Mike Chan (mike@android.com)
+ * which was adapted to 2.6.29 kernel by Nadlabak (pavel@doshaska.net)
+ *
+ * SMP support based on mod by faux123
+ *
+ * For a general overview of smartassV2 see the relevant part in
+ * Documentation/cpu-freq/governors.txt
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+/******************** Tunable parameters: ********************/
+
+/*
+ * The "ideal" frequency to use when awake. The governor will ramp up faster
+ * towards the ideal frequency and slower after it has passed it. Similarly,
+ * lowering the frequency towards the ideal frequency is faster than below it.
+ */
+#define DEFAULT_AWAKE_IDEAL_FREQ 800000
+static unsigned int awake_ideal_freq;
+
+/*
+ * The "ideal" frequency to use when suspended.
+ * When set to 0, the governor will not track the suspended state (meaning
+ * that practically when sleep_ideal_freq==0 the awake_ideal_freq is used
+ * also when suspended).
+ */
+#define DEFAULT_SLEEP_IDEAL_FREQ 100000
+static unsigned int sleep_ideal_freq;
+
+/*
+ * Frequency delta when ramping up above the ideal frequency.
+ * Zero disables and causes to always jump straight to max frequency.
+ * When below the ideal frequency we always ramp up to the ideal freq.
+ */
+#define DEFAULT_RAMP_UP_STEP 256000
+static unsigned int ramp_up_step;
+
+/*
+ * Frequency delta when ramping down below the ideal frequency.
+ * Zero disables and will calculate ramp down according to load heuristic.
+ * When above the ideal frequency we always ramp down to the ideal freq.
+ */ +#define DEFAULT_RAMP_DOWN_STEP 256000 +static unsigned int ramp_down_step; + +/* + * CPU freq will be increased if measured load > max_cpu_load; + */ +#define DEFAULT_MAX_CPU_LOAD 50 +static unsigned long max_cpu_load; + +/* + * CPU freq will be decreased if measured load < min_cpu_load; + */ +#define DEFAULT_MIN_CPU_LOAD 25 +static unsigned long min_cpu_load; + +/* + * The minimum amount of time to spend at a frequency before we can ramp up. + * Notice we ignore this when we are below the ideal frequency. + */ +#define DEFAULT_UP_RATE_US 48000; +static unsigned long up_rate_us; + +/* + * The minimum amount of time to spend at a frequency before we can ramp down. + * Notice we ignore this when we are above the ideal frequency. + */ +#define DEFAULT_DOWN_RATE_US 99000; +static unsigned long down_rate_us; + +/* + * The frequency to set when waking up from sleep. + * When sleep_ideal_freq=0 this will have no effect. + */ +#define DEFAULT_SLEEP_WAKEUP_FREQ 99999999 +static unsigned int sleep_wakeup_freq; + +/* + * Sampling rate, I highly recommend to leave it at 2. + */ +#define DEFAULT_SAMPLE_RATE_JIFFIES 2 +static unsigned int sample_rate_jiffies; + + +/*************** End of tunables ***************/ + + +static void (*pm_idle_old)(void); +static atomic_t active_count = ATOMIC_INIT(0); + +struct smartass_info_s { + struct cpufreq_policy *cur_policy; + struct cpufreq_frequency_table *freq_table; + struct timer_list timer; + u64 time_in_idle; + u64 idle_exit_time; + u64 freq_change_time; + u64 freq_change_time_in_idle; + int cur_cpu_load; + int old_freq; + int ramp_dir; + unsigned int enable; + int ideal_speed; +}; +static DEFINE_PER_CPU(struct smartass_info_s, smartass_info); + +/* Workqueues handle frequency scaling */ +static struct workqueue_struct *up_wq; +static struct workqueue_struct *down_wq; +static struct work_struct freq_scale_work; + +static cpumask_t work_cpumask; +static spinlock_t cpumask_lock; + +static unsigned int suspended; + +#define dprintk(flag,msg...) do { \ + if (debug_mask & flag) printk(KERN_DEBUG msg); \ + } while (0) + +enum { + SMARTASS_DEBUG_JUMPS=1, + SMARTASS_DEBUG_LOAD=2, + SMARTASS_DEBUG_ALG=4 +}; + +/* + * Combination of the above debug flags. + */ +static unsigned long debug_mask; + +static int cpufreq_governor_smartass(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2 +static +#endif +struct cpufreq_governor cpufreq_gov_smartass2 = { + .name = "smartassV2", + .governor = cpufreq_governor_smartass, + .max_transition_latency = 9000000, + .owner = THIS_MODULE, +}; + +inline static void smartass_update_min_max(struct smartass_info_s *this_smartass, struct cpufreq_policy *policy, int suspend) { + if (suspend) { + this_smartass->ideal_speed = // sleep_ideal_freq; but make sure it obeys the policy min/max + policy->max > sleep_ideal_freq ? + (sleep_ideal_freq > policy->min ? sleep_ideal_freq : policy->min) : policy->max; + } else { + this_smartass->ideal_speed = // awake_ideal_freq; but make sure it obeys the policy min/max + policy->min < awake_ideal_freq ? + (awake_ideal_freq < policy->max ? 
awake_ideal_freq : policy->max) : policy->min; + } +} + +inline static void smartass_update_min_max_allcpus(void) { + unsigned int i; + for_each_online_cpu(i) { + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, i); + if (this_smartass->enable) + smartass_update_min_max(this_smartass,this_smartass->cur_policy,suspended); + } +} + +inline static unsigned int validate_freq(struct cpufreq_policy *policy, int freq) { + if (freq > (int)policy->max) + return policy->max; + if (freq < (int)policy->min) + return policy->min; + return freq; +} + +inline static void reset_timer(unsigned long cpu, struct smartass_info_s *this_smartass) { + this_smartass->time_in_idle = get_cpu_idle_time_us(cpu, &this_smartass->idle_exit_time); + mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies); +} + +inline static void work_cpumask_set(unsigned long cpu) { + unsigned long flags; + spin_lock_irqsave(&cpumask_lock, flags); + cpumask_set_cpu(cpu, &work_cpumask); + spin_unlock_irqrestore(&cpumask_lock, flags); +} + +inline static int work_cpumask_test_and_clear(unsigned long cpu) { + unsigned long flags; + int res = 0; + spin_lock_irqsave(&cpumask_lock, flags); + res = cpumask_test_and_clear_cpu(cpu, &work_cpumask); + spin_unlock_irqrestore(&cpumask_lock, flags); + return res; +} + +inline static int target_freq(struct cpufreq_policy *policy, struct smartass_info_s *this_smartass, + int new_freq, int old_freq, int prefered_relation) { + int index, target; + struct cpufreq_frequency_table *table = this_smartass->freq_table; + + if (new_freq == old_freq) + return 0; + new_freq = validate_freq(policy,new_freq); + if (new_freq == old_freq) + return 0; + + if (table && + !cpufreq_frequency_table_target(policy,table,new_freq,prefered_relation,&index)) + { + target = table[index].frequency; + if (target == old_freq) { + // if for example we are ramping up to *at most* current + ramp_up_step + // but there is no such frequency higher than the current, try also + // to ramp up to *at least* current + ramp_up_step. + if (new_freq > old_freq && prefered_relation==CPUFREQ_RELATION_H + && !cpufreq_frequency_table_target(policy,table,new_freq, + CPUFREQ_RELATION_L,&index)) + target = table[index].frequency; + // simlarly for ramping down: + else if (new_freq < old_freq && prefered_relation==CPUFREQ_RELATION_L + && !cpufreq_frequency_table_target(policy,table,new_freq, + CPUFREQ_RELATION_H,&index)) + target = table[index].frequency; + } + + if (target == old_freq) { + // We should not get here: + // If we got here we tried to change to a validated new_freq which is different + // from old_freq, so there is no reason for us to remain at same frequency. 
+ printk(KERN_WARNING "Smartass: frequency change failed: %d to %d => %d\n", + old_freq,new_freq,target); + return 0; + } + } + else target = new_freq; + + __cpufreq_driver_target(policy, target, prefered_relation); + + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassQ: jumping from %d to %d => %d (%d)\n", + old_freq,new_freq,target,policy->cur); + + return target; +} + +static void cpufreq_smartass_timer(unsigned long cpu) +{ + u64 delta_idle; + u64 delta_time; + int cpu_load; + int old_freq; + u64 update_time; + u64 now_idle; + int queued_work = 0; + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); + struct cpufreq_policy *policy = this_smartass->cur_policy; + + now_idle = get_cpu_idle_time_us(cpu, &update_time); + old_freq = policy->cur; + + if (this_smartass->idle_exit_time == 0 || update_time == this_smartass->idle_exit_time) + return; + + delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle); + delta_time = cputime64_sub(update_time, this_smartass->idle_exit_time); + + // If timer ran less than 1ms after short-term sample started, retry. + if (delta_time < 1000) { + if (!timer_pending(&this_smartass->timer)) + reset_timer(cpu,this_smartass); + return; + } + + if (delta_idle > delta_time) + cpu_load = 0; + else + cpu_load = 100 * (unsigned int)(delta_time - delta_idle) / (unsigned int)delta_time; + + dprintk(SMARTASS_DEBUG_LOAD,"smartassT @ %d: load %d (delta_time %llu)\n", + old_freq,cpu_load,delta_time); + + this_smartass->cur_cpu_load = cpu_load; + this_smartass->old_freq = old_freq; + + // Scale up if load is above max or if there where no idle cycles since coming out of idle, + // additionally, if we are at or above the ideal_speed, verify we have been at this frequency + // for at least up_rate_us: + if (cpu_load > max_cpu_load || delta_idle == 0) + { + if (old_freq < policy->max && + (old_freq < this_smartass->ideal_speed || delta_idle == 0 || + cputime64_sub(update_time, this_smartass->freq_change_time) >= up_rate_us)) + { + dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp up: load %d (delta_idle %llu)\n", + old_freq,cpu_load,delta_idle); + this_smartass->ramp_dir = 1; + work_cpumask_set(cpu); + queue_work(up_wq, &freq_scale_work); + queued_work = 1; + } + else this_smartass->ramp_dir = 0; + } + // Similarly for scale down: load should be below min and if we are at or below ideal + // frequency we require that we have been at this frequency for at least down_rate_us: + else if (cpu_load < min_cpu_load && old_freq > policy->min && + (old_freq > this_smartass->ideal_speed || + cputime64_sub(update_time, this_smartass->freq_change_time) >= down_rate_us)) + { + dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp down: load %d (delta_idle %llu)\n", + old_freq,cpu_load,delta_idle); + this_smartass->ramp_dir = -1; + work_cpumask_set(cpu); + queue_work(down_wq, &freq_scale_work); + queued_work = 1; + } + else this_smartass->ramp_dir = 0; + + // To avoid unnecessary load when the CPU is already at high load, we don't + // reset ourselves if we are at max speed. If and when there are idle cycles, + // the idle loop will activate the timer. + // Additionally, if we queued some work, the work task will reset the timer + // after it has done its adjustments. 
+ if (!queued_work && old_freq < policy->max) + reset_timer(cpu,this_smartass); +} + +static void cpufreq_idle(void) +{ + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); + struct cpufreq_policy *policy = this_smartass->cur_policy; + + if (!this_smartass->enable) { + pm_idle_old(); + return; + } + + if (policy->cur == policy->min && timer_pending(&this_smartass->timer)) + del_timer(&this_smartass->timer); + + pm_idle_old(); + + if (!timer_pending(&this_smartass->timer)) + reset_timer(smp_processor_id(), this_smartass); +} + +/* We use the same work function to sale up and down */ +static void cpufreq_smartass_freq_change_time_work(struct work_struct *work) +{ + unsigned int cpu; + int new_freq; + int old_freq; + int ramp_dir; + struct smartass_info_s *this_smartass; + struct cpufreq_policy *policy; + unsigned int relation = CPUFREQ_RELATION_L; + for_each_possible_cpu(cpu) { + this_smartass = &per_cpu(smartass_info, cpu); + if (!work_cpumask_test_and_clear(cpu)) + continue; + + ramp_dir = this_smartass->ramp_dir; + this_smartass->ramp_dir = 0; + + old_freq = this_smartass->old_freq; + policy = this_smartass->cur_policy; + + if (old_freq != policy->cur) { + // frequency was changed by someone else? + printk(KERN_WARNING "Smartass: frequency changed by 3rd party: %d to %d\n", + old_freq,policy->cur); + new_freq = old_freq; + } + else if (ramp_dir > 0 && nr_running() > 1) { + // ramp up logic: + if (old_freq < this_smartass->ideal_speed) + new_freq = this_smartass->ideal_speed; + else if (ramp_up_step) { + new_freq = old_freq + ramp_up_step; + relation = CPUFREQ_RELATION_H; + } + else { + new_freq = policy->max; + relation = CPUFREQ_RELATION_H; + } + dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp up: ramp_dir=%d ideal=%d\n", + old_freq,ramp_dir,this_smartass->ideal_speed); + } + else if (ramp_dir < 0) { + // ramp down logic: + if (old_freq > this_smartass->ideal_speed) { + new_freq = this_smartass->ideal_speed; + relation = CPUFREQ_RELATION_H; + } + else if (ramp_down_step) + new_freq = old_freq - ramp_down_step; + else { + // Load heuristics: Adjust new_freq such that, assuming a linear + // scaling of load vs. frequency, the load in the new frequency + // will be max_cpu_load: + new_freq = old_freq * this_smartass->cur_cpu_load / max_cpu_load; + if (new_freq > old_freq) // min_cpu_load > max_cpu_load ?! + new_freq = old_freq -1; + } + dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp down: ramp_dir=%d ideal=%d\n", + old_freq,ramp_dir,this_smartass->ideal_speed); + } + else { // ramp_dir==0 ?! Could the timer change its mind about a queued ramp up/down + // before the work task gets to run? 
+ // This may also happen if we refused to ramp up because the nr_running()==1 + new_freq = old_freq; + dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d nothing: ramp_dir=%d nr_running=%lu\n", + old_freq,ramp_dir,nr_running()); + } + + // do actual ramp up (returns 0, if frequency change failed): + new_freq = target_freq(policy,this_smartass,new_freq,old_freq,relation); + if (new_freq) + this_smartass->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time); + + // reset timer: + if (new_freq < policy->max) + reset_timer(cpu,this_smartass); + // if we are maxed out, it is pointless to use the timer + // (idle cycles wake up the timer when the timer comes) + else if (timer_pending(&this_smartass->timer)) + del_timer(&this_smartass->timer); + } +} + +static ssize_t show_debug_mask(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", debug_mask); +} + +static ssize_t store_debug_mask(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0) + debug_mask = input; + return res; +} + +static ssize_t show_up_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", up_rate_us); +} + +static ssize_t store_up_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0 && input <= 100000000) + up_rate_us = input; + return res; +} + +static ssize_t show_down_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", down_rate_us); +} + +static ssize_t store_down_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0 && input <= 100000000) + down_rate_us = input; + return res; +} + +static ssize_t show_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", sleep_ideal_freq); +} + +static ssize_t store_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) { + sleep_ideal_freq = input; + if (suspended) + smartass_update_min_max_allcpus(); + } + return res; +} + +static ssize_t show_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", sleep_wakeup_freq); +} + +static ssize_t store_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + sleep_wakeup_freq = input; + return res; +} + +static ssize_t show_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", awake_ideal_freq); +} + +static ssize_t store_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) { + awake_ideal_freq = input; + if (!suspended) + smartass_update_min_max_allcpus(); + } + return res; +} + +static ssize_t show_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return 
sprintf(buf, "%u\n", sample_rate_jiffies); +} + +static ssize_t store_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input <= 1000) + sample_rate_jiffies = input; + return res; +} + +static ssize_t show_ramp_up_step(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", ramp_up_step); +} + +static ssize_t store_ramp_up_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + ramp_up_step = input; + return res; +} + +static ssize_t show_ramp_down_step(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", ramp_down_step); +} + +static ssize_t store_ramp_down_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + ramp_down_step = input; + return res; +} + +static ssize_t show_max_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", max_cpu_load); +} + +static ssize_t store_max_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input <= 100) + max_cpu_load = input; + return res; +} + +static ssize_t show_min_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", min_cpu_load); +} + +static ssize_t store_min_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input < 100) + min_cpu_load = input; + return res; +} + +#define define_global_rw_attr(_name) \ +static struct global_attr _name##_attr = \ + __ATTR(_name, 0644, show_##_name, store_##_name) + +define_global_rw_attr(debug_mask); +define_global_rw_attr(up_rate_us); +define_global_rw_attr(down_rate_us); +define_global_rw_attr(sleep_ideal_freq); +define_global_rw_attr(sleep_wakeup_freq); +define_global_rw_attr(awake_ideal_freq); +define_global_rw_attr(sample_rate_jiffies); +define_global_rw_attr(ramp_up_step); +define_global_rw_attr(ramp_down_step); +define_global_rw_attr(max_cpu_load); +define_global_rw_attr(min_cpu_load); + +static struct attribute * smartass_attributes[] = { + &debug_mask_attr.attr, + &up_rate_us_attr.attr, + &down_rate_us_attr.attr, + &sleep_ideal_freq_attr.attr, + &sleep_wakeup_freq_attr.attr, + &awake_ideal_freq_attr.attr, + &sample_rate_jiffies_attr.attr, + &ramp_up_step_attr.attr, + &ramp_down_step_attr.attr, + &max_cpu_load_attr.attr, + &min_cpu_load_attr.attr, + NULL, +}; + +static struct attribute_group smartass_attr_group = { + .attrs = smartass_attributes, + .name = "smartass", +}; + +static int cpufreq_governor_smartass(struct cpufreq_policy *new_policy, + unsigned int event) +{ + unsigned int cpu = new_policy->cpu; + int rc; + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!new_policy->cur)) + return -EINVAL; + + this_smartass->cur_policy = new_policy; + + this_smartass->enable = 1; + + 
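+		/*
+		 * Clamp the ideal frequency (awake or sleep, depending on the
+		 * current suspend state) into this policy's min..max range
+		 * before the sampling timer is armed below.
+		 */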
smartass_update_min_max(this_smartass,new_policy,suspended); + + this_smartass->freq_table = cpufreq_frequency_get_table(cpu); + if (!this_smartass->freq_table) + printk(KERN_WARNING "Smartass: no frequency table for cpu %d?!\n",cpu); + + smp_wmb(); + + // Do not register the idle hook and create sysfs + // entries if we have already done so. + if (atomic_inc_return(&active_count) <= 1) { + rc = sysfs_create_group(cpufreq_global_kobject, + &smartass_attr_group); + if (rc) + return rc; + + pm_idle_old = pm_idle; + pm_idle = cpufreq_idle; + } + + if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer)) + reset_timer(cpu,this_smartass); + + break; + + case CPUFREQ_GOV_LIMITS: + smartass_update_min_max(this_smartass,new_policy,suspended); + + if (this_smartass->cur_policy->cur > new_policy->max) { + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new max freq: %d\n",new_policy->max); + __cpufreq_driver_target(this_smartass->cur_policy, + new_policy->max, CPUFREQ_RELATION_H); + } + else if (this_smartass->cur_policy->cur < new_policy->min) { + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new min freq: %d\n",new_policy->min); + __cpufreq_driver_target(this_smartass->cur_policy, + new_policy->min, CPUFREQ_RELATION_L); + } + + if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer)) + reset_timer(cpu,this_smartass); + + break; + + case CPUFREQ_GOV_STOP: + this_smartass->enable = 0; + smp_wmb(); + del_timer(&this_smartass->timer); + flush_work(&freq_scale_work); + this_smartass->idle_exit_time = 0; + + if (atomic_dec_return(&active_count) <= 1) { + sysfs_remove_group(cpufreq_global_kobject, + &smartass_attr_group); + pm_idle = pm_idle_old; + } + break; + } + + return 0; +} + +static void smartass_suspend(int cpu, int suspend) +{ + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); + struct cpufreq_policy *policy = this_smartass->cur_policy; + unsigned int new_freq; + + if (!this_smartass->enable) + return; + + smartass_update_min_max(this_smartass,policy,suspend); + if (!suspend) { // resume at max speed: + new_freq = validate_freq(policy,sleep_wakeup_freq); + + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: awaking at %d\n",new_freq); + + __cpufreq_driver_target(policy, new_freq, + CPUFREQ_RELATION_L); + } else { + // to avoid wakeup issues with quick sleep/wakeup don't change actual frequency when entering sleep + // to allow some time to settle down. Instead we just reset our statistics (and reset the timer). + // Eventually, the timer will adjust the frequency if necessary. 
+ + this_smartass->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time); + + dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: suspending at %d\n",policy->cur); + } + + reset_timer(smp_processor_id(),this_smartass); +} + +static void smartass_early_suspend(struct early_suspend *handler) { + int i; + if (suspended || sleep_ideal_freq==0) // disable behavior for sleep_ideal_freq==0 + return; + suspended = 1; + for_each_online_cpu(i) + smartass_suspend(i,1); +} + +static void smartass_late_resume(struct early_suspend *handler) { + int i; + if (!suspended) // already not suspended so nothing to do + return; + suspended = 0; + for_each_online_cpu(i) + smartass_suspend(i,0); +} + +static struct early_suspend smartass_power_suspend = { + .suspend = smartass_early_suspend, + .resume = smartass_late_resume, +#ifdef CONFIG_MACH_HERO + .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, +#endif +}; + +static int __init cpufreq_smartass_init(void) +{ + unsigned int i; + struct smartass_info_s *this_smartass; + debug_mask = 0; + up_rate_us = DEFAULT_UP_RATE_US; + down_rate_us = DEFAULT_DOWN_RATE_US; + sleep_ideal_freq = DEFAULT_SLEEP_IDEAL_FREQ; + sleep_wakeup_freq = DEFAULT_SLEEP_WAKEUP_FREQ; + awake_ideal_freq = DEFAULT_AWAKE_IDEAL_FREQ; + sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES; + ramp_up_step = DEFAULT_RAMP_UP_STEP; + ramp_down_step = DEFAULT_RAMP_DOWN_STEP; + max_cpu_load = DEFAULT_MAX_CPU_LOAD; + min_cpu_load = DEFAULT_MIN_CPU_LOAD; + + spin_lock_init(&cpumask_lock); + + suspended = 0; + + /* Initalize per-cpu data: */ + for_each_possible_cpu(i) { + this_smartass = &per_cpu(smartass_info, i); + this_smartass->enable = 0; + this_smartass->cur_policy = 0; + this_smartass->ramp_dir = 0; + this_smartass->time_in_idle = 0; + this_smartass->idle_exit_time = 0; + this_smartass->freq_change_time = 0; + this_smartass->freq_change_time_in_idle = 0; + this_smartass->cur_cpu_load = 0; + // intialize timer: + init_timer_deferrable(&this_smartass->timer); + this_smartass->timer.function = cpufreq_smartass_timer; + this_smartass->timer.data = i; + work_cpumask_test_and_clear(i); + } + + // Scale up is high priority + up_wq = alloc_workqueue("ksmartass_up", WQ_HIGHPRI, 1); + down_wq = alloc_workqueue("ksmartass_down", 0, 1); + if (!up_wq || !down_wq) + return -ENOMEM; + + INIT_WORK(&freq_scale_work, cpufreq_smartass_freq_change_time_work); + + register_early_suspend(&smartass_power_suspend); + + return cpufreq_register_governor(&cpufreq_gov_smartass2); +} + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2 +fs_initcall(cpufreq_smartass_init); +#else +module_init(cpufreq_smartass_init); +#endif + +static void __exit cpufreq_smartass_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_smartass2); + destroy_workqueue(up_wq); + destroy_workqueue(down_wq); +} + +module_exit(cpufreq_smartass_exit); + +MODULE_AUTHOR ("Erasmux"); +MODULE_DESCRIPTION ("'cpufreq_smartass2' - A smart cpufreq governor"); +MODULE_LICENSE ("GPL"); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 986b0aac..a301beed 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -24,6 +25,9 @@ #define CPUFREQ_NAME_LEN 16 +/********************************************************************* + * CPUFREQ NOTIFIER INTERFACE * + *********************************************************************/ #define CPUFREQ_TRANSITION_NOTIFIER (0) #define CPUFREQ_POLICY_NOTIFIER (1) @@ -46,6 +50,10 @@ static inline int 
cpufreq_unregister_notifier(struct notifier_block *nb, static inline void disable_cpufreq(void) { } #endif +/* if (cpufreq_driver->target) exists, the ->governor decides what frequency + * within the limits is used. If (cpufreq_driver->setpolicy> exists, these + * two generic policies are available: + */ #define CPUFREQ_POLICY_POWERSAVE (1) #define CPUFREQ_POLICY_PERFORMANCE (2) @@ -55,6 +63,7 @@ static inline void disable_cpufreq(void) { } struct cpufreq_governor; +/* /sys/devices/system/cpu/cpufreq: entry point for global variables */ extern struct kobject *cpufreq_global_kobject; #define CPUFREQ_ETERNAL (-1) @@ -105,6 +114,7 @@ struct cpufreq_policy { #define CPUFREQ_SHARED_TYPE_ALL (2) #define CPUFREQ_SHARED_TYPE_ANY (3) +/******************** cpufreq transition notifiers *******************/ #define CPUFREQ_PRECHANGE (0) #define CPUFREQ_POSTCHANGE (1) @@ -119,6 +129,15 @@ struct cpufreq_freqs { }; +/** + * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch safe) + * @old: old value + * @div: divisor + * @mult: multiplier + * + * + * new = old * mult / div + */ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mult) { #if BITS_PER_LONG == 32 @@ -136,6 +155,9 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mu #endif }; +/********************************************************************* + * CPUFREQ GOVERNORS * + *********************************************************************/ #define CPUFREQ_GOV_START 1 #define CPUFREQ_GOV_STOP 2 @@ -154,6 +176,9 @@ struct cpufreq_governor { struct module *owner; }; +/* + * Pass a target to the cpufreq driver. + */ extern int cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation); @@ -211,6 +236,7 @@ struct cpufreq_driver { struct freq_attr **attr; }; +/* flags */ #define CPUFREQ_STICKY 0x01 #define CPUFREQ_CONST_LOOPS 0x02 @@ -274,10 +300,14 @@ static struct global_attr _name = \ __ATTR(_name, 0644, show_##_name, store_##_name) +/********************************************************************* + * CPUFREQ 2.6. INTERFACE * + *********************************************************************/ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); int cpufreq_update_policy(unsigned int cpu); #ifdef CONFIG_CPU_FREQ +/* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */ unsigned int cpufreq_get(unsigned int cpu); #else static inline unsigned int cpufreq_get(unsigned int cpu) @@ -286,6 +316,7 @@ static inline unsigned int cpufreq_get(unsigned int cpu) } #endif +/* query the last known CPU freq (in kHz). 
If zero, cpufreq couldn't detect it */ #ifdef CONFIG_CPU_FREQ unsigned int cpufreq_quick_get(unsigned int cpu); unsigned int cpufreq_quick_get_max(unsigned int cpu); @@ -301,8 +332,15 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu) #endif +/********************************************************************* + * CPUFREQ DEFAULT GOVERNOR * + *********************************************************************/ +/* + Performance governor is fallback governor if any other gov failed to + auto load due latency restrictions +*/ #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE extern struct cpufreq_governor cpufreq_gov_performance; #endif @@ -320,15 +358,36 @@ extern struct cpufreq_governor cpufreq_gov_ondemand; #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE) extern struct cpufreq_governor cpufreq_gov_conservative; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE) +#endif#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE) extern struct cpufreq_governor cpufreq_gov_interactive; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_interactive) #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND) extern struct cpufreq_governor cpufreq_gov_intellidemand; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_intellidemand) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2) +extern struct cpufreq_governor cpufreq_gov_smartass2; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_smartass2) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_MINMAX) +extern struct cpufreq_governor cpufreq_gov_minmax; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_minmax) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX) +extern struct cpufreq_governor cpufreq_gov_interactivex; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_interactivex) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_LAGFREE) +extern struct cpufreq_governor cpufreq_gov_lagfree; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_lagfree) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_LULZACTIVE) +extern struct cpufreq_governor cpufreq_gov_lulzactive; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_lulzactive) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS) +extern struct cpufreq_governor cpufreq_gov_smartass; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_smartass) #endif +/********************************************************************* + * FREQUENCY TABLE HELPERS * + *********************************************************************/ #define CPUFREQ_ENTRY_INVALID ~0 #define CPUFREQ_TABLE_END ~1 @@ -350,10 +409,12 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, unsigned int relation, unsigned int *index); +/* the following 3 funtions are for cpufreq core use only */ struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); void cpufreq_cpu_put(struct cpufreq_policy *data); +/* the following are really really optional */ extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, From 198875804c0e0037dd74072f4d083b8ae06df7c0 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 09:30:26 -0400 Subject: [PATCH 08/35] Added CPUfreq governor 'lazy'. 
Conflicts: drivers/cpufreq/Kconfig drivers/cpufreq/Makefile include/linux/cpufreq.h Conflicts: drivers/cpufreq/Kconfig include/linux/cpufreq.h --- drivers/cpufreq/Kconfig | 56 +++ drivers/cpufreq/Makefile | 5 + drivers/cpufreq/cpufreq_lazy.c | 822 +++++++++++++++++++++++++++++++++ include/linux/cpufreq.h | 3 + 4 files changed, 886 insertions(+) create mode 100644 drivers/cpufreq/cpufreq_lazy.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index d794fc4d..ba4d2b48 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -234,6 +234,7 @@ config CPU_FREQ_DEFAULT_GOV_MINMAX bool "minmax" select CPU_FREQ_GOV_MINMAX select CPU_FREQ_GOV_PERFORMANCE + help Use the CPUFreq governor 'minmax' as default. This minimizes the frequency jumps does by the governor. This is aimed at maximizing both perfomance and battery life. @@ -262,6 +263,7 @@ config CPU_FREQ_GOV_CONSERVATIVE config CPU_FREQ_DEFAULT_GOV_SMARTASS2 bool "smartass2" select CPU_FREQ_GOV_SMARTASS2 + help Use the CPUFreq governor 'smartassV2' as default. config CPU_FREQ_DEFAULT_GOV_LAGFREE @@ -275,6 +277,7 @@ config CPU_FREQ_DEFAULT_GOV_LAGFREE Be aware that not all cpufreq drivers support the lagfree governor. If unsure have a look at the help section of the driver. Fallback governor will be the performance governor. + config CPU_FREQ_DEFAULT_GOV_INTERACTIVEX bool "interactiveX" select CPU_FREQ_GOV_INTERACTIVEX @@ -350,6 +353,14 @@ config CPU_FREQ_GOV_POWERSAVE If in doubt, say Y. +config CPU_FREQ_DEFAULT_GOV_LAZY + bool "lazy" + select CPU_FREQ_GOV_LAZY + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lazy' as default. +endchoice + config CPU_FREQ_GOV_SLP tristate "'slp' cpufreq policy governor" @@ -374,6 +385,47 @@ config CPU_FREQ_GOV_USERSPACE If in doubt, say Y. +config CPU_FREQ_GOV_ONDEMAND + tristate "'ondemand' cpufreq policy governor" + select CPU_FREQ_TABLE + help + 'ondemand' - This driver adds a dynamic cpufreq policy governor. + The governor does a periodic polling and + changes frequency based on the CPU utilization. + The support for this governor depends on CPU capability to + do fast frequency switching (i.e, very low latency frequency + transitions). + + To compile this driver as a module, choose M here: the + module will be called cpufreq_ondemand. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +config CPU_FREQ_GOV_CONSERVATIVE + tristate "'conservative' cpufreq governor" + depends on CPU_FREQ + help + 'conservative' - this driver is rather similar to the 'ondemand' + governor both in its source code and its purpose, the difference is + its optimisation for better suitability in a battery powered + environment. The frequency is gracefully increased and decreased + rather than jumping to 100% when speed is required. + + If you have a desktop machine then you should really be considering + the 'ondemand' governor instead, however if you are using a laptop, + PDA or even an AMD64 based computer (due to the unacceptable + step-by-step latency issues between the minimum and maximum frequency + transitions in the CPU) you will probably want to use this governor. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_conservative. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. 
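The help texts above describe the two classic demand-based policies in prose: 'ondemand' polls and jumps straight to the maximum frequency once load crosses its up threshold, while 'conservative' ramps up and down in small freq_step increments. The userspace-style sketch below is illustrative only and is not part of the patch; the function names, threshold values and frequencies are invented stand-ins rather than the kernel defaults.

/*
 * Illustrative sketch, not part of the patch: contrasts an 'ondemand'-style
 * jump-to-max policy with a 'conservative'-style stepped policy.  All
 * numbers below are made-up example values.
 */
#include <stdio.h>

static unsigned int pick_ondemand(unsigned int load, unsigned int cur,
                                  unsigned int min, unsigned int max)
{
        if (load > 80)                  /* example up_threshold */
                return max;             /* jump straight to the top */
        if (load < 30 && cur > min)     /* idle enough: fall back */
                return min;
        return cur;
}

static unsigned int pick_conservative(unsigned int load, unsigned int cur,
                                      unsigned int min, unsigned int max)
{
        unsigned int step = max * 5 / 100;      /* example freq_step = 5% */

        if (load > 80 && cur + step <= max)     /* example up_threshold */
                return cur + step;              /* ramp up gently */
        if (load < 45 && cur >= min + step)     /* example down_threshold */
                return cur - step;              /* ramp down gently */
        return cur;
}

int main(void)
{
        unsigned int min = 122880, max = 1228800, cur = 614400;

        printf("ondemand-style     : %u kHz\n", pick_ondemand(85, cur, min, max));
        printf("conservative-style : %u kHz\n", pick_conservative(85, cur, min, max));
        return 0;
}

Feeding the same 85% load sample through both helpers shows ondemand requesting the table maximum while conservative only moves one step above the current frequency, which is the behavioural difference the two help texts describe.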
+ config CPU_FREQ_GOV_LULZACTIVE tristate "'lulzactive' cpufreq governor" depends on CPU_FREQ @@ -453,6 +505,10 @@ config CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER Sampling latency rate multiplied by the cpu switch latency. Affects governor polling. +config CPU_FREQ_GOV_LAZY + tristate "'lazy' cpufreq governor" + depends on CPU_FREQ + menu "x86 CPU frequency scaling drivers" depends on X86 source "drivers/cpufreq/Kconfig.x86" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index d9355369..b529a80f 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -22,12 +22,17 @@ obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o +<<<<<<< HEAD obj-$(CONFIG_CPU_FREQ_GOV_MINMAX) += cpufreq_minmax.o obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS2) += cpufreq_smartass2.o obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS) += cpufreq_smartass.o obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVEX) += cpufreq_interactivex.o obj-$(CONFIG_CPU_FREQ_GOV_LAGFREE) += cpufreq_lagfree.o obj-$(CONFIG_CPU_FREQ_GOV_LULZACTIVE) += cpufreq_lulzactive.o +======= +obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o +obj-$(CONFIG_CPU_FREQ_GOV_LAZY) += cpufreq_lazy.o +>>>>>>> 17381f7... Added CPUfreq governor 'lazy'. # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o diff --git a/drivers/cpufreq/cpufreq_lazy.c b/drivers/cpufreq/cpufreq_lazy.c new file mode 100644 index 00000000..82bfe8a9 --- /dev/null +++ b/drivers/cpufreq/cpufreq_lazy.c @@ -0,0 +1,822 @@ +/* + * drivers/cpufreq/cpufreq_lazy.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * (C) 2011 Ezekeel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_HAS_EARLYSUSPEND +#include +#endif + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) +#define DEF_FREQUENCY_UP_THRESHOLD (80) +#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) +#define MICRO_FREQUENCY_UP_THRESHOLD (90) +#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) +#define MIN_FREQUENCY_UP_THRESHOLD (11) +#define MAX_FREQUENCY_UP_THRESHOLD (100) + +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. + * All times here are in uS. 
+ */ +#define MIN_SAMPLING_RATE_RATIO (2) + +static unsigned int min_sampling_rate, current_sampling_rate; + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_LAZY +static +#endif +struct cpufreq_governor cpufreq_gov_lazy = { + .name = "lazy", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_lo; + unsigned int freq_lo_jiffies; + unsigned int freq_hi_jiffies; + int cpu; + unsigned int sample_type:1; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects protects dbs_enable in governor start/stop. + */ +static DEFINE_MUTEX(dbs_mutex); + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int ignore_nice; + unsigned int powersave_bias; + unsigned int io_is_busy; + unsigned int min_timeinstate; +#ifdef CONFIG_HAS_EARLYSUSPEND + bool screenoff_maxfreq; +#endif +} dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, + .ignore_nice = 0, + .powersave_bias = 0, +#ifdef CONFIG_HAS_EARLYSUSPEND + .screenoff_maxfreq = false, +#endif +}; + +#ifdef CONFIG_HAS_EARLYSUSPEND +static bool suspended = false; + +static void lazy_early_suspend(struct early_suspend *handler) +{ + suspended = true; + + return; +} + +static void lazy_late_resume(struct early_suspend *handler) +{ + suspended = false; + + return; +} + +static struct early_suspend lazy_suspend = { + .suspend = lazy_early_suspend, + .resume = lazy_late_resume, + .level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1, +}; +#endif + +static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, + cputime64_t *wall) +{ + cputime64_t idle_time; + cputime64_t cur_wall_time; + cputime64_t busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, + kstat_cpu(cpu).cpustat.system); + + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); + + idle_time = cputime64_sub(cur_wall_time, busy_time); + if (wall) + *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); + + return (cputime64_t)jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, wall); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + + return idle_time; +} + 
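get_cpu_idle_time() above only returns cumulative counters; the governor's sampling path later turns the difference between two readings into a load percentage and, when the io_is_busy tunable is set, counts iowait as busy time. The standalone sketch below is illustrative only and is not part of the patch; the struct name, helper and sample values are invented for the example.

/*
 * Illustrative sketch, not part of the patch: derive a load percentage from
 * two successive (idle, iowait, wall) readings, optionally treating iowait
 * as busy time the way the io_is_busy tunable does.
 */
#include <stdio.h>

struct sample { unsigned long long idle_us, iowait_us, wall_us; };

static unsigned int load_between(struct sample prev, struct sample cur,
                                 int io_is_busy)
{
        unsigned long long wall   = cur.wall_us   - prev.wall_us;
        unsigned long long idle   = cur.idle_us   - prev.idle_us;
        unsigned long long iowait = cur.iowait_us - prev.iowait_us;

        /* count time spent waiting on IO as busy, as io_is_busy does */
        if (io_is_busy && idle >= iowait)
                idle -= iowait;

        if (!wall || wall < idle)       /* bogus interval: treat as no load */
                return 0;

        return (unsigned int)(100ULL * (wall - idle) / wall);
}

int main(void)
{
        struct sample prev = { .idle_us = 1000000, .iowait_us = 10000, .wall_us = 5000000 };
        struct sample cur  = { .idle_us = 1020000, .iowait_us = 25000, .wall_us = 5100000 };

        printf("load (iowait counted as idle): %u%%\n", load_between(prev, cur, 0));
        printf("load (iowait counted as busy): %u%%\n", load_between(prev, cur, 1));
        return 0;
}

With these invented numbers the same interval reads as 80% load when iowait counts as idle and 95% when it counts as busy, which is exactly the distinction io_is_busy controls.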
+static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +/* + * Find right freq to be set now with powersave_bias on. + * Returns the freq_hi to be used right now and will set freq_hi_jiffies, + * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. + */ +static unsigned int powersave_bias_target(struct cpufreq_policy *policy, + unsigned int freq_next, + unsigned int relation) +{ + unsigned int freq_req, freq_reduc, freq_avg; + unsigned int freq_hi, freq_lo; + unsigned int index = 0; + unsigned int jiffies_total, jiffies_hi, jiffies_lo; + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, + policy->cpu); + + if (!dbs_info->freq_table) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_next; + } + + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, + relation, &index); + freq_req = dbs_info->freq_table[index].frequency; + freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000; + freq_avg = freq_req - freq_reduc; + + /* Find freq bounds for freq_avg in freq_table */ + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_H, &index); + freq_lo = dbs_info->freq_table[index].frequency; + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_L, &index); + freq_hi = dbs_info->freq_table[index].frequency; + + /* Find out how long we have to be in hi and lo freqs */ + if (freq_hi == freq_lo) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_lo; + } + jiffies_total = usecs_to_jiffies(current_sampling_rate); + jiffies_hi = (freq_avg - freq_lo) * jiffies_total; + jiffies_hi += ((freq_hi - freq_lo) / 2); + jiffies_hi /= (freq_hi - freq_lo); + jiffies_lo = jiffies_total - jiffies_hi; + dbs_info->freq_lo = freq_lo; + dbs_info->freq_lo_jiffies = jiffies_lo; + dbs_info->freq_hi_jiffies = jiffies_hi; + return freq_hi; +} + +static void lazy_powersave_bias_init_cpu(int cpu) +{ + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + dbs_info->freq_table = cpufreq_frequency_get_table(cpu); + dbs_info->freq_lo = 0; +} + +static void lazy_powersave_bias_init(void) +{ + int i; + for_each_online_cpu(i) { + lazy_powersave_bias_init_cpu(i); + } +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +define_one_global_ro(sampling_rate_min); + +/* cpufreq_lazy Governor Tunables */ +#define show_one(file_name, object) \ + static ssize_t show_##file_name \ + (struct kobject *kobj, struct attribute *attr, char *buf) \ + { \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ + } +show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(up_threshold, up_threshold); +show_one(ignore_nice_load, ignore_nice); +show_one(powersave_bias, powersave_bias); +show_one(min_timeinstate, min_timeinstate); +#ifdef CONFIG_HAS_EARLYSUSPEND +show_one(screenoff_maxfreq, screenoff_maxfreq); +#endif + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + 
return count; +} + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.io_is_busy = !!input; + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + dbs_tuners_ins.up_threshold = input; + return count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + mutex_unlock(&dbs_mutex); + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; + + } + return count; +} + +static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input > 1000) + input = 1000; + + dbs_tuners_ins.powersave_bias = input; + lazy_powersave_bias_init(); + return count; +} + +static ssize_t store_min_timeinstate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.min_timeinstate = max(input, min_sampling_rate); + return count; +} + +#ifdef CONFIG_HAS_EARLYSUSPEND +static ssize_t store_screenoff_maxfreq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1 || input > 1) + return -EINVAL; + dbs_tuners_ins.screenoff_maxfreq = input; + return count; +} +#endif + +define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(up_threshold); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(powersave_bias); +define_one_global_rw(min_timeinstate); +#ifdef CONFIG_HAS_EARLYSUSPEND +define_one_global_rw(screenoff_maxfreq); +#endif + +static struct attribute *dbs_attributes[] = { + &sampling_rate_min.attr, + &sampling_rate.attr, + &up_threshold.attr, + &ignore_nice_load.attr, + &powersave_bias.attr, + &io_is_busy.attr, + &min_timeinstate.attr, +#ifdef CONFIG_HAS_EARLYSUSPEND + &screenoff_maxfreq.attr, +#endif + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "lazy", +}; + +/************************** sysfs end ************************/ + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + unsigned int max_load_freq; + + struct cpufreq_policy *policy; + unsigned int j; + + this_dbs_info->freq_lo = 0; + policy = this_dbs_info->cur_policy; + + current_sampling_rate = dbs_tuners_ins.sampling_rate; + +#ifdef CONFIG_HAS_EARLYSUSPEND + if (suspended && dbs_tuners_ins.screenoff_maxfreq) { + 
/* if we are already at full speed then break out early */ + if (!dbs_tuners_ins.powersave_bias) { + if (policy->cur == policy->max) + return; + + __cpufreq_driver_target(policy, policy->max, + CPUFREQ_RELATION_H); + } else { + int freq = powersave_bias_target(policy, policy->max, + CPUFREQ_RELATION_H); + __cpufreq_driver_target(policy, freq, + CPUFREQ_RELATION_L); + } + current_sampling_rate = dbs_tuners_ins.min_timeinstate; + return; + } +#endif + + /* + * Every sampling_rate, we check, if current idle time is less + * than 20% (default), then we try to increase frequency + * Every sampling_rate, we look for a the lowest + * frequency which can sustain the load while keeping idle time over + * 30%. If such a frequency exist, we try to decrease to this frequency. + * + * Any frequency increase takes it to the maximum frequency. + * Frequency reduction happens at minimum steps of + * 5% (default) of current frequency + */ + + /* Get Absolute Load - in terms of freq */ + max_load_freq = 0; + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + unsigned int load, load_freq; + int freq_avg; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, + j_dbs_info->prev_cpu_iowait); + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) { + cputime64_t cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, + j_dbs_info->prev_cpu_nice); + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + /* + * For the purpose of lazy, waiting for disk IO is an + * indication that you're performance critical, and not that + * the system is actually idle. So subtract the iowait time + * from the cpu idle time. 
+ */ + + if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + + freq_avg = __cpufreq_driver_getavg(policy, j); + if (freq_avg <= 0) + freq_avg = policy->cur; + + load_freq = load * freq_avg; + if (load_freq > max_load_freq) + max_load_freq = load_freq; + } + + /* Check for frequency increase */ + if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) { + /* if we are already at full speed then break out early */ + if (!dbs_tuners_ins.powersave_bias) { + if (policy->cur == policy->max) + return; + + __cpufreq_driver_target(policy, policy->max, + CPUFREQ_RELATION_H); + } else { + int freq = powersave_bias_target(policy, policy->max, + CPUFREQ_RELATION_H); + __cpufreq_driver_target(policy, freq, + CPUFREQ_RELATION_L); + } + current_sampling_rate = dbs_tuners_ins.min_timeinstate; + return; + } + + /* Check for frequency decrease */ + /* if we cannot reduce the frequency anymore, break out early */ + if (policy->cur == policy->min) + return; + + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus 10 points under the threshold. + */ + if (max_load_freq < + (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * + policy->cur) { + unsigned int freq_next; + freq_next = max_load_freq / + (dbs_tuners_ins.up_threshold - + dbs_tuners_ins.down_differential); + + if (freq_next < policy->min) + freq_next = policy->min; + + if (!dbs_tuners_ins.powersave_bias) { + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } else { + int freq = powersave_bias_target(policy, freq_next, + CPUFREQ_RELATION_L); + __cpufreq_driver_target(policy, freq, + CPUFREQ_RELATION_L); + } + current_sampling_rate = dbs_tuners_ins.min_timeinstate; + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + int delay; + int sample_type = dbs_info->sample_type; + + mutex_lock(&dbs_info->timer_mutex); + + /* Common NORMAL_SAMPLE setup */ + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + if (!dbs_tuners_ins.powersave_bias || + sample_type == DBS_NORMAL_SAMPLE) { + dbs_check_cpu(dbs_info); + if (dbs_info->freq_lo) { + /* Setup timer for SUB_SAMPLE */ + dbs_info->sample_type = DBS_SUB_SAMPLE; + delay = dbs_info->freq_hi_jiffies; + } else { + delay = usecs_to_jiffies(current_sampling_rate); + if (num_online_cpus() > 1) + delay -= jiffies % delay; + } + } else { + __cpufreq_driver_target(dbs_info->cur_policy, + dbs_info->freq_lo, CPUFREQ_RELATION_H); + delay = usecs_to_jiffies(current_sampling_rate); + if (num_online_cpus() > 1) + delay -= jiffies % delay; + } + schedule_delayed_work_on(cpu, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(current_sampling_rate); + delay -= jiffies % delay; + + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); +} + +/* + * Not all CPUs want IO time to be 
accounted as busy; this dependson how + * efficient idling at a higher frequency/voltage is. + * Pavel Machek says this is not so for various generations of AMD and old + * Intel systems. + * Mike Chan (androidlcom) calis this is also not true for ARM. + * Because of this, whitelist specific known (series) of CPUs by default, and + * leave all others up to the user. + */ +static int should_io_be_busy(void) +{ +#if defined(CONFIG_X86) + /* + * For Intel, Core 2 (model 15) andl later have an efficient idle. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + boot_cpu_data.x86 == 6 && + boot_cpu_data.x86_model >= 15) + return 1; +#endif + return 0; +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_mutex); + + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kstat_cpu(j).cpustat.nice; + } + } + this_dbs_info->cpu = cpu; + lazy_powersave_bias_init_cpu(cpu); + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + /* policy latency is in nS. Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = min_sampling_rate; + current_sampling_rate = dbs_tuners_ins.sampling_rate; + dbs_tuners_ins.min_timeinstate = latency * LATENCY_MULTIPLIER; + dbs_tuners_ins.io_is_busy = should_io_be_busy(); + } + mutex_unlock(&dbs_mutex); + + mutex_init(&this_dbs_info->timer_mutex); + dbs_timer_init(this_dbs_info); + break; + + case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + mutex_destroy(&this_dbs_info->timer_mutex); + dbs_enable--; + mutex_unlock(&dbs_mutex); + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +static int __init cpufreq_gov_dbs_init(void) +{ + cputime64_t wall; + u64 idle_time; + int cpu = get_cpu(); + + idle_time = get_cpu_idle_time_us(cpu, &wall); + put_cpu(); + if (idle_time != -1ULL) { + /* Idle micro accounting is supported. 
Use finer thresholds */ + dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; + dbs_tuners_ins.down_differential = + MICRO_FREQUENCY_DOWN_DIFFERENTIAL; + /* + * In no_hz/micro accounting case we set the minimum frequency + * not depending on HZ, but fixed (very low). The deferred + * timer might skip some samples if idle/sleeping as needed. + */ + min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; + } else { + /* For correct statistics, we need 10 ticks for each measure */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + } + +#ifdef CONFIG_HAS_EARLYSUSPEND + register_early_suspend(&lazy_suspend); +#endif + + return cpufreq_register_governor(&cpufreq_gov_lazy); +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_lazy); +} + + +MODULE_AUTHOR("Venkatesh Pallipadi "); +MODULE_AUTHOR("Alexey Starikovskiy "); +MODULE_AUTHOR("Ezekeel "); +MODULE_DESCRIPTION("'cpufreq_lazy' - A dynamic cpufreq governor for " + "Low Latency Frequency Transition capable processors"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_LAZY +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index a301beed..dceb54ca 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -382,6 +382,9 @@ extern struct cpufreq_governor cpufreq_gov_lulzactive; #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS) extern struct cpufreq_governor cpufreq_gov_smartass; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_smartass) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_LAZY) +extern struct cpufreq_governor cpufreq_gov_lazy; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_lazy) #endif From 0c43aba71595c7056ac8039839c428b3ef4d8e39 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 09:34:30 -0400 Subject: [PATCH 09/35] Fixed compile error (Lazy) Conflicts: include/linux/cpufreq.h --- drivers/cpufreq/Makefile | 3 --- include/linux/cpufreq.h | 7 ++++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index b529a80f..da12e97c 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -22,17 +22,14 @@ obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o -<<<<<<< HEAD obj-$(CONFIG_CPU_FREQ_GOV_MINMAX) += cpufreq_minmax.o obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS2) += cpufreq_smartass2.o obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS) += cpufreq_smartass.o obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVEX) += cpufreq_interactivex.o obj-$(CONFIG_CPU_FREQ_GOV_LAGFREE) += cpufreq_lagfree.o obj-$(CONFIG_CPU_FREQ_GOV_LULZACTIVE) += cpufreq_lulzactive.o -======= obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o obj-$(CONFIG_CPU_FREQ_GOV_LAZY) += cpufreq_lazy.o ->>>>>>> 17381f7... Added CPUfreq governor 'lazy'. 
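A little earlier in this patch, include/linux/cpufreq.h gained a CONFIG_CPU_FREQ_DEFAULT_GOV_LAZY branch in the #elif chain that selects the build-time default governor: whichever CONFIG_CPU_FREQ_DEFAULT_GOV_* symbol Kconfig enables is the single branch that gets to define CPUFREQ_DEFAULT_GOVERNOR. The minimal stand-alone illustration below is not part of the patch, and its config macros and governor strings are stand-ins for the real symbols.

/*
 * Illustrative sketch, not part of the patch: the compiled-in default
 * governor is chosen by a plain preprocessor #elif chain, so exactly one
 * enabled configuration symbol defines the selection macro.  The names
 * here are stand-ins for the real CONFIG_CPU_FREQ_DEFAULT_GOV_* symbols.
 */
#include <stdio.h>

#define CONFIG_EXAMPLE_DEFAULT_GOV_LAZY 1       /* pretend Kconfig chose 'lazy' */

#if defined(CONFIG_EXAMPLE_DEFAULT_GOV_ONDEMAND)
#define EXAMPLE_DEFAULT_GOVERNOR "ondemand"
#elif defined(CONFIG_EXAMPLE_DEFAULT_GOV_LAZY)
#define EXAMPLE_DEFAULT_GOVERNOR "lazy"
#else
#define EXAMPLE_DEFAULT_GOVERNOR "performance"  /* stand-in fallback */
#endif

int main(void)
{
        printf("compiled-in default governor: %s\n", EXAMPLE_DEFAULT_GOVERNOR);
        return 0;
}

Because only one branch survives preprocessing, every governor that can be a default has to be added both to the Kconfig choice block and to this chain, which is why each governor patch in this series touches include/linux/cpufreq.h as well as drivers/cpufreq.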
# CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index dceb54ca..ec8dea51 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -396,8 +396,9 @@ extern struct cpufreq_governor cpufreq_gov_lazy; #define CPUFREQ_TABLE_END ~1 struct cpufreq_frequency_table { - unsigned int index; - unsigned int frequency; + unsigned int index; /* any */ + unsigned int frequency; /* kHz - doesn't need to be in ascending + * order */ }; int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, @@ -426,4 +427,4 @@ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, void cpufreq_frequency_table_put_attr(unsigned int cpu); -#endif +#endif /* _LINUX_CPUFREQ_H */ From ea71572ec635fa9907b87b6b0892cf52ee6804bf Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 09:41:44 -0400 Subject: [PATCH 10/35] Added Intellidemand, Scary Governor Conflicts: drivers/cpufreq/cpufreq_intellidemand.c --- drivers/cpufreq/cpufreq_scary.c | 744 ++++++++++++++++++++++++++++++++ 1 file changed, 744 insertions(+) create mode 100644 drivers/cpufreq/cpufreq_scary.c diff --git a/drivers/cpufreq/cpufreq_scary.c b/drivers/cpufreq/cpufreq_scary.c new file mode 100644 index 00000000..c5670f97 --- /dev/null +++ b/drivers/cpufreq/cpufreq_scary.c @@ -0,0 +1,744 @@ +/* + Scary governor based off of conservatives source with some of smartasses features + + For devs - If you're going to port this driver to other devices, make sure to edit the default sleep frequencies & prev frequencies or else you might be going outside your devices hardware limits. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_UP_THRESHOLD (80) +#define DEF_FREQUENCY_DOWN_THRESHOLD (45) +#define DEFAULT_SLEEP_MAX_FREQ 245760 +#define DEFAULT_SLEEP_MIN_FREQ 122880 +#define DEFAULT_SLEEP_PREV_FREQ 122880 //This is so that if there are any issues resulting in sleep_prev_freq getting set, there will be a backup freq +#define DEFAULT_PREV_MAX 614400 +static unsigned int suspended; +static unsigned int sleep_max_freq=DEFAULT_SLEEP_MAX_FREQ; +static unsigned int sleep_min_freq=DEFAULT_SLEEP_MIN_FREQ; +static unsigned int sleep_prev_freq=DEFAULT_SLEEP_PREV_FREQ; +static unsigned int sleep_prev_max=DEFAULT_PREV_MAX; + +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. + * All times here are in uS. 
+ */ +#define MIN_SAMPLING_RATE_RATIO (2) + +static unsigned int min_sampling_rate; + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define DEF_SAMPLING_DOWN_FACTOR (1) +#define MAX_SAMPLING_DOWN_FACTOR (10) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + + +static void do_dbs_timer(struct work_struct *work); + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + unsigned int down_skip; + unsigned int requested_freq; + int cpu; + unsigned int enable:1; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on + * different CPUs. It protects dbs_enable in governor start/stop. + */ +static DEFINE_MUTEX(dbs_mutex); + +static struct workqueue_struct *kconservative_wq; + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int sampling_down_factor; + unsigned int up_threshold; + unsigned int down_threshold; + unsigned int ignore_nice; + unsigned int freq_step; +} dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .ignore_nice = 0, + .freq_step = 5, +}; + +static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, + cputime64_t *wall) +{ + cputime64_t idle_time; + cputime64_t cur_wall_time; + cputime64_t busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, + kstat_cpu(cpu).cpustat.system); + + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); + + idle_time = cputime64_sub(cur_wall_time, busy_time); + if (wall) + *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); + + return (cputime64_t)jiffies_to_usecs(idle_time);; +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, wall); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + + return idle_time; +} + +/* keep track of frequency transitions */ +static int +dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_freqs *freq = data; + struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info, + freq->cpu); + + struct cpufreq_policy *policy; + + if (!this_dbs_info->enable) + return 0; + + policy = this_dbs_info->cur_policy; + + /* + * we only care if our internally tracked freq moves outside + * the 'valid' ranges of freqency available to us otherwise + * we do not change it + */ + if (this_dbs_info->requested_freq > policy->max + || this_dbs_info->requested_freq < policy->min) + this_dbs_info->requested_freq = freq->new; + + return 0; +} + +static struct notifier_block dbs_cpufreq_notifier_block = { + .notifier_call = dbs_cpufreq_notifier +}; + +/************************** sysfs interface ************************/ +static ssize_t 
show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) +{ + printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max " + "sysfs file is deprecated - used by: %s\n", current->comm); + return sprintf(buf, "%u\n", -1U); +} + +static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +#define define_one_ro(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +define_one_ro(sampling_rate_max); +define_one_ro(sampling_rate_min); + +/* cpufreq_conservative Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct cpufreq_policy *unused, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(sampling_down_factor, sampling_down_factor); +show_one(up_threshold, up_threshold); +show_one(down_threshold, down_threshold); +show_one(ignore_nice_load, ignore_nice); +show_one(freq_step, freq_step); + +static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.sampling_down_factor = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_sampling_rate(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_up_threshold(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + mutex_lock(&dbs_mutex); + if (ret != 1 || input > 100 || + input <= dbs_tuners_ins.down_threshold) { + mutex_unlock(&dbs_mutex); + return -EINVAL; + } + + dbs_tuners_ins.up_threshold = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_down_threshold(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + mutex_lock(&dbs_mutex); + /* cannot be lower than 11 otherwise freq will not fall */ + if (ret != 1 || input < 11 || input > 100 || + input >= dbs_tuners_ins.up_threshold) { + mutex_unlock(&dbs_mutex); + return -EINVAL; + } + + dbs_tuners_ins.down_threshold = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + mutex_lock(&dbs_mutex); + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + mutex_unlock(&dbs_mutex); + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(cs_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; + } + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_freq_step(struct 
cpufreq_policy *policy, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input > 100) + input = 100; + + /* no need to test here if freq_step is zero as the user might actually + * want this, they would be crazy though :) */ + mutex_lock(&dbs_mutex); + dbs_tuners_ins.freq_step = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +#define define_one_rw(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0644, show_##_name, store_##_name) + +define_one_rw(sampling_rate); +define_one_rw(sampling_down_factor); +define_one_rw(up_threshold); +define_one_rw(down_threshold); +define_one_rw(ignore_nice_load); +define_one_rw(freq_step); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_max.attr, + &sampling_rate_min.attr, + &sampling_rate.attr, + &sampling_down_factor.attr, + &up_threshold.attr, + &down_threshold.attr, + &ignore_nice_load.attr, + &freq_step.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "scary", +}; + +/************************** sysfs end ************************/ + +/********** Porting smartass code for suspension**********/ +static void smartass_suspend(int cpu, int suspend) +{ + struct cpu_dbs_info_s *this_smartass = &per_cpu(cs_cpu_dbs_info, smp_processor_id()); + struct cpufreq_policy *policy = this_smartass->cur_policy; + unsigned int new_freq; + + if (!this_smartass->enable || sleep_max_freq==0) // disable behavior for sleep_max_freq==0 + return; + + if (suspend) + { + //If the current min speed is greater than the max sleep, we reset the min to 120mhz, for battery savings + if (policy->min >= sleep_max_freq) + { + sleep_prev_freq=policy->min; + policy->min= sleep_min_freq; + } + if (policy->max > sleep_max_freq) + { + sleep_prev_max=policy->max; + policy->max=sleep_max_freq; + } + if (policy->cur > sleep_max_freq) + { + new_freq = sleep_max_freq; + if (new_freq > policy->max) + new_freq = policy->max; + if (new_freq < policy->min) + new_freq = policy->min; + __cpufreq_driver_target(policy, new_freq,CPUFREQ_RELATION_H); + } + + } + else //Resetting the min speed + { + if (policy->min < sleep_prev_freq) + policy->min=sleep_prev_freq; + if (policy->max < sleep_prev_max) + policy->max=sleep_prev_max; + } + +} + +static void smartass_early_suspend(struct early_suspend *handler) +{ + int i; + suspended = 1; + for_each_online_cpu(i) + smartass_suspend(i,1); +} + +static void smartass_late_resume(struct early_suspend *handler) +{ + int i; + suspended = 0; + for_each_online_cpu(i) + smartass_suspend(i,0); +} + +static struct early_suspend smartass_power_suspend = +{ + .suspend = smartass_early_suspend, + .resume = smartass_late_resume, +}; + + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + //Current freq +// unsigned int new_freq; + unsigned int load = 0; + unsigned int freq_target; + + struct cpufreq_policy *policy; + unsigned int j; + + policy = this_dbs_info->cur_policy; + + /* + * Every sampling_rate, we check, if current idle time is less + * than 20% (default), then we try to increase frequency + * Every sampling_rate*sampling_down_factor, we check, if current + * idle time is more than 80%, then we try to decrease frequency + * + * Any frequency increase takes it to the maximum frequency. 
+ * Frequency reduction happens at minimum steps of + * 5% (default) of maximum frequency + */ + + /* Get Absolute Load */ + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time; + unsigned int idle_time, wall_time; + + j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + if (dbs_tuners_ins.ignore_nice) { + cputime64_t cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, + j_dbs_info->prev_cpu_nice); + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + } + + /* + * break out if we 'cannot' reduce the speed as the user might + * want freq_step to be zero + */ + if (dbs_tuners_ins.freq_step == 0) + return; + + /* Check for frequency increase */ + if (load > dbs_tuners_ins.up_threshold) + { + this_dbs_info->down_skip = 0; + + /* if we are already at full speed then break out early */ + if (this_dbs_info->requested_freq == policy->max) + return; + freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; + /* max freq cannot be less than 100. but who knows.... */ + if (unlikely(freq_target == 0)) + freq_target = 5; + + this_dbs_info->requested_freq += freq_target; + if (this_dbs_info->requested_freq > policy->max) + this_dbs_info->requested_freq = policy->max; + + __cpufreq_driver_target(policy, this_dbs_info->requested_freq,CPUFREQ_RELATION_H); + + return; + } + + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus 10 points under the threshold. 
+ */ + if (load < (dbs_tuners_ins.down_threshold - 10)) { + freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; + + this_dbs_info->requested_freq -= freq_target; + if (this_dbs_info->requested_freq < policy->min) + this_dbs_info->requested_freq = policy->min; + + /* + * if we cannot reduce the frequency anymore, break out early + */ + if (policy->cur == policy->min) + return; + + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, + CPUFREQ_RELATION_H); + return; + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + delay -= jiffies % delay; + + mutex_lock(&dbs_info->timer_mutex); + + dbs_check_cpu(dbs_info); + + queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + delay -= jiffies % delay; + + dbs_info->enable = 1; + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work, + delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + dbs_info->enable = 0; + cancel_delayed_work_sync(&dbs_info->work); +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + suspended=0; + + this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_mutex); + + rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kstat_cpu(j).cpustat.nice; + } + } + this_dbs_info->down_skip = 0; + this_dbs_info->requested_freq = policy->cur; + + mutex_init(&this_dbs_info->timer_mutex); + dbs_enable++; + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + /* policy latency is in nS. 
Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + + /* + * conservative does not implement micro like ondemand + * governor, thus we are bound to jiffes/HZ + */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = + max(min_sampling_rate, + latency * LATENCY_MULTIPLIER); + + cpufreq_register_notifier( + &dbs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + mutex_unlock(&dbs_mutex); + + dbs_timer_init(this_dbs_info); + + break; + + case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + sysfs_remove_group(&policy->kobj, &dbs_attr_group); + dbs_enable--; + mutex_destroy(&this_dbs_info->timer_mutex); + + /* + * Stop the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 0) + cpufreq_unregister_notifier( + &dbs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + mutex_unlock(&dbs_mutex); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target( + this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target( + this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&this_dbs_info->timer_mutex); + + break; + } + return 0; +} + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_Scary +static +#endif +struct cpufreq_governor cpufreq_gov_scary = { + .name = "Scary", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +static int __init cpufreq_gov_dbs_init(void) +{ + int err; + + kconservative_wq = create_workqueue("kconservative"); + if (!kconservative_wq) { + printk(KERN_ERR "Creation of kconservative failed\n"); + return -EFAULT; + } + register_early_suspend(&smartass_power_suspend); + err = cpufreq_register_governor(&cpufreq_gov_scary); + if (err) + destroy_workqueue(kconservative_wq); + + return err; +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_scary); + destroy_workqueue(kconservative_wq); +} + + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCARY +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); + From 26eb4d4d647ba9605db70f52b56eb22e57349dda Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 09:42:32 -0400 Subject: [PATCH 11/35] Revert "Added Intellidemand, Scary Governor" This reverts commit ea71572ec635fa9907b87b6b0892cf52ee6804bf. --- drivers/cpufreq/cpufreq_scary.c | 744 -------------------------------- 1 file changed, 744 deletions(-) delete mode 100644 drivers/cpufreq/cpufreq_scary.c diff --git a/drivers/cpufreq/cpufreq_scary.c b/drivers/cpufreq/cpufreq_scary.c deleted file mode 100644 index c5670f97..00000000 --- a/drivers/cpufreq/cpufreq_scary.c +++ /dev/null @@ -1,744 +0,0 @@ -/* - Scary governor based off of conservatives source with some of smartasses features - - For devs - If you're going to port this driver to other devices, make sure to edit the default sleep frequencies & prev frequencies or else you might be going outside your devices hardware limits. 
-*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ - -#define DEF_FREQUENCY_UP_THRESHOLD (80) -#define DEF_FREQUENCY_DOWN_THRESHOLD (45) -#define DEFAULT_SLEEP_MAX_FREQ 245760 -#define DEFAULT_SLEEP_MIN_FREQ 122880 -#define DEFAULT_SLEEP_PREV_FREQ 122880 //This is so that if there are any issues resulting in sleep_prev_freq getting set, there will be a backup freq -#define DEFAULT_PREV_MAX 614400 -static unsigned int suspended; -static unsigned int sleep_max_freq=DEFAULT_SLEEP_MAX_FREQ; -static unsigned int sleep_min_freq=DEFAULT_SLEEP_MIN_FREQ; -static unsigned int sleep_prev_freq=DEFAULT_SLEEP_PREV_FREQ; -static unsigned int sleep_prev_max=DEFAULT_PREV_MAX; - -/* - * The polling frequency of this governor depends on the capability of - * the processor. Default polling frequency is 1000 times the transition - * latency of the processor. The governor will work on any processor with - * transition latency <= 10mS, using appropriate sampling - * rate. - * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) - * this governor will not work. - * All times here are in uS. - */ -#define MIN_SAMPLING_RATE_RATIO (2) - -static unsigned int min_sampling_rate; - -#define LATENCY_MULTIPLIER (1000) -#define MIN_LATENCY_MULTIPLIER (100) -#define DEF_SAMPLING_DOWN_FACTOR (1) -#define MAX_SAMPLING_DOWN_FACTOR (10) -#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) - - -static void do_dbs_timer(struct work_struct *work); - -struct cpu_dbs_info_s { - cputime64_t prev_cpu_idle; - cputime64_t prev_cpu_wall; - cputime64_t prev_cpu_nice; - struct cpufreq_policy *cur_policy; - struct delayed_work work; - unsigned int down_skip; - unsigned int requested_freq; - int cpu; - unsigned int enable:1; - /* - * percpu mutex that serializes governor limit change with - * do_dbs_timer invocation. We do not want do_dbs_timer to run - * when user is changing the governor or limits. - */ - struct mutex timer_mutex; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info); - -static unsigned int dbs_enable; /* number of CPUs using this policy */ - -/* - * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on - * different CPUs. It protects dbs_enable in governor start/stop. 
- */ -static DEFINE_MUTEX(dbs_mutex); - -static struct workqueue_struct *kconservative_wq; - -static struct dbs_tuners { - unsigned int sampling_rate; - unsigned int sampling_down_factor; - unsigned int up_threshold; - unsigned int down_threshold; - unsigned int ignore_nice; - unsigned int freq_step; -} dbs_tuners_ins = { - .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, - .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, - .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, - .ignore_nice = 0, - .freq_step = 5, -}; - -static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, - cputime64_t *wall) -{ - cputime64_t idle_time; - cputime64_t cur_wall_time; - cputime64_t busy_time; - - cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); - busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, - kstat_cpu(cpu).cpustat.system); - - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); - - idle_time = cputime64_sub(cur_wall_time, busy_time); - if (wall) - *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); - - return (cputime64_t)jiffies_to_usecs(idle_time);; -} - -static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) -{ - u64 idle_time = get_cpu_idle_time_us(cpu, wall); - - if (idle_time == -1ULL) - return get_cpu_idle_time_jiffy(cpu, wall); - - return idle_time; -} - -/* keep track of frequency transitions */ -static int -dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, - void *data) -{ - struct cpufreq_freqs *freq = data; - struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info, - freq->cpu); - - struct cpufreq_policy *policy; - - if (!this_dbs_info->enable) - return 0; - - policy = this_dbs_info->cur_policy; - - /* - * we only care if our internally tracked freq moves outside - * the 'valid' ranges of freqency available to us otherwise - * we do not change it - */ - if (this_dbs_info->requested_freq > policy->max - || this_dbs_info->requested_freq < policy->min) - this_dbs_info->requested_freq = freq->new; - - return 0; -} - -static struct notifier_block dbs_cpufreq_notifier_block = { - .notifier_call = dbs_cpufreq_notifier -}; - -/************************** sysfs interface ************************/ -static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) -{ - printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max " - "sysfs file is deprecated - used by: %s\n", current->comm); - return sprintf(buf, "%u\n", -1U); -} - -static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%u\n", min_sampling_rate); -} - -#define define_one_ro(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0444, show_##_name, NULL) - -define_one_ro(sampling_rate_max); -define_one_ro(sampling_rate_min); - -/* cpufreq_conservative Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct cpufreq_policy *unused, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ -} -show_one(sampling_rate, sampling_rate); -show_one(sampling_down_factor, sampling_down_factor); -show_one(up_threshold, up_threshold); -show_one(down_threshold, down_threshold); -show_one(ignore_nice_load, ignore_nice); -show_one(freq_step, freq_step); - -static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, 
- const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.sampling_down_factor = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_sampling_rate(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_up_threshold(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - mutex_lock(&dbs_mutex); - if (ret != 1 || input > 100 || - input <= dbs_tuners_ins.down_threshold) { - mutex_unlock(&dbs_mutex); - return -EINVAL; - } - - dbs_tuners_ins.up_threshold = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_down_threshold(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - mutex_lock(&dbs_mutex); - /* cannot be lower than 11 otherwise freq will not fall */ - if (ret != 1 || input < 11 || input > 100 || - input >= dbs_tuners_ins.up_threshold) { - mutex_unlock(&dbs_mutex); - return -EINVAL; - } - - dbs_tuners_ins.down_threshold = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - unsigned int j; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - if (input > 1) - input = 1; - - mutex_lock(&dbs_mutex); - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - mutex_unlock(&dbs_mutex); - return count; - } - dbs_tuners_ins.ignore_nice = input; - - /* we need to re-evaluate prev_cpu_idle */ - for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; - dbs_info = &per_cpu(cs_cpu_dbs_info, j); - dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; - } - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_freq_step(struct cpufreq_policy *policy, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - if (input > 100) - input = 100; - - /* no need to test here if freq_step is zero as the user might actually - * want this, they would be crazy though :) */ - mutex_lock(&dbs_mutex); - dbs_tuners_ins.freq_step = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -#define define_one_rw(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0644, show_##_name, store_##_name) - -define_one_rw(sampling_rate); -define_one_rw(sampling_down_factor); -define_one_rw(up_threshold); -define_one_rw(down_threshold); -define_one_rw(ignore_nice_load); -define_one_rw(freq_step); - -static struct attribute *dbs_attributes[] = { - &sampling_rate_max.attr, - &sampling_rate_min.attr, - &sampling_rate.attr, - &sampling_down_factor.attr, - &up_threshold.attr, - &down_threshold.attr, - &ignore_nice_load.attr, - &freq_step.attr, - NULL -}; - -static struct attribute_group dbs_attr_group = { - .attrs = dbs_attributes, - .name = "scary", -}; - 
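For reference, the store handlers above are exposed through the per-policy attribute group named "scary", so while this governor is active they appear under /sys/devices/system/cpu/cpu0/cpufreq/scary/. A minimal user-space sketch of driving them; the path and the values written are illustrative assumptions, not part of the patch:

#include <stdio.h>

static int write_tunable(const char *name, unsigned int val)
{
	char path[256];
	FILE *f;

	/* Assumes cpu0 and that the "scary" attribute group has been created. */
	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu0/cpufreq/scary/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%u\n", val);
	return fclose(f);
}

int main(void)
{
	write_tunable("up_threshold", 85);
	/* store_down_threshold above rejects values < 11 or >= up_threshold. */
	write_tunable("down_threshold", 40);
	/* freq_step is a percentage of policy->max; 0 effectively pins the frequency. */
	write_tunable("freq_step", 10);
	return 0;
}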
-/************************** sysfs end ************************/ - -/********** Porting smartass code for suspension**********/ -static void smartass_suspend(int cpu, int suspend) -{ - struct cpu_dbs_info_s *this_smartass = &per_cpu(cs_cpu_dbs_info, smp_processor_id()); - struct cpufreq_policy *policy = this_smartass->cur_policy; - unsigned int new_freq; - - if (!this_smartass->enable || sleep_max_freq==0) // disable behavior for sleep_max_freq==0 - return; - - if (suspend) - { - //If the current min speed is greater than the max sleep, we reset the min to 120mhz, for battery savings - if (policy->min >= sleep_max_freq) - { - sleep_prev_freq=policy->min; - policy->min= sleep_min_freq; - } - if (policy->max > sleep_max_freq) - { - sleep_prev_max=policy->max; - policy->max=sleep_max_freq; - } - if (policy->cur > sleep_max_freq) - { - new_freq = sleep_max_freq; - if (new_freq > policy->max) - new_freq = policy->max; - if (new_freq < policy->min) - new_freq = policy->min; - __cpufreq_driver_target(policy, new_freq,CPUFREQ_RELATION_H); - } - - } - else //Resetting the min speed - { - if (policy->min < sleep_prev_freq) - policy->min=sleep_prev_freq; - if (policy->max < sleep_prev_max) - policy->max=sleep_prev_max; - } - -} - -static void smartass_early_suspend(struct early_suspend *handler) -{ - int i; - suspended = 1; - for_each_online_cpu(i) - smartass_suspend(i,1); -} - -static void smartass_late_resume(struct early_suspend *handler) -{ - int i; - suspended = 0; - for_each_online_cpu(i) - smartass_suspend(i,0); -} - -static struct early_suspend smartass_power_suspend = -{ - .suspend = smartass_early_suspend, - .resume = smartass_late_resume, -}; - - -static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) -{ - //Current freq -// unsigned int new_freq; - unsigned int load = 0; - unsigned int freq_target; - - struct cpufreq_policy *policy; - unsigned int j; - - policy = this_dbs_info->cur_policy; - - /* - * Every sampling_rate, we check, if current idle time is less - * than 20% (default), then we try to increase frequency - * Every sampling_rate*sampling_down_factor, we check, if current - * idle time is more than 80%, then we try to decrease frequency - * - * Any frequency increase takes it to the maximum frequency. 
- * Frequency reduction happens at minimum steps of - * 5% (default) of maximum frequency - */ - - /* Get Absolute Load */ - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - cputime64_t cur_wall_time, cur_idle_time; - unsigned int idle_time, wall_time; - - j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); - - cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); - - wall_time = (unsigned int) cputime64_sub(cur_wall_time, - j_dbs_info->prev_cpu_wall); - j_dbs_info->prev_cpu_wall = cur_wall_time; - - idle_time = (unsigned int) cputime64_sub(cur_idle_time, - j_dbs_info->prev_cpu_idle); - j_dbs_info->prev_cpu_idle = cur_idle_time; - - if (dbs_tuners_ins.ignore_nice) { - cputime64_t cur_nice; - unsigned long cur_nice_jiffies; - - cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, - j_dbs_info->prev_cpu_nice); - /* - * Assumption: nice time between sampling periods will - * be less than 2^32 jiffies for 32 bit sys - */ - cur_nice_jiffies = (unsigned long) - cputime64_to_jiffies64(cur_nice); - - j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; - idle_time += jiffies_to_usecs(cur_nice_jiffies); - } - - if (unlikely(!wall_time || wall_time < idle_time)) - continue; - - load = 100 * (wall_time - idle_time) / wall_time; - } - - /* - * break out if we 'cannot' reduce the speed as the user might - * want freq_step to be zero - */ - if (dbs_tuners_ins.freq_step == 0) - return; - - /* Check for frequency increase */ - if (load > dbs_tuners_ins.up_threshold) - { - this_dbs_info->down_skip = 0; - - /* if we are already at full speed then break out early */ - if (this_dbs_info->requested_freq == policy->max) - return; - freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; - /* max freq cannot be less than 100. but who knows.... */ - if (unlikely(freq_target == 0)) - freq_target = 5; - - this_dbs_info->requested_freq += freq_target; - if (this_dbs_info->requested_freq > policy->max) - this_dbs_info->requested_freq = policy->max; - - __cpufreq_driver_target(policy, this_dbs_info->requested_freq,CPUFREQ_RELATION_H); - - return; - } - - /* - * The optimal frequency is the frequency that is the lowest that - * can support the current CPU usage without triggering the up - * policy. To be safe, we focus 10 points under the threshold. 
- */ - if (load < (dbs_tuners_ins.down_threshold - 10)) { - freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; - - this_dbs_info->requested_freq -= freq_target; - if (this_dbs_info->requested_freq < policy->min) - this_dbs_info->requested_freq = policy->min; - - /* - * if we cannot reduce the frequency anymore, break out early - */ - if (policy->cur == policy->min) - return; - - __cpufreq_driver_target(policy, this_dbs_info->requested_freq, - CPUFREQ_RELATION_H); - return; - } -} - -static void do_dbs_timer(struct work_struct *work) -{ - struct cpu_dbs_info_s *dbs_info = - container_of(work, struct cpu_dbs_info_s, work.work); - unsigned int cpu = dbs_info->cpu; - - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - delay -= jiffies % delay; - - mutex_lock(&dbs_info->timer_mutex); - - dbs_check_cpu(dbs_info); - - queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay); - mutex_unlock(&dbs_info->timer_mutex); -} - -static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) -{ - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - delay -= jiffies % delay; - - dbs_info->enable = 1; - INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); - queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work, - delay); -} - -static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) -{ - dbs_info->enable = 0; - cancel_delayed_work_sync(&dbs_info->work); -} - -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int j; - int rc; - suspended=0; - - this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - mutex_lock(&dbs_mutex); - - rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) { - j_dbs_info->prev_cpu_nice = - kstat_cpu(j).cpustat.nice; - } - } - this_dbs_info->down_skip = 0; - this_dbs_info->requested_freq = policy->cur; - - mutex_init(&this_dbs_info->timer_mutex); - dbs_enable++; - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 1) { - unsigned int latency; - /* policy latency is in nS. 
Convert it to uS first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - - /* - * conservative does not implement micro like ondemand - * governor, thus we are bound to jiffes/HZ - */ - min_sampling_rate = - MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); - /* Bring kernel and HW constraints together */ - min_sampling_rate = max(min_sampling_rate, - MIN_LATENCY_MULTIPLIER * latency); - dbs_tuners_ins.sampling_rate = - max(min_sampling_rate, - latency * LATENCY_MULTIPLIER); - - cpufreq_register_notifier( - &dbs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - } - mutex_unlock(&dbs_mutex); - - dbs_timer_init(this_dbs_info); - - break; - - case CPUFREQ_GOV_STOP: - dbs_timer_exit(this_dbs_info); - - mutex_lock(&dbs_mutex); - sysfs_remove_group(&policy->kobj, &dbs_attr_group); - dbs_enable--; - mutex_destroy(&this_dbs_info->timer_mutex); - - /* - * Stop the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 0) - cpufreq_unregister_notifier( - &dbs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - - mutex_unlock(&dbs_mutex); - - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&this_dbs_info->timer_mutex); - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target( - this_dbs_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target( - this_dbs_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); - mutex_unlock(&this_dbs_info->timer_mutex); - - break; - } - return 0; -} - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_Scary -static -#endif -struct cpufreq_governor cpufreq_gov_scary = { - .name = "Scary", - .governor = cpufreq_governor_dbs, - .max_transition_latency = TRANSITION_LATENCY_LIMIT, - .owner = THIS_MODULE, -}; - -static int __init cpufreq_gov_dbs_init(void) -{ - int err; - - kconservative_wq = create_workqueue("kconservative"); - if (!kconservative_wq) { - printk(KERN_ERR "Creation of kconservative failed\n"); - return -EFAULT; - } - register_early_suspend(&smartass_power_suspend); - err = cpufreq_register_governor(&cpufreq_gov_scary); - if (err) - destroy_workqueue(kconservative_wq); - - return err; -} - -static void __exit cpufreq_gov_dbs_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_scary); - destroy_workqueue(kconservative_wq); -} - - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCARY -fs_initcall(cpufreq_gov_dbs_init); -#else -module_init(cpufreq_gov_dbs_init); -#endif -module_exit(cpufreq_gov_dbs_exit); - From 7f1503e80dde3441bb4d788ed5ddcc1f3e143b5d Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 09:57:04 -0400 Subject: [PATCH 12/35] Added OndemandX governor and cleanup of cpu driver files Conflicts: drivers/cpufreq/Kconfig drivers/cpufreq/Makefile include/linux/cpufreq.h --- drivers/cpufreq/Kconfig | 80 ++- drivers/cpufreq/Makefile | 6 + drivers/cpufreq/cpufreq_ondemandx.c | 829 ++++++++++++++++++++++++++++ drivers/cpufreq/cpufreq_smartass2.c | 22 +- include/linux/cpufreq.h | 15 + 5 files changed, 921 insertions(+), 31 deletions(-) create mode 100644 drivers/cpufreq/cpufreq_ondemandx.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index ba4d2b48..3d6183ff 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -305,31 +305,41 @@ config CPU_FREQ_DEFAULT_GOV_SMARTASS endchoice -config CPU_FREQ_GOV_DANCEDANCE - tristate "'dancedance' cpufreq governor" - depends on CPU_FREQ +config CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND + bool "intellidemand" + select 
CPU_FREQ_GOV_INTELLIDEMAND + select CPU_FREQ_GOV_PERFORMANCE + help + Intelligent OnDemand Governor based on Samsung Patched OnDemand -config CPU_FREQ_GOV_NIGHTMARE - tristate "'nightmare' cpufreq governor" - depends on CPU_FREQ +config CPU_FREQ_DEFAULT_GOV_SCARY + bool "scary" + select CPU_FREQ_GOV_SCARY + select CPU_FREQ_GOV_SCARY + help + Use the CPUFreq governor 'scary' as default. -config CPU_FREQ_GOV_ONDEMAND - tristate "'ondemand' cpufreq policy governor" - select CPU_FREQ_TABLE - help - 'ondemand' - This driver adds a dynamic cpufreq policy governor. - The governor does a periodic polling and - changes frequency based on the CPU utilization. - The support for this governor depends on CPU capability to - do fast frequency switching (i.e, very low latency frequency - transitions). +config CPU_FREQ_DEFAULT_GOV_LIONHEART + bool "lionheart" + select CPU_FREQ_GOV_LIONHEART + help + Use the CPUFreq governor 'lionheart' as default. - To compile this driver as a module, choose M here: the - module will be called cpufreq_ondemand. +config CPU_FREQ_DEFAULT_GOV_SAVAGEDZEN + bool "savagedzen" + select CPU_FREQ_GOV_SAVAGEDZEN + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'savagedzen' as default. - For details, take a look at linux/Documentation/cpu-freq. +config CPU_FREQ_DEFAULT_GOV_ONDEMANDX + bool "ondemandx" + select CPU_FREQ_GOV_ONDEMANDX + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'ondemandx' as default. - If in doubt, say N. +endchoice config CPU_FREQ_GOV_PERFORMANCE tristate "'performance' governor" @@ -505,10 +515,40 @@ config CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER Sampling latency rate multiplied by the cpu switch latency. Affects governor polling. +config CPU_FREQ_GOV_SCARY + tristate "'scary' cpufreq governor" + depends on CPU_FREQ + help + scary - a governor for cabbages + + If in doubt, say N. 
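These tristate options only make the governors buildable; which governor actually runs is still chosen per policy at runtime through scaling_governor. A minimal user-space sketch of switching to one of the newly added governors, assuming the standard cpufreq sysfs layout and that the governor was built in (=y); the choice of "ondemandx" here is only an example:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char avail[512] = "";
	FILE *f;

	f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors", "r");
	if (f) {
		if (!fgets(avail, sizeof(avail), f))
			avail[0] = '\0';
		fclose(f);
	}

	/* Only request the governor if the running kernel actually offers it. */
	if (strstr(avail, "ondemandx")) {
		f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", "w");
		if (f) {
			fputs("ondemandx\n", f);
			fclose(f);
		}
	}
	return 0;
}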
+ config CPU_FREQ_GOV_LAZY tristate "'lazy' cpufreq governor" depends on CPU_FREQ +config CPU_FREQ_GOV_INTELLIDEMAND + tristate "'intellidemand' cpufreq governor" + depends on CPU_FREQ + help + 'intellidemand' - an intelligent ondemand governor + +config CPU_FREQ_GOV_LIONHEART + tristate "'lionheart' cpufreq governor" + depends on CPU_FREQ + +config CPU_FREQ_GOV_WHEATLEY + tristate "'wheatley' cpufreq governor" + depends on CPU_FREQ + +config CPU_FREQ_GOV_SAVAGEDZEN + tristate "'savagedzen' cpufreq governor" + depends on CPU_FREQ + +config CPU_FREQ_GOV_ONDEMANDX + tristate "'ondemandx' cpufreq governor" + depends on CPU_FREQ + menu "x86 CPU frequency scaling drivers" depends on X86 source "drivers/cpufreq/Kconfig.x86" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index da12e97c..166a086e 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -30,6 +30,12 @@ obj-$(CONFIG_CPU_FREQ_GOV_LAGFREE) += cpufreq_lagfree.o obj-$(CONFIG_CPU_FREQ_GOV_LULZACTIVE) += cpufreq_lulzactive.o obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o obj-$(CONFIG_CPU_FREQ_GOV_LAZY) += cpufreq_lazy.o +obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND)+= cpufreq_intellidemand.o +obj-$(CONFIG_CPU_FREQ_GOV_SCARY) += cpufreq_scary.o +obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o +obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o +obj-$(CONFIG_CPU_FREQ_GOV_SAVAGEDZEN) += cpufreq_savagedzen.o +obj-$(CONFIG_CPU_FREQ_GOV_ONDEMANDX) += cpufreq_ondemandx.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o diff --git a/drivers/cpufreq/cpufreq_ondemandx.c b/drivers/cpufreq/cpufreq_ondemandx.c new file mode 100644 index 00000000..31e9b99a --- /dev/null +++ b/drivers/cpufreq/cpufreq_ondemandx.c @@ -0,0 +1,829 @@ +/* + * drivers/cpufreq/cpufreq_ondemandx.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (5) +#define DEF_FREQUENCY_UP_THRESHOLD (85) +#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (10) +#define MICRO_FREQUENCY_UP_THRESHOLD (80) +#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) +#define MIN_FREQUENCY_UP_THRESHOLD (11) +#define MAX_FREQUENCY_UP_THRESHOLD (100) +#define DEF_SUSPEND_FREQ (500000) + +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. + * All times here are in uS. 
+ */ +#define MIN_SAMPLING_RATE_RATIO (1) + +static unsigned int min_sampling_rate; + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMANDX +static +#endif +struct cpufreq_governor cpufreq_gov_ondemandx = { + .name = "ondemandx", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_lo; + unsigned int freq_lo_jiffies; + unsigned int freq_hi_jiffies; + int cpu; + unsigned int sample_type:1; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on + * different CPUs. It protects dbs_enable in governor start/stop. + */ +static DEFINE_MUTEX(dbs_mutex); + +static struct workqueue_struct *kondemandx_wq; + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int ignore_nice; + unsigned int powersave_bias; + unsigned int io_is_busy; + unsigned int suspend_freq; +} dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, + .ignore_nice = 0, + .powersave_bias = 0, + .suspend_freq = DEF_SUSPEND_FREQ, +}; + +// used for imoseyon's mods +static unsigned int suspended = 0; +static void ondemandx_suspend(int suspend) +{ + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, smp_processor_id()); + if (dbs_enable==0) return; + if (!suspend) { // resume at max speed: + suspended = 0; + __cpufreq_driver_target(dbs_info->cur_policy, dbs_info->cur_policy->max, + CPUFREQ_RELATION_L); + pr_info("[imoseyon] ondemandx awake at %d\n", dbs_info->cur_policy->cur); + } else { + suspended = 1; + // let's give it a little breathing room + __cpufreq_driver_target(dbs_info->cur_policy, dbs_tuners_ins.suspend_freq, CPUFREQ_RELATION_H); + pr_info("[imoseyon] ondemandx suspended at %d\n", dbs_info->cur_policy->cur); + } +} + +static void ondemandx_early_suspend(struct early_suspend *handler) { + ondemandx_suspend(1); +} + +static void ondemandx_late_resume(struct early_suspend *handler) { + ondemandx_suspend(0); +} + +static struct early_suspend ondemandx_power_suspend = { + .suspend = ondemandx_early_suspend, + .resume = ondemandx_late_resume, + .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, +}; + +static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, + cputime64_t *wall) +{ + cputime64_t idle_time; + cputime64_t cur_wall_time; + cputime64_t busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, + kstat_cpu(cpu).cpustat.system); + + busy_time = 
cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); + + idle_time = cputime64_sub(cur_wall_time, busy_time); + if (wall) + *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); + + return (cputime64_t)jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, wall); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + + return idle_time; +} + +static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +/* + * Find right freq to be set now with powersave_bias on. + * Returns the freq_hi to be used right now and will set freq_hi_jiffies, + * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. + */ +static unsigned int powersave_bias_target(struct cpufreq_policy *policy, + unsigned int freq_next, + unsigned int relation) +{ + unsigned int freq_req, freq_reduc, freq_avg; + unsigned int freq_hi, freq_lo; + unsigned int index = 0; + unsigned int jiffies_total, jiffies_hi, jiffies_lo; + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, + policy->cpu); + + if (!dbs_info->freq_table) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_next; + } + + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, + relation, &index); + freq_req = dbs_info->freq_table[index].frequency; + freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000; + freq_avg = freq_req - freq_reduc; + + /* Find freq bounds for freq_avg in freq_table */ + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_H, &index); + freq_lo = dbs_info->freq_table[index].frequency; + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_L, &index); + freq_hi = dbs_info->freq_table[index].frequency; + + /* Find out how long we have to be in hi and lo freqs */ + if (freq_hi == freq_lo) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_lo; + } + jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + jiffies_hi = (freq_avg - freq_lo) * jiffies_total; + jiffies_hi += ((freq_hi - freq_lo) / 2); + jiffies_hi /= (freq_hi - freq_lo); + jiffies_lo = jiffies_total - jiffies_hi; + dbs_info->freq_lo = freq_lo; + dbs_info->freq_lo_jiffies = jiffies_lo; + dbs_info->freq_hi_jiffies = jiffies_hi; + return freq_hi; +} + +static void ondemandx_powersave_bias_init_cpu(int cpu) +{ + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + dbs_info->freq_table = cpufreq_frequency_get_table(cpu); + dbs_info->freq_lo = 0; +} + +static void ondemandx_powersave_bias_init(void) +{ + int i; + for_each_online_cpu(i) { + ondemandx_powersave_bias_init_cpu(i); + } +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +define_one_global_ro(sampling_rate_min); + +/* cpufreq_ondemand Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct 
attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(up_threshold, up_threshold); +show_one(ignore_nice_load, ignore_nice); +show_one(powersave_bias, powersave_bias); +show_one(suspend_freq, suspend_freq); + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.io_is_busy = !!input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.up_threshold = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + mutex_lock(&dbs_mutex); + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + mutex_unlock(&dbs_mutex); + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; + + } + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input > 1000) + input = 1000; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.powersave_bias = input; + ondemandx_powersave_bias_init(); + mutex_unlock(&dbs_mutex); + return count; +} + +static ssize_t store_suspend_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input > 1000000) + input = 1000000; + + if (input < 200000) + input = 200000; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.suspend_freq = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(up_threshold); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(powersave_bias); +define_one_global_rw(suspend_freq); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_min.attr, + &sampling_rate.attr, + &up_threshold.attr, + &ignore_nice_load.attr, + &powersave_bias.attr, + &io_is_busy.attr, + &suspend_freq.attr, + NULL +}; + +static 
struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "ondemandx", +}; + +/************************** sysfs end ************************/ + +static inline void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) +{ + if (dbs_tuners_ins.powersave_bias) + freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); + //else if (p->cur == p->max) + // return; + if (suspended && freq > dbs_tuners_ins.suspend_freq) { + freq = dbs_tuners_ins.suspend_freq; + __cpufreq_driver_target(p, freq, CPUFREQ_RELATION_H); + } else + __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ? + CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); +} + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + unsigned int max_load_freq; + + struct cpufreq_policy *policy; + unsigned int j; + + this_dbs_info->freq_lo = 0; + policy = this_dbs_info->cur_policy; + + /* + * Every sampling_rate, we check, if current idle time is less + * than 20% (default), then we try to increase frequency + * Every sampling_rate, we look for a the lowest + * frequency which can sustain the load while keeping idle time over + * 30%. If such a frequency exist, we try to decrease to this frequency. + * + * Any frequency increase takes it to the maximum frequency. + * Frequency reduction happens at minimum steps of + * 5% (default) of current frequency + */ + + /* Get Absolute Load - in terms of freq */ + max_load_freq = 0; + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + unsigned int load, load_freq; + int freq_avg; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, + j_dbs_info->prev_cpu_iowait); + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) { + cputime64_t cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, + j_dbs_info->prev_cpu_nice); + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + /* + * For the purpose of ondemandx, waiting for disk IO is an + * indication that you're performance critical, and not that + * the system is actually idle. So subtract the iowait time + * from the cpu idle time. 
+ */ + + if (idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + + freq_avg = __cpufreq_driver_getavg(policy, j); + if (freq_avg <= 0) + freq_avg = policy->cur; + + load_freq = load * freq_avg; + if (load_freq > max_load_freq) + max_load_freq = load_freq; + } + + /* Check for frequency increase */ + if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) { + /* If switching to max speed, apply sampling_down_factor */ + dbs_freq_increase(policy, policy->max); + return; + } + + /* Check for frequency decrease */ + /* if we cannot reduce the frequency anymore, break out early */ + if (policy->cur == policy->min) { + __cpufreq_driver_target(policy, policy->min, + CPUFREQ_RELATION_L); + return; + } + + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus 10 points under the threshold. + */ + if (max_load_freq < + (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * + policy->cur) { + unsigned int freq_next; + freq_next = max_load_freq / + (dbs_tuners_ins.up_threshold - + dbs_tuners_ins.down_differential); + + if (freq_next < policy->min) + freq_next = policy->min; + + if (!dbs_tuners_ins.powersave_bias) { + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } else { + int freq = powersave_bias_target(policy, freq_next, + CPUFREQ_RELATION_L); + __cpufreq_driver_target(policy, freq, + CPUFREQ_RELATION_L); + } + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + int sample_type = dbs_info->sample_type; + + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + +#if 0 + /* Don't care too much about synchronizing the workqueue in both cpus */ + if (num_online_cpus() > 1) + delay -= jiffies % delay; +#endif + + mutex_lock(&dbs_info->timer_mutex); + + /* Common NORMAL_SAMPLE setup */ + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + if (!dbs_tuners_ins.powersave_bias || + sample_type == DBS_NORMAL_SAMPLE) { + dbs_check_cpu(dbs_info); + if (dbs_info->freq_lo) { + /* Setup timer for SUB_SAMPLE */ + dbs_info->sample_type = DBS_SUB_SAMPLE; + delay = dbs_info->freq_hi_jiffies; + } + } else { + if (!suspended) + __cpufreq_driver_target(dbs_info->cur_policy, + dbs_info->freq_lo, CPUFREQ_RELATION_H); + } + queue_delayed_work_on(cpu, kondemandx_wq, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + queue_delayed_work_on(dbs_info->cpu, kondemandx_wq, &dbs_info->work, + delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); +} + +/* + * Not all CPUs want IO time to be accounted as busy; this dependson how + * efficient idling at a higher frequency/voltage is. + * Pavel Machek says this is not so for various generations of AMD and old + * Intel systems. + * Mike Chan (androidlcom) calis this is also not true for ARM. 
+ * Because of this, whitelist specific known (series) of CPUs by default, and + * leave all others up to the user. + */ +static int should_io_be_busy(void) +{ +#if defined(CONFIG_X86) + /* + * For Intel, Core 2 (model 15) andl later have an efficient idle. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + boot_cpu_data.x86 == 6 && + boot_cpu_data.x86_model >= 15) + return 1; +#endif +#if defined(CONFIG_ARM) + return 1; +#endif + return 0; +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_mutex); + + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kstat_cpu(j).cpustat.nice; + } + } + this_dbs_info->cpu = cpu; + ondemandx_powersave_bias_init_cpu(cpu); + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + /* policy latency is in nS. Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = + max(min_sampling_rate, + latency * LATENCY_MULTIPLIER); + dbs_tuners_ins.io_is_busy = should_io_be_busy(); + } + mutex_unlock(&dbs_mutex); + + mutex_init(&this_dbs_info->timer_mutex); + dbs_timer_init(this_dbs_info); + register_early_suspend(&ondemandx_power_suspend); + pr_info("[imoseyon] ondemandx active\n"); + break; + + case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + mutex_destroy(&this_dbs_info->timer_mutex); + dbs_enable--; + mutex_unlock(&dbs_mutex); + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + unregister_early_suspend(&ondemandx_power_suspend); + pr_info("[imoseyon] ondemandx inactive\n"); + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +static int __init cpufreq_gov_dbs_init(void) +{ + int err; + cputime64_t wall; + u64 idle_time; + int cpu = get_cpu(); + + idle_time = get_cpu_idle_time_us(cpu, &wall); + put_cpu(); + if (idle_time != -1ULL) { + /* Idle micro accounting is supported. Use finer thresholds */ + dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; + dbs_tuners_ins.down_differential = + MICRO_FREQUENCY_DOWN_DIFFERENTIAL; + /* + * In no_hz/micro accounting case we set the minimum frequency + * not depending on HZ, but fixed (very low). 
The deferred + * timer might skip some samples if idle/sleeping as needed. + */ + min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; + } else { + /* For correct statistics, we need 10 ticks for each measure */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(1); + } + + kondemandx_wq = alloc_workqueue("kondemandx", WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1); + if (!kondemandx_wq) { + printk(KERN_ERR "Creation of kondemandx failed\n"); + return -EFAULT; + } + + err = cpufreq_register_governor(&cpufreq_gov_ondemandx); + if (err) + destroy_workqueue(kondemandx_wq); + + return err; +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + pr_info("[imoseyon] ondemandx exit\n"); + cpufreq_unregister_governor(&cpufreq_gov_ondemandx); + destroy_workqueue(kondemandx_wq); +} + + +MODULE_AUTHOR("Venkatesh Pallipadi "); +MODULE_AUTHOR("Alexey Starikovskiy "); +MODULE_DESCRIPTION("'cpufreq_ondemandx' - A dynamic cpufreq governor for " + "Low Latency Frequency Transition capable processors"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMANDX +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_smartass2.c b/drivers/cpufreq/cpufreq_smartass2.c index 05c39ded..09d8e6e2 100644 --- a/drivers/cpufreq/cpufreq_smartass2.c +++ b/drivers/cpufreq/cpufreq_smartass2.c @@ -467,7 +467,7 @@ static ssize_t store_debug_mask(struct kobject *kobj, struct attribute *attr, co res = strict_strtoul(buf, 0, &input); if (res >= 0) debug_mask = input; - return res; + return count; } static ssize_t show_up_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) @@ -482,7 +482,7 @@ static ssize_t store_up_rate_us(struct kobject *kobj, struct attribute *attr, co res = strict_strtoul(buf, 0, &input); if (res >= 0 && input >= 0 && input <= 100000000) up_rate_us = input; - return res; + return count; } static ssize_t show_down_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) @@ -497,7 +497,7 @@ static ssize_t store_down_rate_us(struct kobject *kobj, struct attribute *attr, res = strict_strtoul(buf, 0, &input); if (res >= 0 && input >= 0 && input <= 100000000) down_rate_us = input; - return res; + return count; } static ssize_t show_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) @@ -515,7 +515,7 @@ static ssize_t store_sleep_ideal_freq(struct kobject *kobj, struct attribute *at if (suspended) smartass_update_min_max_allcpus(); } - return res; + return count; } static ssize_t show_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, char *buf) @@ -530,7 +530,7 @@ static ssize_t store_sleep_wakeup_freq(struct kobject *kobj, struct attribute *a res = strict_strtoul(buf, 0, &input); if (res >= 0 && input >= 0) sleep_wakeup_freq = input; - return res; + return count; } static ssize_t show_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) @@ -548,7 +548,7 @@ static ssize_t store_awake_ideal_freq(struct kobject *kobj, struct attribute *at if (!suspended) smartass_update_min_max_allcpus(); } - return res; + return count; } static ssize_t show_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, char *buf) @@ -563,7 +563,7 @@ static ssize_t store_sample_rate_jiffies(struct kobject *kobj, struct attribute res = strict_strtoul(buf, 0, &input); if (res >= 0 && input > 0 && input <= 1000) sample_rate_jiffies = input; - return res; + return count; } static ssize_t show_ramp_up_step(struct kobject *kobj, struct 
attribute *attr, char *buf) @@ -578,7 +578,7 @@ static ssize_t store_ramp_up_step(struct kobject *kobj, struct attribute *attr, res = strict_strtoul(buf, 0, &input); if (res >= 0 && input >= 0) ramp_up_step = input; - return res; + return count; } static ssize_t show_ramp_down_step(struct kobject *kobj, struct attribute *attr, char *buf) @@ -593,7 +593,7 @@ static ssize_t store_ramp_down_step(struct kobject *kobj, struct attribute *attr res = strict_strtoul(buf, 0, &input); if (res >= 0 && input >= 0) ramp_down_step = input; - return res; + return count; } static ssize_t show_max_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) @@ -608,7 +608,7 @@ static ssize_t store_max_cpu_load(struct kobject *kobj, struct attribute *attr, res = strict_strtoul(buf, 0, &input); if (res >= 0 && input > 0 && input <= 100) max_cpu_load = input; - return res; + return count; } static ssize_t show_min_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) @@ -623,7 +623,7 @@ static ssize_t store_min_cpu_load(struct kobject *kobj, struct attribute *attr, res = strict_strtoul(buf, 0, &input); if (res >= 0 && input > 0 && input < 100) min_cpu_load = input; - return res; + return count; } #define define_global_rw_attr(_name) \ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index ec8dea51..2d639de5 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -385,6 +385,21 @@ extern struct cpufreq_governor cpufreq_gov_smartass; #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_LAZY) extern struct cpufreq_governor cpufreq_gov_lazy; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_lazy) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SCARY) +extern struct cpufreq_governor cpufreq_gov_scary; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_scary) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_LIONHEART) +extern struct cpufreq_governor cpufreq_gov_lionheart; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_lionheart) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_WHEATLEY) +extern struct cpufreq_governor cpufreq_gov_wheatley; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_wheatley) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SAVAGEDZEN) +extern struct cpufreq_governor cpufreq_gov_savagedzen; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_savagedzen) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMANDX) +extern struct cpufreq_governor cpufreq_gov_ondemandx; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_ondemandx) #endif From 86da7178626c0c32bafbe53b343b7e9e3ddb6849 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 10:19:50 -0400 Subject: [PATCH 13/35] Added Brazilianwax governor --- drivers/cpufreq/Kconfig | 46 +- drivers/cpufreq/Makefile | 1 + drivers/cpufreq/cpufreq_brazilianwax.c | 824 +++++++++++++++++++++++++ include/linux/cpufreq.h | 3 + 4 files changed, 845 insertions(+), 29 deletions(-) create mode 100644 drivers/cpufreq/cpufreq_brazilianwax.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 3d6183ff..e0008dd1 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -243,22 +243,6 @@ config CPU_FREQ_GOV_CONSERVATIVE tristate "'conservative' cpufreq governor" depends on CPU_FREQ help - 'conservative' - this driver is rather similar to the 'ondemand' - governor both in its source code and its purpose, the difference is - its optimisation for better suitability in a battery powered - environment. The frequency is gracefully increased and decreased - rather than jumping to 100% when speed is required. 
- - If you have a desktop machine then you should really be considering - the 'ondemand' governor instead, however if you are using a laptop, - PDA or even an AMD64 based computer (due to the unacceptable - step-by-step latency issues between the minimum and maximum frequency - transitions in the CPU) you will probably want to use this governor. - - To compile this driver as a module, choose M here: the - module will be called cpufreq_conservative. - - For details, take a look at linux/Documentation/cpu-freq. config CPU_FREQ_DEFAULT_GOV_SMARTASS2 bool "smartass2" @@ -303,14 +287,11 @@ config CPU_FREQ_DEFAULT_GOV_SMARTASS help Use the CPUFreq governor 'smartass' as default. -endchoice - config CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND bool "intellidemand" select CPU_FREQ_GOV_INTELLIDEMAND select CPU_FREQ_GOV_PERFORMANCE help - Intelligent OnDemand Governor based on Samsung Patched OnDemand config CPU_FREQ_DEFAULT_GOV_SCARY bool "scary" @@ -339,7 +320,12 @@ config CPU_FREQ_DEFAULT_GOV_ONDEMANDX help Use the CPUFreq governor 'ondemandx' as default. -endchoice +config CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX + bool "brazilianwax" + select CPU_FREQ_GOV_BRAZILIANWAX + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'brazilianwax' as default. config CPU_FREQ_GOV_PERFORMANCE tristate "'performance' governor" @@ -369,7 +355,6 @@ config CPU_FREQ_DEFAULT_GOV_LAZY select CPU_FREQ_GOV_PERFORMANCE help Use the CPUFreq governor 'lazy' as default. -endchoice config CPU_FREQ_GOV_SLP tristate "'slp' cpufreq policy governor" @@ -446,8 +431,6 @@ config CPU_FREQ_GOV_WHEATLEY tristate "'wheatley' cpufreq governor" depends on CPU_FREQ - If in doubt, say N. - config CPU_FREQ_GOV_SMARTASS tristate "'smartass' cpufreq governor" depends on CPU_FREQ @@ -471,9 +454,9 @@ config CPU_FREQ_GOV_SMARTASS2 'smartassV2' - a "smart" optimized governor for the hero! config CPU_FREQ_GOV_INTERACTIVEX -tristate "'interactiveX' cpufreq policy governor" - help - 'interactiveX' - Modified version of interactive with sleep+wake code. + tristate "'interactiveX' cpufreq policy governor" + help + 'interactiveX' - Modified version of interactive with sleep+wake code. config CPU_FREQ_GOV_LAGFREE tristate "'lagfree' cpufreq governor" @@ -549,6 +532,14 @@ config CPU_FREQ_GOV_ONDEMANDX tristate "'ondemandx' cpufreq governor" depends on CPU_FREQ +config CPU_FREQ_GOV_BRAZILIANWAX + tristate "'brazilianwax' cpufreq governor" + depends on CPU_FREQ + help + 'brazilianwax' - a "slightly more aggressive smart" optimized governor! + If in doubt, say Y. 
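The diffstat for this patch also lists a 3-line addition to include/linux/cpufreq.h that is not shown in this excerpt; going by the #elif chain the previous patch added for the other governors and the cpufreq_gov_brazilianwax definition further below, it presumably amounts to something like:

#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX)
extern struct cpufreq_governor cpufreq_gov_brazilianwax;
#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_brazilianwax)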
+ + menu "x86 CPU frequency scaling drivers" depends on X86 source "drivers/cpufreq/Kconfig.x86" @@ -563,6 +554,3 @@ menu "PowerPC CPU frequency scaling drivers" depends on PPC32 || PPC64 source "drivers/cpufreq/Kconfig.powerpc" endmenu - -endif -endmenu diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 166a086e..22e5500d 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -36,6 +36,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o obj-$(CONFIG_CPU_FREQ_GOV_SAVAGEDZEN) += cpufreq_savagedzen.o obj-$(CONFIG_CPU_FREQ_GOV_ONDEMANDX) += cpufreq_ondemandx.o +obj-$(CONFIG_CPU_FREQ_GOV_BRAZILIANWAX) += cpufreq_brazilianwax.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o diff --git a/drivers/cpufreq/cpufreq_brazilianwax.c b/drivers/cpufreq/cpufreq_brazilianwax.c new file mode 100644 index 00000000..f7c73442 --- /dev/null +++ b/drivers/cpufreq/cpufreq_brazilianwax.c @@ -0,0 +1,824 @@ +/* + * drivers/cpufreq/cpufreq_brazilianwax.c + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Author: Erasmux + * + * Based on the interactive governor By Mike Chan (mike@android.com) + * which was adaptated to 2.6.29 kernel by Nadlabak (pavel@doshaska.net) + * + * requires to add + * EXPORT_SYMBOL_GPL(nr_running); + * at the end of kernel/sched.c + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void (*pm_idle_old)(void); +static atomic_t active_count = ATOMIC_INIT(0); + +struct brazilianwax_info_s { + struct cpufreq_policy *cur_policy; + struct timer_list timer; + u64 time_in_idle; + u64 idle_exit_time; + u64 freq_change_time; + u64 freq_change_time_in_idle; + int cur_cpu_load; + unsigned int force_ramp_up; + unsigned int enable; + int max_speed; + int min_speed; +}; +static DEFINE_PER_CPU(struct brazilianwax_info_s, brazilianwax_info); + +/* Workqueues handle frequency scaling */ +static struct workqueue_struct *up_wq; +static struct workqueue_struct *down_wq; +static struct work_struct freq_scale_work; + +static cpumask_t work_cpumask; +static unsigned int suspended; + +enum { + BRAZILIANWAX_DEBUG_JUMPS=1, + BRAZILIANWAX_DEBUG_LOAD=2 +}; + +/* + * Combination of the above debug flags. + */ +static unsigned long debug_mask; + +/* + * The minimum amount of time to spend at a frequency before we can ramp up. + */ +#define DEFAULT_UP_RATE_US 10000; +static unsigned long up_rate_us; + +/* + * The minimum amount of time to spend at a frequency before we can ramp down. + */ +#define DEFAULT_DOWN_RATE_US 20000; +static unsigned long down_rate_us; + +/* + * When ramping up frequency with no idle cycles jump to at least this frequency. + * Zero disables. Set a very high value to jump to policy max freqeuncy. + */ +#define DEFAULT_UP_MIN_FREQ 2000000 +static unsigned int up_min_freq; + +/* + * When sleep_max_freq>0 the frequency when suspended will be capped + * by this frequency. Also will wake up at max frequency of policy + * to minimize wakeup issues. 
+ * Set sleep_max_freq=0 to disable this behavior. + */ +#define DEFAULT_SLEEP_MAX_FREQ 400000 +static unsigned int sleep_max_freq; + +/* + * The frequency to set when waking up from sleep. + * When sleep_max_freq=0 this will have no effect. + */ +#define DEFAULT_SLEEP_WAKEUP_FREQ 800000 +static unsigned int sleep_wakeup_freq; + +#define UP_THRESHOLD_FREQ 2000000 +static unsigned int threshold_freq; + +/* + * When awake_min_freq>0 the frequency when not suspended will not + * go below this frequency. + * Set awake_min_freq=0 to disable this behavior. + */ +#define DEFAULT_AWAKE_MIN_FREQ 200000 +static unsigned int awake_min_freq; + +static unsigned int suspendfreq = 400000; + +/* + * Sampling rate, I highly recommend to leave it at 2. + */ +#define DEFAULT_SAMPLE_RATE_JIFFIES 2 +static unsigned int sample_rate_jiffies; + +/* + * Minimum Freqeuncy delta when ramping up. + * zero disables and causes to always jump straight to max frequency. + */ +#define DEFAULT_RAMP_UP_STEP 600000 +static unsigned int ramp_up_step; + +/* + * Miminum Freqeuncy delta when ramping down. + * zero disables and will calculate ramp down according to load heuristic. + */ +#define DEFAULT_RAMP_DOWN_STEP 400000 +static unsigned int ramp_down_step; + +/* + * CPU freq will be increased if measured load > max_cpu_load; + */ +#define DEFAULT_MAX_CPU_LOAD 45 +static unsigned long max_cpu_load; + +#define DEFAULT_X_CPU_LOAD 70 +static unsigned long x_cpu_load; + +/* + * CPU freq will be decreased if measured load < min_cpu_load; + */ +#define DEFAULT_MIN_CPU_LOAD 25 +static unsigned long min_cpu_load; +#define RAPID_MIN_CPU_LOAD 5 +static unsigned long rapid_min_cpu_load; + + +static int cpufreq_governor_brazilianwax(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX +static +#endif +struct cpufreq_governor cpufreq_gov_brazilianwax = { + .name = "brazilianwax", + .governor = cpufreq_governor_brazilianwax, + .max_transition_latency = 9000000, + .owner = THIS_MODULE, +}; + +static void brazilianwax_update_min_max(struct brazilianwax_info_s *this_brazilianwax, struct cpufreq_policy *policy, int suspend) { + if (suspend) { + this_brazilianwax->min_speed = policy->min; + this_brazilianwax->max_speed = sleep_max_freq; +// this_brazilianwax->max_speed = // sleep_max_freq; but make sure it obeys the policy min/max +// policy->max > sleep_max_freq ? (sleep_max_freq > policy->min ? sleep_max_freq : policy->min) : policy->max; + } else { + this_brazilianwax->min_speed = // awake_min_freq; but make sure it obeys the policy min/max + policy->min < awake_min_freq ? (awake_min_freq < policy->max ? 
awake_min_freq : policy->max) : policy->min; + this_brazilianwax->max_speed = policy->max; + } +} + +inline static unsigned int validate_freq(struct brazilianwax_info_s *this_brazilianwax, int freq) { + if (freq > this_brazilianwax->max_speed) + return this_brazilianwax->max_speed; + if (freq < this_brazilianwax->min_speed) + return this_brazilianwax->min_speed; + return freq; +} + +static void reset_timer(unsigned long cpu, struct brazilianwax_info_s *this_brazilianwax) { + this_brazilianwax->time_in_idle = get_cpu_idle_time_us(cpu, &this_brazilianwax->idle_exit_time); + mod_timer(&this_brazilianwax->timer, jiffies + sample_rate_jiffies); +} + +static void cpufreq_brazilianwax_timer(unsigned long data) +{ + u64 delta_idle; + u64 delta_time; + int cpu_load; + u64 update_time; + u64 now_idle; + unsigned long new_rate; + + struct brazilianwax_info_s *this_brazilianwax = &per_cpu(brazilianwax_info, data); + struct cpufreq_policy *policy = this_brazilianwax->cur_policy; + + now_idle = get_cpu_idle_time_us(data, &update_time); + + if (this_brazilianwax->idle_exit_time == 0 || update_time == this_brazilianwax->idle_exit_time) + return; + + delta_idle = cputime64_sub(now_idle, this_brazilianwax->time_in_idle); + delta_time = cputime64_sub(update_time, this_brazilianwax->idle_exit_time); + //printk(KERN_INFO "brazilianwaxT: t=%llu i=%llu\n",cputime64_sub(update_time,this_brazilianwax->idle_exit_time),delta_idle); + + // If timer ran less than 1ms after short-term sample started, retry. + if (delta_time < 1000) { + if (!timer_pending(&this_brazilianwax->timer)) + reset_timer(data,this_brazilianwax); + return; + } + + if (delta_idle > delta_time) + cpu_load = 0; + else + cpu_load = 100 * (unsigned int)(delta_time - delta_idle) / (unsigned int)delta_time; + + if (debug_mask & BRAZILIANWAX_DEBUG_LOAD) + printk(KERN_INFO "brazilianwaxT @ %d: load %d (delta_time %llu)\n",policy->cur,cpu_load,delta_time); + + this_brazilianwax->cur_cpu_load = cpu_load; + + // Scale up if load is above max or if there where no idle cycles since coming out of idle, + // or when we are above our max speed for a very long time (should only happend if entering sleep + // at high loads) + if ((cpu_load > max_cpu_load || delta_idle == 0) && + !(policy->cur > this_brazilianwax->max_speed && + cputime64_sub(update_time, this_brazilianwax->freq_change_time) > 100*down_rate_us)) { + + if (policy->cur > this_brazilianwax->max_speed) { + reset_timer(data,this_brazilianwax); + } + + if (policy->cur == policy->max) + return; + + if (nr_running() < 1) + return; + + new_rate = up_rate_us; + + // minimize going above 1.8Ghz + if (policy->cur > up_min_freq) new_rate = 75000; + + if (cputime64_sub(update_time, this_brazilianwax->freq_change_time) < new_rate) + return; + + this_brazilianwax->force_ramp_up = 1; + cpumask_set_cpu(data, &work_cpumask); + queue_work(up_wq, &freq_scale_work); + return; + } + + /* + * There is a window where if the cpu utlization can go from low to high + * between the timer expiring, delta_idle will be > 0 and the cpu will + * be 100% busy, preventing idle from running, and this timer from + * firing. So setup another timer to fire to check cpu utlization. + * Do not setup the timer if there is no scheduled work or if at max speed. 
+ */ + if (policy->cur < this_brazilianwax->max_speed && !timer_pending(&this_brazilianwax->timer) && nr_running() > 0) + reset_timer(data,this_brazilianwax); + + if (policy->cur == policy->min) + return; + + /* + * Do not scale down unless we have been at this frequency for the + * minimum sample time. + */ + if (cputime64_sub(update_time, this_brazilianwax->freq_change_time) < down_rate_us) + return; + + cpumask_set_cpu(data, &work_cpumask); + queue_work(down_wq, &freq_scale_work); +} + +static void cpufreq_idle(void) +{ + struct brazilianwax_info_s *this_brazilianwax = &per_cpu(brazilianwax_info, smp_processor_id()); + struct cpufreq_policy *policy = this_brazilianwax->cur_policy; + + if (!this_brazilianwax->enable) { + pm_idle_old(); + return; + } + + if (policy->cur == this_brazilianwax->min_speed && timer_pending(&this_brazilianwax->timer)) + del_timer(&this_brazilianwax->timer); + + pm_idle_old(); + + if (!timer_pending(&this_brazilianwax->timer)) + reset_timer(smp_processor_id(), this_brazilianwax); +} + +/* We use the same work function to sale up and down */ +static void cpufreq_brazilianwax_freq_change_time_work(struct work_struct *work) +{ + unsigned int cpu; + int new_freq, old_freq; + unsigned int force_ramp_up; + int cpu_load; + struct brazilianwax_info_s *this_brazilianwax; + struct cpufreq_policy *policy; + unsigned int relation = CPUFREQ_RELATION_L; + cpumask_t tmp_mask = work_cpumask; + for_each_cpu(cpu, &tmp_mask) { + this_brazilianwax = &per_cpu(brazilianwax_info, cpu); + policy = this_brazilianwax->cur_policy; + cpu_load = this_brazilianwax->cur_cpu_load; + force_ramp_up = this_brazilianwax->force_ramp_up && nr_running() > 1; + this_brazilianwax->force_ramp_up = 0; + + if (force_ramp_up || cpu_load > max_cpu_load) { + if (!suspended) { + if (force_ramp_up && up_min_freq && policy->cur < up_min_freq) { + // imoseyon - ramp up faster + new_freq = up_min_freq; + relation = CPUFREQ_RELATION_L; + } else if (ramp_up_step) { + new_freq = policy->cur + ramp_up_step; + relation = CPUFREQ_RELATION_H; + } else { + new_freq = this_brazilianwax->max_speed; + relation = CPUFREQ_RELATION_H; + } + // try to minimize going above 1.8Ghz + if ((new_freq > threshold_freq) && (cpu_load < 95)) { + new_freq = threshold_freq; + relation = CPUFREQ_RELATION_H; + } + } else { + new_freq = policy->cur + 150000; + if (new_freq > suspendfreq) new_freq = suspendfreq; + relation = CPUFREQ_RELATION_H; + } + + } else if (cpu_load < min_cpu_load) { + if (cpu_load < rapid_min_cpu_load) { + new_freq = awake_min_freq; + } else if (ramp_down_step) { + new_freq = policy->cur - ramp_down_step; + } else { + cpu_load += 100 - max_cpu_load; // dummy load. 
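+ /* Padding the load by (100 - max_cpu_load) keeps this proportional step conservative: the frequency chosen below is one at which the current amount of work would still leave the load under max_cpu_load, so the next sample does not immediately trigger a ramp back up. */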
+ new_freq = policy->cur * cpu_load / 100; + } + relation = CPUFREQ_RELATION_L; + } + else new_freq = policy->cur; + + old_freq = policy->cur; + new_freq = validate_freq(this_brazilianwax,new_freq); + + if (new_freq != policy->cur) { + if (debug_mask & BRAZILIANWAX_DEBUG_JUMPS) + printk(KERN_INFO "SmartassQ: jumping from %d to %d\n",policy->cur,new_freq); + + __cpufreq_driver_target(policy, new_freq, relation); + + this_brazilianwax->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu,&this_brazilianwax->freq_change_time); + + if (relation == CPUFREQ_RELATION_L && old_freq == policy->cur) { + // step down one more time + new_freq = new_freq - 100000; + __cpufreq_driver_target(policy, new_freq, relation); + this_brazilianwax->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu,&this_brazilianwax->freq_change_time); + } + if (relation == CPUFREQ_RELATION_H && old_freq == policy->cur) { + // step up one more time + new_freq = new_freq + 100000; + __cpufreq_driver_target(policy, new_freq, relation); + this_brazilianwax->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu,&this_brazilianwax->freq_change_time); + } + } + + cpumask_clear_cpu(cpu, &work_cpumask); + } +} + +static ssize_t show_debug_mask(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%lu\n", debug_mask); +} + +static ssize_t store_debug_mask(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0) + debug_mask = input; + return res; +} + +static struct freq_attr debug_mask_attr = __ATTR(debug_mask, 0644, + show_debug_mask, store_debug_mask); + +static ssize_t show_up_rate_us(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%lu\n", up_rate_us); +} + +static ssize_t store_up_rate_us(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0 && input <= 100000000) + up_rate_us = input; + return res; +} + +static struct freq_attr up_rate_us_attr = __ATTR(up_rate_us, 0644, + show_up_rate_us, store_up_rate_us); + +static ssize_t show_down_rate_us(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%lu\n", down_rate_us); +} + +static ssize_t store_down_rate_us(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0 && input <= 100000000) + down_rate_us = input; + return res; +} + +static struct freq_attr down_rate_us_attr = __ATTR(down_rate_us, 0644, + show_down_rate_us, store_down_rate_us); + +static ssize_t show_up_min_freq(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", up_min_freq); +} + +static ssize_t store_up_min_freq(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + up_min_freq = input; + return res; +} + +static struct freq_attr up_min_freq_attr = __ATTR(up_min_freq, 0644, + show_up_min_freq, store_up_min_freq); + +static ssize_t show_sleep_max_freq(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", sleep_max_freq); +} + +static ssize_t store_sleep_max_freq(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + sleep_max_freq = input; + return res; +} + 
+static struct freq_attr sleep_max_freq_attr = __ATTR(sleep_max_freq, 0644, + show_sleep_max_freq, store_sleep_max_freq); + +static ssize_t show_sleep_wakeup_freq(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", sleep_wakeup_freq); +} + +static ssize_t store_sleep_wakeup_freq(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + sleep_wakeup_freq = input; + return res; +} + +static struct freq_attr sleep_wakeup_freq_attr = __ATTR(sleep_wakeup_freq, 0644, + show_sleep_wakeup_freq, store_sleep_wakeup_freq); + +static ssize_t show_awake_min_freq(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", awake_min_freq); +} + +static ssize_t store_awake_min_freq(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + awake_min_freq = input; + return res; +} + +static struct freq_attr awake_min_freq_attr = __ATTR(awake_min_freq, 0644, + show_awake_min_freq, store_awake_min_freq); + +static ssize_t show_sample_rate_jiffies(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", sample_rate_jiffies); +} + +static ssize_t store_sample_rate_jiffies(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input <= 1000) + sample_rate_jiffies = input; + return res; +} + +static struct freq_attr sample_rate_jiffies_attr = __ATTR(sample_rate_jiffies, 0644, + show_sample_rate_jiffies, store_sample_rate_jiffies); + +static ssize_t show_ramp_up_step(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", ramp_up_step); +} + +static ssize_t store_ramp_up_step(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + ramp_up_step = input; + return res; +} + +static struct freq_attr ramp_up_step_attr = __ATTR(ramp_up_step, 0644, + show_ramp_up_step, store_ramp_up_step); + +static ssize_t show_ramp_down_step(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", ramp_down_step); +} + +static ssize_t store_ramp_down_step(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + ramp_down_step = input; + return res; +} + +static struct freq_attr ramp_down_step_attr = __ATTR(ramp_down_step, 0644, + show_ramp_down_step, store_ramp_down_step); + +static ssize_t show_max_cpu_load(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%lu\n", max_cpu_load); +} + +static ssize_t store_max_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input <= 100) + max_cpu_load = input; + return res; +} + +static struct freq_attr max_cpu_load_attr = __ATTR(max_cpu_load, 0644, + show_max_cpu_load, store_max_cpu_load); + +static ssize_t show_min_cpu_load(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%lu\n", min_cpu_load); +} + +static ssize_t store_min_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = 
strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input < 100) + min_cpu_load = input; + return res; +} + +static struct freq_attr min_cpu_load_attr = __ATTR(min_cpu_load, 0644, + show_min_cpu_load, store_min_cpu_load); + +static struct attribute * brazilianwax_attributes[] = { + &debug_mask_attr.attr, + &up_rate_us_attr.attr, + &down_rate_us_attr.attr, + &up_min_freq_attr.attr, + &sleep_max_freq_attr.attr, + &sleep_wakeup_freq_attr.attr, + &awake_min_freq_attr.attr, + &sample_rate_jiffies_attr.attr, + &ramp_up_step_attr.attr, + &ramp_down_step_attr.attr, + &max_cpu_load_attr.attr, + &min_cpu_load_attr.attr, + NULL, +}; + +static struct attribute_group brazilianwax_attr_group = { + .attrs = brazilianwax_attributes, + .name = "brazilianwax", +}; + +static void brazilianwax_suspend(int cpu, int suspend) +{ + struct brazilianwax_info_s *this_brazilianwax = &per_cpu(brazilianwax_info, smp_processor_id()); + struct cpufreq_policy *policy = this_brazilianwax->cur_policy; + unsigned int new_freq; + + if (!this_brazilianwax->enable || sleep_max_freq==0) // disable behavior for sleep_max_freq==0 + return; + + brazilianwax_update_min_max(this_brazilianwax,policy,suspend); + if (!suspend) { // resume at max speed: + suspended=0; + new_freq = validate_freq(this_brazilianwax,sleep_wakeup_freq); + + if (debug_mask & BRAZILIANWAX_DEBUG_JUMPS) + printk(KERN_INFO "SmartassS: awaking at %d\n",new_freq); + + __cpufreq_driver_target(policy, new_freq, + CPUFREQ_RELATION_L); + + if (policy->cur < this_brazilianwax->max_speed && !timer_pending(&this_brazilianwax->timer)) + reset_timer(smp_processor_id(),this_brazilianwax); + pr_info("[imoseyon] brazilianwax awake at %d\n", policy->cur); + } else { + // to avoid wakeup issues with quick sleep/wakeup don't change actual frequency when entering sleep + // to allow some time to settle down. + // we reset the timer, if eventually, even at full load the timer will lower the freqeuncy. + reset_timer(smp_processor_id(),this_brazilianwax); + + this_brazilianwax->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu,&this_brazilianwax->freq_change_time); + + if (debug_mask & BRAZILIANWAX_DEBUG_JUMPS) + printk(KERN_INFO "SmartassS: suspending at %d\n",policy->cur); + __cpufreq_driver_target(policy, suspendfreq, CPUFREQ_RELATION_H); + pr_info("[imoseyon] brazilianwax suspending with %d\n", policy->cur); + suspended=1; + } +} + +static void brazilianwax_early_suspend(struct early_suspend *handler) { + int i; + for_each_online_cpu(i) + brazilianwax_suspend(i,1); +} + +static void brazilianwax_late_resume(struct early_suspend *handler) { + int i; + for_each_online_cpu(i) + brazilianwax_suspend(i,0); +} + +static struct early_suspend brazilianwax_power_suspend = { + .suspend = brazilianwax_early_suspend, + .resume = brazilianwax_late_resume, + .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, +}; + +static int cpufreq_governor_brazilianwax(struct cpufreq_policy *new_policy, + unsigned int event) +{ + unsigned int cpu = new_policy->cpu; + int rc; + struct brazilianwax_info_s *this_brazilianwax = &per_cpu(brazilianwax_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!new_policy->cur)) + return -EINVAL; + + /* + * Do not register the idle hook and create sysfs + * entries if we have already done so. 
+ */ + if (atomic_inc_return(&active_count) <= 1) { + rc = sysfs_create_group(&new_policy->kobj, &brazilianwax_attr_group); + if (rc) + return rc; + pm_idle_old = pm_idle; + pm_idle = cpufreq_idle; + } + + this_brazilianwax->cur_policy = new_policy; + this_brazilianwax->enable = 1; + + // imoseyon - should only register for suspend when governor active + register_early_suspend(&brazilianwax_power_suspend); + pr_info("[imoseyon] brazilianwax active\n"); + + // notice no break here! + + case CPUFREQ_GOV_LIMITS: + brazilianwax_update_min_max(this_brazilianwax,new_policy,suspended); + if (this_brazilianwax->cur_policy->cur != this_brazilianwax->max_speed) { + if (debug_mask & BRAZILIANWAX_DEBUG_JUMPS) + printk(KERN_INFO "SmartassI: initializing to %d\n",this_brazilianwax->max_speed); + __cpufreq_driver_target(new_policy, this_brazilianwax->max_speed, CPUFREQ_RELATION_H); + } + break; + + case CPUFREQ_GOV_STOP: + del_timer(&this_brazilianwax->timer); + this_brazilianwax->enable = 0; + + if (atomic_dec_return(&active_count) > 1) + return 0; + sysfs_remove_group(&new_policy->kobj, + &brazilianwax_attr_group); + + pm_idle = pm_idle_old; + // unregister when governor exits + unregister_early_suspend(&brazilianwax_power_suspend); + pr_info("[imoseyon] brazilianwax inactive\n"); + break; + } + + return 0; +} + + +static int __init cpufreq_brazilianwax_init(void) +{ + unsigned int i; + struct brazilianwax_info_s *this_brazilianwax; + debug_mask = 0; + up_rate_us = DEFAULT_UP_RATE_US; + down_rate_us = DEFAULT_DOWN_RATE_US; + up_min_freq = DEFAULT_UP_MIN_FREQ; + sleep_max_freq = DEFAULT_SLEEP_MAX_FREQ; + sleep_wakeup_freq = DEFAULT_SLEEP_WAKEUP_FREQ; + threshold_freq = UP_THRESHOLD_FREQ; + awake_min_freq = DEFAULT_AWAKE_MIN_FREQ; + sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES; + ramp_up_step = DEFAULT_RAMP_UP_STEP; + ramp_down_step = DEFAULT_RAMP_DOWN_STEP; + max_cpu_load = DEFAULT_MAX_CPU_LOAD; + x_cpu_load = DEFAULT_X_CPU_LOAD; + min_cpu_load = DEFAULT_MIN_CPU_LOAD; + rapid_min_cpu_load = RAPID_MIN_CPU_LOAD; + + suspended = 0; + + /* Initalize per-cpu data: */ + for_each_possible_cpu(i) { + this_brazilianwax = &per_cpu(brazilianwax_info, i); + this_brazilianwax->enable = 0; + this_brazilianwax->cur_policy = 0; + this_brazilianwax->force_ramp_up = 0; + this_brazilianwax->max_speed = DEFAULT_SLEEP_WAKEUP_FREQ; + this_brazilianwax->min_speed = DEFAULT_AWAKE_MIN_FREQ; + this_brazilianwax->time_in_idle = 0; + this_brazilianwax->idle_exit_time = 0; + this_brazilianwax->freq_change_time = 0; + this_brazilianwax->freq_change_time_in_idle = 0; + this_brazilianwax->cur_cpu_load = 0; + // intialize timer: + init_timer_deferrable(&this_brazilianwax->timer); + this_brazilianwax->timer.function = cpufreq_brazilianwax_timer; + this_brazilianwax->timer.data = i; + } + + /* Scale up is high priority */ + up_wq = create_workqueue("kbrazilianwax_up"); + down_wq = create_workqueue("kbrazilianwax_down"); + + INIT_WORK(&freq_scale_work, cpufreq_brazilianwax_freq_change_time_work); + + pr_info("[imoseyon] brazilianwax enter\n"); + + return cpufreq_register_governor(&cpufreq_gov_brazilianwax); +} + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX +pure_initcall(cpufreq_brazilianwax_init); +#else +module_init(cpufreq_brazilianwax_init); +#endif + +static void __exit cpufreq_brazilianwax_exit(void) +{ + pr_info("[imoseyon] brazilianwax exit\n"); + cpufreq_unregister_governor(&cpufreq_gov_brazilianwax); + destroy_workqueue(up_wq); + destroy_workqueue(down_wq); +} + +module_exit(cpufreq_brazilianwax_exit); + +MODULE_AUTHOR 
("Erasmux/imoseyon"); +MODULE_DESCRIPTION ("'cpufreq_brazilianwax' - A smart cpufreq governor optimized for the hero!"); +MODULE_LICENSE ("GPL"); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 2d639de5..8a6a819b 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -400,6 +400,9 @@ extern struct cpufreq_governor cpufreq_gov_savagedzen; #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMANDX) extern struct cpufreq_governor cpufreq_gov_ondemandx; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_ondemandx) +#elif defined(CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX) +extern struct cpufreq_governor cpufreq_gov_brazilianwax; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_brazilianwax) #endif From 8cee675cd628c83eeec000ed787ead5f8ee5127c Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 10:21:53 -0400 Subject: [PATCH 14/35] Revert "Added LulzactiveV2,Lagfree,Smartass,SmartassV2,Minmax,InteractiveX Governor" This reverts commit 7e0f70e6304a15249e64bd4d1fb5da7f5fdef83f. Conflicts: drivers/cpufreq/Kconfig drivers/cpufreq/Makefile drivers/cpufreq/cpufreq_smartass2.c include/linux/cpufreq.h --- drivers/cpufreq/Kconfig | 45 + drivers/cpufreq/Makefile | 19 +- drivers/cpufreq/cpufreq_interactivex.c | 381 -------- drivers/cpufreq/cpufreq_lagfree.c | 662 -------------- drivers/cpufreq/cpufreq_lulzactive.c | 1143 ------------------------ drivers/cpufreq/cpufreq_minmax.c | 575 ------------ drivers/cpufreq/cpufreq_smartass.c | 642 ------------- include/linux/cpufreq.h | 48 +- 8 files changed, 66 insertions(+), 3449 deletions(-) delete mode 100644 drivers/cpufreq/cpufreq_interactivex.c delete mode 100644 drivers/cpufreq/cpufreq_lagfree.c delete mode 100644 drivers/cpufreq/cpufreq_lulzactive.c delete mode 100644 drivers/cpufreq/cpufreq_minmax.c delete mode 100644 drivers/cpufreq/cpufreq_smartass.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index e0008dd1..50e1a72d 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -230,6 +230,7 @@ config CPU_FREQ_GOV_BADASS module will be called cpufreq_badass. If in doubt, say N. +<<<<<<< HEAD config CPU_FREQ_DEFAULT_GOV_MINMAX bool "minmax" select CPU_FREQ_GOV_MINMAX @@ -239,10 +240,13 @@ config CPU_FREQ_DEFAULT_GOV_MINMAX frequency jumps does by the governor. This is aimed at maximizing both perfomance and battery life. +======= +>>>>>>> parent of 7e0f70e... Added LulzactiveV2,Lagfree,Smartass,SmartassV2,Minmax,InteractiveX Governor config CPU_FREQ_GOV_CONSERVATIVE tristate "'conservative' cpufreq governor" depends on CPU_FREQ help +<<<<<<< HEAD config CPU_FREQ_DEFAULT_GOV_SMARTASS2 bool "smartass2" @@ -291,6 +295,38 @@ config CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND bool "intellidemand" select CPU_FREQ_GOV_INTELLIDEMAND select CPU_FREQ_GOV_PERFORMANCE +======= + 'conservative' - this driver is rather similar to the 'ondemand' + governor both in its source code and its purpose, the difference is + its optimisation for better suitability in a battery powered + environment. The frequency is gracefully increased and decreased + rather than jumping to 100% when speed is required. + + If you have a desktop machine then you should really be considering + the 'ondemand' governor instead, however if you are using a laptop, + PDA or even an AMD64 based computer (due to the unacceptable + step-by-step latency issues between the minimum and maximum frequency + transitions in the CPU) you will probably want to use this governor. 
+ + To compile this driver as a module, choose M here: the + module will be called cpufreq_conservative. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +config CPU_FREQ_GOV_DANCEDANCE + tristate "'dancedance' cpufreq governor" + depends on CPU_FREQ + +config CPU_FREQ_GOV_NIGHTMARE + tristate "'nightmare' cpufreq governor" + depends on CPU_FREQ + +config CPU_FREQ_GOV_ONDEMAND + tristate "'ondemand' cpufreq policy governor" + select CPU_FREQ_TABLE +>>>>>>> parent of 7e0f70e... Added LulzactiveV2,Lagfree,Smartass,SmartassV2,Minmax,InteractiveX Governor help config CPU_FREQ_DEFAULT_GOV_SCARY @@ -380,6 +416,7 @@ config CPU_FREQ_GOV_USERSPACE If in doubt, say Y. +<<<<<<< HEAD config CPU_FREQ_GOV_ONDEMAND tristate "'ondemand' cpufreq policy governor" select CPU_FREQ_TABLE @@ -427,10 +464,13 @@ config CPU_FREQ_GOV_LULZACTIVE help 'lulzactive' - a new interactive governor by Tegrak! +======= +>>>>>>> parent of 7e0f70e... Added LulzactiveV2,Lagfree,Smartass,SmartassV2,Minmax,InteractiveX Governor config CPU_FREQ_GOV_WHEATLEY tristate "'wheatley' cpufreq governor" depends on CPU_FREQ +<<<<<<< HEAD config CPU_FREQ_GOV_SMARTASS tristate "'smartass' cpufreq governor" depends on CPU_FREQ @@ -481,6 +521,8 @@ config CPU_FREQ_MIN_TICKS help Minimum number of ticks between polling interval for governors. +======= +>>>>>>> parent of 7e0f70e... Added LulzactiveV2,Lagfree,Smartass,SmartassV2,Minmax,InteractiveX Governor config SEC_DVFS bool "DVFS job" default n @@ -491,6 +533,7 @@ config SEC_DVFS_BOOSTER default y depends on SEC_DVFS +<<<<<<< HEAD config CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER int "Sampling rate multiplier for governors." default 1000 @@ -540,6 +583,8 @@ config CPU_FREQ_GOV_BRAZILIANWAX If in doubt, say Y. +======= +>>>>>>> parent of 7e0f70e... Added LulzactiveV2,Lagfree,Smartass,SmartassV2,Minmax,InteractiveX Governor menu "x86 CPU frequency scaling drivers" depends on X86 source "drivers/cpufreq/Kconfig.x86" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 22e5500d..34dad87b 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o +<<<<<<< HEAD obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o obj-$(CONFIG_CPU_FREQ_GOV_MINMAX) += cpufreq_minmax.o @@ -37,6 +38,8 @@ obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o obj-$(CONFIG_CPU_FREQ_GOV_SAVAGEDZEN) += cpufreq_savagedzen.o obj-$(CONFIG_CPU_FREQ_GOV_ONDEMANDX) += cpufreq_ondemandx.o obj-$(CONFIG_CPU_FREQ_GOV_BRAZILIANWAX) += cpufreq_brazilianwax.o +======= +>>>>>>> parent of 7e0f70e... 
Added LulzactiveV2,Lagfree,Smartass,SmartassV2,Minmax,InteractiveX Governor # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o @@ -65,7 +68,19 @@ obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o -##################################################################################d - +################################################################################## # ARM SoC drivers obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o +obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o +obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o +obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o +obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o +obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o +obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o +obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o +obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o + +################################################################################## +# PowerPC platform drivers +obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o +obj-$(CONFIG_MSM_DCVS) += cpufreq_gov_msm.o diff --git a/drivers/cpufreq/cpufreq_interactivex.c b/drivers/cpufreq/cpufreq_interactivex.c deleted file mode 100644 index 72ca6291..00000000 --- a/drivers/cpufreq/cpufreq_interactivex.c +++ /dev/null @@ -1,381 +0,0 @@ -/* -* drivers/cpufreq/cpufreq_interactivex.c -* -* Copyright (C) 2010 Google, Inc. -* -* This software is licensed under the terms of the GNU General Public -* License version 2, as published by the Free Software Foundation, and -* may be copied, distributed, and modified under those terms. -* -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -* GNU General Public License for more details. -* -* Author: Mike Chan (mike@android.com) - modified for suspend/wake by imoseyon -* -*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -static void (*pm_idle_old)(void); -static atomic_t active_count = ATOMIC_INIT(0); - -static DEFINE_PER_CPU(struct timer_list, cpu_timer); - -static DEFINE_PER_CPU(u64, time_in_idle); -static DEFINE_PER_CPU(u64, idle_exit_time); - -static struct cpufreq_policy *policy; -static unsigned int target_freq; - -/* Workqueues handle frequency scaling */ -static struct workqueue_struct *up_wq; -static struct workqueue_struct *down_wq; -static struct work_struct freq_scale_work; - -static u64 freq_change_time; -static u64 freq_change_time_in_idle; - -static cpumask_t work_cpumask; - -static unsigned int suspended = 0; -static unsigned int enabled = 0; - -/* -* The minimum ammount of time to spend at a frequency before we can ramp down, -* default is 50ms. 
-*/ -#define DEFAULT_MIN_SAMPLE_TIME 50000; -static unsigned long min_sample_time; - -#define FREQ_THRESHOLD 998400; -#define RESUME_SPEED 998400; - -static int cpufreq_governor_interactivex(struct cpufreq_policy *policy, -unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX -static -#endif -struct cpufreq_governor cpufreq_gov_interactivex = { -.name = "interactiveX", -.governor = cpufreq_governor_interactivex, -#if defined(CONFIG_ARCH_MSM_SCORPION) -.max_transition_latency = 8000000, -#else -.max_transition_latency = 10000000, -#endif -.owner = THIS_MODULE, -}; - -static void cpufreq_interactivex_timer(unsigned long data) -{ -u64 delta_idle; -u64 update_time; -u64 *cpu_time_in_idle; -u64 *cpu_idle_exit_time; -struct timer_list *t; - -u64 now_idle = get_cpu_idle_time_us(data, -&update_time); - - -cpu_time_in_idle = &per_cpu(time_in_idle, data); -cpu_idle_exit_time = &per_cpu(idle_exit_time, data); - -if (update_time == *cpu_idle_exit_time) -return; - -delta_idle = cputime64_sub(now_idle, *cpu_time_in_idle); - -/* Scale up if there were no idle cycles since coming out of idle */ -if (delta_idle == 0) { -if (policy->cur == policy->max) -return; - -if (nr_running() < 1) -return; - -target_freq = policy->max; - -cpumask_set_cpu(data, &work_cpumask); -queue_work(up_wq, &freq_scale_work); -return; -} - -/* -* There is a window where if the cpu utlization can go from low to high -* between the timer expiring, delta_idle will be > 0 and the cpu will -* be 100% busy, preventing idle from running, and this timer from -* firing. So setup another timer to fire to check cpu utlization. -* Do not setup the timer if there is no scheduled work. -*/ -t = &per_cpu(cpu_timer, data); -if (!timer_pending(t) && nr_running() > 0) { -*cpu_time_in_idle = get_cpu_idle_time_us( -data, cpu_idle_exit_time); -mod_timer(t, jiffies + 2); -} - -if (policy->cur == policy->min) -return; - -/* -* Do not scale down unless we have been at this frequency for the -* minimum sample time. -*/ -if (cputime64_sub(update_time, freq_change_time) < min_sample_time) -return; - -target_freq = policy->min; -cpumask_set_cpu(data, &work_cpumask); -queue_work(down_wq, &freq_scale_work); -} - -static void cpufreq_idle(void) -{ -struct timer_list *t; -u64 *cpu_time_in_idle; -u64 *cpu_idle_exit_time; - -pm_idle_old(); - -if (!cpumask_test_cpu(smp_processor_id(), policy->cpus)) -return; - -/* Timer to fire in 1-2 ticks, jiffie aligned. */ -t = &per_cpu(cpu_timer, smp_processor_id()); -cpu_idle_exit_time = &per_cpu(idle_exit_time, smp_processor_id()); -cpu_time_in_idle = &per_cpu(time_in_idle, smp_processor_id()); - -if (timer_pending(t) == 0) { -*cpu_time_in_idle = get_cpu_idle_time_us( -smp_processor_id(), cpu_idle_exit_time); -mod_timer(t, jiffies + 2); -} -} - -/* -* Choose the cpu frequency based off the load. For now choose the minimum -* frequency that will satisfy the load, which is not always the lower power. 
-*/ -static unsigned int cpufreq_interactivex_calc_freq(unsigned int cpu) -{ -unsigned int delta_time; -unsigned int idle_time; -unsigned int cpu_load; -unsigned int newfreq; -u64 current_wall_time; -u64 current_idle_time;; - -current_idle_time = get_cpu_idle_time_us(cpu, ¤t_wall_time); - -idle_time = (unsigned int) current_idle_time - freq_change_time_in_idle; -delta_time = (unsigned int) current_wall_time - freq_change_time; - -cpu_load = 100 * (delta_time - idle_time) / delta_time; - -if (cpu_load > 98) newfreq = policy->max; -else newfreq = policy->cur * cpu_load / 100; - -return newfreq; -} - - -/* We use the same work function to sale up and down */ -static void cpufreq_interactivex_freq_change_time_work(struct work_struct *work) -{ -unsigned int cpu; -unsigned int newtarget; -cpumask_t tmp_mask = work_cpumask; -newtarget = FREQ_THRESHOLD; - -for_each_cpu(cpu, &tmp_mask) { -if (!suspended) { -if (target_freq == policy->max) { -if (nr_running() == 1) { -cpumask_clear_cpu(cpu, &work_cpumask); -return; -} -// __cpufreq_driver_target(policy, target_freq, CPUFREQ_RELATION_H); -__cpufreq_driver_target(policy, newtarget, CPUFREQ_RELATION_H); -} else { -target_freq = cpufreq_interactivex_calc_freq(cpu); -__cpufreq_driver_target(policy, target_freq, -CPUFREQ_RELATION_L); -} -} -freq_change_time_in_idle = get_cpu_idle_time_us(cpu, &freq_change_time); -cpumask_clear_cpu(cpu, &work_cpumask); -} - - -} - -static ssize_t show_min_sample_time(struct kobject *kobj, -struct attribute *attr, char *buf) -{ -return sprintf(buf, "%lu\n", min_sample_time); -} - -static ssize_t store_min_sample_time(struct kobject *kobj, -struct attribute *attr, const char *buf, size_t count) -{ -return strict_strtoul(buf, 0, &min_sample_time); -} - -static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644, -show_min_sample_time, store_min_sample_time); - -static struct attribute *interactivex_attributes[] = { -&min_sample_time_attr.attr, -NULL, -}; - -static struct attribute_group interactivex_attr_group = { -.attrs = interactivex_attributes, -.name = "interactiveX", -}; - -static void interactivex_suspend(int suspend) -{ -unsigned int max_speed; - -max_speed = RESUME_SPEED; - -if (!enabled) return; - if (!suspend) { // resume at max speed: -suspended = 0; - __cpufreq_driver_target(policy, max_speed, CPUFREQ_RELATION_L); - pr_info("[imoseyon] interactiveX awake at %d\n", policy->cur); - } else { -suspended = 1; - __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); - pr_info("[imoseyon] interactiveX suspended at %d\n", policy->cur); - } -} - -static void interactivex_early_suspend(struct early_suspend *handler) { - interactivex_suspend(1); -} - -static void interactivex_late_resume(struct early_suspend *handler) { - interactivex_suspend(0); -} - -static struct early_suspend interactivex_power_suspend = { - .suspend = interactivex_early_suspend, - .resume = interactivex_late_resume, - .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, -}; - -static int cpufreq_governor_interactivex(struct cpufreq_policy *new_policy, -unsigned int event) -{ -int rc; -switch (event) { -case CPUFREQ_GOV_START: -if (!cpu_online(new_policy->cpu)) -return -EINVAL; - -/* -* Do not register the idle hook and create sysfs -* entries if we have already done so. 
-*/ -if (atomic_inc_return(&active_count) > 1) -return 0; - -rc = sysfs_create_group(cpufreq_global_kobject, -&interactivex_attr_group); -if (rc) -return rc; - -pm_idle_old = pm_idle; -pm_idle = cpufreq_idle; -policy = new_policy; -enabled = 1; - register_early_suspend(&interactivex_power_suspend); - pr_info("[imoseyon] interactiveX active\n"); -break; - -case CPUFREQ_GOV_STOP: -if (atomic_dec_return(&active_count) > 1) -return 0; - -sysfs_remove_group(cpufreq_global_kobject, -&interactivex_attr_group); - -pm_idle = pm_idle_old; -del_timer(&per_cpu(cpu_timer, new_policy->cpu)); -enabled = 0; - unregister_early_suspend(&interactivex_power_suspend); - pr_info("[imoseyon] interactiveX inactive\n"); -break; - -case CPUFREQ_GOV_LIMITS: -if (new_policy->max < new_policy->cur) -__cpufreq_driver_target(new_policy, -new_policy->max, CPUFREQ_RELATION_H); -else if (new_policy->min > new_policy->cur) -__cpufreq_driver_target(new_policy, -new_policy->min, CPUFREQ_RELATION_L); -break; -} -return 0; -} - -static int __init cpufreq_interactivex_init(void) -{ -unsigned int i; -struct timer_list *t; -min_sample_time = DEFAULT_MIN_SAMPLE_TIME; - -/* Initalize per-cpu timers */ -for_each_possible_cpu(i) { -t = &per_cpu(cpu_timer, i); -init_timer_deferrable(t); -t->function = cpufreq_interactivex_timer; -t->data = i; -} - -/* Scale up is high priority */ -up_wq = create_workqueue("kinteractive_up"); -down_wq = create_workqueue("knteractive_down"); - -INIT_WORK(&freq_scale_work, cpufreq_interactivex_freq_change_time_work); - - pr_info("[imoseyon] interactiveX enter\n"); -return cpufreq_register_governor(&cpufreq_gov_interactivex); -} - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX -fs_initcall(cpufreq_interactivex_init); -#else -module_init(cpufreq_interactivex_init); -#endif - -static void __exit cpufreq_interactivex_exit(void) -{ - pr_info("[imoseyon] interactiveX exit\n"); -cpufreq_unregister_governor(&cpufreq_gov_interactivex); -destroy_workqueue(up_wq); -destroy_workqueue(down_wq); -} - -module_exit(cpufreq_interactivex_exit); - -MODULE_AUTHOR("Mike Chan "); -MODULE_DESCRIPTION("'cpufreq_interactiveX' - A cpufreq governor for " -"Latency sensitive workloads"); -MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cpufreq_lagfree.c b/drivers/cpufreq/cpufreq_lagfree.c deleted file mode 100644 index bf274a11..00000000 --- a/drivers/cpufreq/cpufreq_lagfree.c +++ /dev/null @@ -1,662 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_lagfree.c - * - * Copyright (C) 2001 Russell King - * (C) 2003 Venkatesh Pallipadi . - * Jun Nakajima - * (C) 2004 Alexander Clouter - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ - -#define DEF_FREQUENCY_UP_THRESHOLD (50) -#define DEF_FREQUENCY_DOWN_THRESHOLD (15) -#define FREQ_STEP_DOWN (160000) -#define FREQ_SLEEP_MAX (320000) -#define FREQ_AWAKE_MIN (480000) -#define FREQ_STEP_UP_SLEEP_PERCENT (20) - -/* - * The polling frequency of this governor depends on the capability of - * the processor. Default polling frequency is 1000 times the transition - * latency of the processor. 
The governor will work on any processor with - * transition latency <= 10mS, using appropriate sampling - * rate. - * For CPUs with transition latency > 10mS (mostly drivers - * with CPUFREQ_ETERNAL), this governor will not work. - * All times here are in uS. - */ -static unsigned int def_sampling_rate; -unsigned int suspended = 0; -#define MIN_SAMPLING_RATE_RATIO (2) -/* for correct statistics, we need at least 10 ticks between each measure */ -#define MIN_STAT_SAMPLING_RATE \ - (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(CONFIG_CPU_FREQ_MIN_TICKS)) -#define MIN_SAMPLING_RATE \ - (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) -#define MAX_SAMPLING_RATE (500 * def_sampling_rate) -#define DEF_SAMPLING_DOWN_FACTOR (4) -#define MAX_SAMPLING_DOWN_FACTOR (10) -#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) - -static void do_dbs_timer(struct work_struct *work); - -struct cpu_dbs_info_s { - struct cpufreq_policy *cur_policy; - unsigned int prev_cpu_idle_up; - unsigned int prev_cpu_idle_down; - unsigned int enable; - unsigned int down_skip; - unsigned int requested_freq; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); - -static unsigned int dbs_enable; /* number of CPUs using this policy */ - -/* - * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug - * lock and dbs_mutex. cpu_hotplug lock should always be held before - * dbs_mutex. If any function that can potentially take cpu_hotplug lock - * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then - * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock - * is recursive for the same process. -Venki - */ -static DEFINE_MUTEX (dbs_mutex); -static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer); - -struct dbs_tuners { - unsigned int sampling_rate; - unsigned int sampling_down_factor; - unsigned int up_threshold; - unsigned int down_threshold; - unsigned int ignore_nice; - //unsigned int freq_step; -}; - -static struct dbs_tuners dbs_tuners_ins = { - .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, - .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, - .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, - .ignore_nice = 1, - //.freq_step = 5, -}; - -static inline unsigned int get_cpu_idle_time(unsigned int cpu) -{ - unsigned int add_nice = 0, ret; - - if (dbs_tuners_ins.ignore_nice) - add_nice = kstat_cpu(cpu).cpustat.nice; - - ret = kstat_cpu(cpu).cpustat.idle + - kstat_cpu(cpu).cpustat.iowait + - add_nice; - - return ret; -} - -/* keep track of frequency transitions */ -static int -dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, - void *data) -{ - struct cpufreq_freqs *freq = data; - struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, - freq->cpu); - - if (!this_dbs_info->enable) - return 0; - - this_dbs_info->requested_freq = freq->new; - - return 0; -} - -static struct notifier_block dbs_cpufreq_notifier_block = { - .notifier_call = dbs_cpufreq_notifier -}; - -/************************** sysfs interface ************************/ -static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) -{ - return sprintf (buf, "%u\n", MAX_SAMPLING_RATE); -} - -static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) -{ - return sprintf (buf, "%u\n", MIN_SAMPLING_RATE); -} - -#define define_one_ro(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0444, show_##_name, NULL) - -define_one_ro(sampling_rate_max); -define_one_ro(sampling_rate_min); - -/* cpufreq_lagfree Governor Tunables */ -#define show_one(file_name, 
object) \ -static ssize_t show_##file_name \ -(struct cpufreq_policy *unused, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ -} -show_one(sampling_rate, sampling_rate); -show_one(sampling_down_factor, sampling_down_factor); -show_one(up_threshold, up_threshold); -show_one(down_threshold, down_threshold); -show_one(ignore_nice_load, ignore_nice); -//show_one(freq_step, freq_step); - -static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf (buf, "%u", &input); - if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.sampling_down_factor = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_sampling_rate(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf (buf, "%u", &input); - - mutex_lock(&dbs_mutex); - if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) { - mutex_unlock(&dbs_mutex); - return -EINVAL; - } - - dbs_tuners_ins.sampling_rate = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_up_threshold(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf (buf, "%u", &input); - - mutex_lock(&dbs_mutex); - if (ret != 1 || input > 100 || input <= dbs_tuners_ins.down_threshold) { - mutex_unlock(&dbs_mutex); - return -EINVAL; - } - - dbs_tuners_ins.up_threshold = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_down_threshold(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf (buf, "%u", &input); - - mutex_lock(&dbs_mutex); - if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) { - mutex_unlock(&dbs_mutex); - return -EINVAL; - } - - dbs_tuners_ins.down_threshold = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - unsigned int j; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - if (input > 1) - input = 1; - - mutex_lock(&dbs_mutex); - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - mutex_unlock(&dbs_mutex); - return count; - } - dbs_tuners_ins.ignore_nice = input; - - /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */ - for_each_online_cpu(j) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cpu_dbs_info, j); - j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j); - j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up; - } - mutex_unlock(&dbs_mutex); - - return count; -} - -/*static ssize_t store_freq_step(struct cpufreq_policy *policy, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - if (input > 100) - input = 100; - - / * no need to test here if freq_step is zero as the user might actually - * want this, they would be crazy though :) * / - mutex_lock(&dbs_mutex); - dbs_tuners_ins.freq_step = input; - mutex_unlock(&dbs_mutex); - - return count; -}*/ - -#define define_one_rw(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0644, show_##_name, store_##_name) - -define_one_rw(sampling_rate); -define_one_rw(sampling_down_factor); -define_one_rw(up_threshold); 
-define_one_rw(down_threshold); -define_one_rw(ignore_nice_load); -//define_one_rw(freq_step); - -static struct attribute * dbs_attributes[] = { - &sampling_rate_max.attr, - &sampling_rate_min.attr, - &sampling_rate.attr, - &sampling_down_factor.attr, - &up_threshold.attr, - &down_threshold.attr, - &ignore_nice_load.attr, - //&freq_step.attr, - NULL -}; - -static struct attribute_group dbs_attr_group = { - .attrs = dbs_attributes, - .name = "lagfree", -}; - -/************************** sysfs end ************************/ - -static void dbs_check_cpu(int cpu) -{ - unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; - unsigned int tmp_idle_ticks, total_idle_ticks; - unsigned int freq_target; - unsigned int freq_down_sampling_rate; - struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu); - struct cpufreq_policy *policy; - - if (!this_dbs_info->enable) - return; - - policy = this_dbs_info->cur_policy; - - /* - * The default safe range is 20% to 80% - * Every sampling_rate, we check - * - If current idle time is less than 20%, then we try to - * increase frequency - * Every sampling_rate*sampling_down_factor, we check - * - If current idle time is more than 80%, then we try to - * decrease frequency - * - * Any frequency increase takes it to the maximum frequency. - * Frequency reduction happens at minimum steps of - * 5% (default) of max_frequency - */ - - /* Check for frequency increase */ - idle_ticks = UINT_MAX; - - /* Check for frequency increase */ - total_idle_ticks = get_cpu_idle_time(cpu); - tmp_idle_ticks = total_idle_ticks - - this_dbs_info->prev_cpu_idle_up; - this_dbs_info->prev_cpu_idle_up = total_idle_ticks; - - if (tmp_idle_ticks < idle_ticks) - idle_ticks = tmp_idle_ticks; - - /* Scale idle ticks by 100 and compare with up and down ticks */ - idle_ticks *= 100; - up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) * - usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - if (idle_ticks < up_idle_ticks) { - this_dbs_info->down_skip = 0; - this_dbs_info->prev_cpu_idle_down = - this_dbs_info->prev_cpu_idle_up; - - /* if we are already at full speed then break out early */ - if (this_dbs_info->requested_freq == policy->max && !suspended) - return; - - //freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; - if (suspended) - freq_target = (FREQ_STEP_UP_SLEEP_PERCENT * policy->max) / 100; - else - freq_target = policy->max; - - /* max freq cannot be less than 100. But who knows.... 
*/ - if (unlikely(freq_target == 0)) - freq_target = 5; - - this_dbs_info->requested_freq += freq_target; - if (this_dbs_info->requested_freq > policy->max) - this_dbs_info->requested_freq = policy->max; - - //Screen off mode - if (suspended && this_dbs_info->requested_freq > FREQ_SLEEP_MAX) - this_dbs_info->requested_freq = FREQ_SLEEP_MAX; - - //Screen off mode - if (!suspended && this_dbs_info->requested_freq < FREQ_AWAKE_MIN) - this_dbs_info->requested_freq = FREQ_AWAKE_MIN; - - __cpufreq_driver_target(policy, this_dbs_info->requested_freq, - CPUFREQ_RELATION_H); - return; - } - - /* Check for frequency decrease */ - this_dbs_info->down_skip++; - if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor) - return; - - /* Check for frequency decrease */ - total_idle_ticks = this_dbs_info->prev_cpu_idle_up; - tmp_idle_ticks = total_idle_ticks - - this_dbs_info->prev_cpu_idle_down; - this_dbs_info->prev_cpu_idle_down = total_idle_ticks; - - if (tmp_idle_ticks < idle_ticks) - idle_ticks = tmp_idle_ticks; - - /* Scale idle ticks by 100 and compare with up and down ticks */ - idle_ticks *= 100; - this_dbs_info->down_skip = 0; - - freq_down_sampling_rate = dbs_tuners_ins.sampling_rate * - dbs_tuners_ins.sampling_down_factor; - down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * - usecs_to_jiffies(freq_down_sampling_rate); - - if (idle_ticks > down_idle_ticks) { - /* - * if we are already at the lowest speed then break out early - * or if we 'cannot' reduce the speed as the user might want - * freq_target to be zero - */ - if (this_dbs_info->requested_freq == policy->min && suspended - /*|| dbs_tuners_ins.freq_step == 0*/) - return; - - //freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; - freq_target = FREQ_STEP_DOWN; //policy->max; - - /* max freq cannot be less than 100. But who knows.... 
*/ - if (unlikely(freq_target == 0)) - freq_target = 5; - - // prevent going under 0 - if(freq_target > this_dbs_info->requested_freq) - this_dbs_info->requested_freq = policy->min; - else - this_dbs_info->requested_freq -= freq_target; - - if (this_dbs_info->requested_freq < policy->min) - this_dbs_info->requested_freq = policy->min; - - //Screen on mode - if (!suspended && this_dbs_info->requested_freq < FREQ_AWAKE_MIN) - this_dbs_info->requested_freq = FREQ_AWAKE_MIN; - - //Screen off mode - if (suspended && this_dbs_info->requested_freq > FREQ_SLEEP_MAX) - this_dbs_info->requested_freq = FREQ_SLEEP_MAX; - - __cpufreq_driver_target(policy, this_dbs_info->requested_freq, - CPUFREQ_RELATION_H); - return; - } -} - -static void do_dbs_timer(struct work_struct *work) -{ - int i; - mutex_lock(&dbs_mutex); - for_each_online_cpu(i) - dbs_check_cpu(i); - schedule_delayed_work(&dbs_work, - usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); - mutex_unlock(&dbs_mutex); -} - -static inline void dbs_timer_init(void) -{ - init_timer_deferrable(&dbs_work.timer); - schedule_delayed_work(&dbs_work, - usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); - return; -} - -static inline void dbs_timer_exit(void) -{ - cancel_delayed_work(&dbs_work); - return; -} - -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int j; - int rc; - - this_dbs_info = &per_cpu(cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - if (this_dbs_info->enable) /* Already enabled */ - break; - - mutex_lock(&dbs_mutex); - - rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu); - j_dbs_info->prev_cpu_idle_down - = j_dbs_info->prev_cpu_idle_up; - } - this_dbs_info->enable = 1; - this_dbs_info->down_skip = 0; - this_dbs_info->requested_freq = policy->cur; - - dbs_enable++; - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 1) { - unsigned int latency; - /* policy latency is in nS. 
Convert it to uS first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - - def_sampling_rate = 10 * latency * - CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER; - - if (def_sampling_rate < MIN_STAT_SAMPLING_RATE) - def_sampling_rate = MIN_STAT_SAMPLING_RATE; - - dbs_tuners_ins.sampling_rate = def_sampling_rate; - - dbs_timer_init(); - cpufreq_register_notifier( - &dbs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - } - - mutex_unlock(&dbs_mutex); - break; - - case CPUFREQ_GOV_STOP: - mutex_lock(&dbs_mutex); - this_dbs_info->enable = 0; - sysfs_remove_group(&policy->kobj, &dbs_attr_group); - dbs_enable--; - /* - * Stop the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 0) { - dbs_timer_exit(); - cpufreq_unregister_notifier( - &dbs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - } - - mutex_unlock(&dbs_mutex); - - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&dbs_mutex); - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target( - this_dbs_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target( - this_dbs_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); - mutex_unlock(&dbs_mutex); - break; - } - return 0; -} - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_LAGFREE -static -#endif -struct cpufreq_governor cpufreq_gov_lagfree = { - .name = "lagfree", - .governor = cpufreq_governor_dbs, - .max_transition_latency = TRANSITION_LATENCY_LIMIT, - .owner = THIS_MODULE, -}; - -static void lagfree_early_suspend(struct early_suspend *handler) { - suspended = 1; -} - -static void lagfree_late_resume(struct early_suspend *handler) { - suspended = 0; -} - -static struct early_suspend lagfree_power_suspend = { - .suspend = lagfree_early_suspend, - .resume = lagfree_late_resume, - .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, -}; - -static int __init cpufreq_gov_dbs_init(void) -{ - register_early_suspend(&lagfree_power_suspend); - return cpufreq_register_governor(&cpufreq_gov_lagfree); -} - -static void __exit cpufreq_gov_dbs_exit(void) -{ - /* Make sure that the scheduled work is indeed not running */ - flush_scheduled_work(); - - unregister_early_suspend(&lagfree_power_suspend); - cpufreq_unregister_governor(&cpufreq_gov_lagfree); -} - - -MODULE_AUTHOR ("Emilio López "); -MODULE_DESCRIPTION ("'cpufreq_lagfree' - A dynamic cpufreq governor for " - "Low Latency Frequency Transition capable processors " - "optimised for use in a battery environment" - "Based on conservative by Alexander Clouter"); -MODULE_LICENSE ("GPL"); - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_LAGFREE -fs_initcall(cpufreq_gov_dbs_init); -#else -module_init(cpufreq_gov_dbs_init); -#endif -module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_lulzactive.c b/drivers/cpufreq/cpufreq_lulzactive.c deleted file mode 100644 index ab5506a6..00000000 --- a/drivers/cpufreq/cpufreq_lulzactive.c +++ /dev/null @@ -1,1143 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_lulzactive.c - * - * Copyright (C) 2010 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * Author: Mike Chan (mike@android.com) - * Edited: Tegrak (luciferanna@gmail.com) - * - * Driver values in /sys/devices/system/cpu/cpufreq/lulzactive - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define LULZACTIVE_VERSION (2) -#define LULZACTIVE_AUTHOR "tegrak" - -// if you changed some codes for optimization, just write your name here. -#define LULZACTIVE_TUNER "simone201" - -#define LOGI(fmt...) printk(KERN_INFO "[lulzactive] " fmt) -#define LOGW(fmt...) printk(KERN_WARNING "[lulzactive] " fmt) -#define LOGD(fmt...) printk(KERN_DEBUG "[lulzactive] " fmt) - -static void (*pm_idle_old)(void); -static atomic_t active_count = ATOMIC_INIT(0); - -struct cpufreq_lulzactive_cpuinfo { - struct timer_list cpu_timer; - int timer_idlecancel; - u64 time_in_idle; - u64 idle_exit_time; - u64 timer_run_time; - int idling; - u64 freq_change_time; - u64 freq_change_time_in_idle; - struct cpufreq_policy *policy; - struct cpufreq_frequency_table *freq_table; - unsigned int freq_table_size; - unsigned int target_freq; - int governor_enabled; -}; - -static DEFINE_PER_CPU(struct cpufreq_lulzactive_cpuinfo, cpuinfo); - -/* Workqueues handle frequency scaling */ -static struct task_struct *up_task; -static struct workqueue_struct *down_wq; -static struct work_struct freq_scale_down_work; -static cpumask_t up_cpumask; -static spinlock_t up_cpumask_lock; -static cpumask_t down_cpumask; -static spinlock_t down_cpumask_lock; - -/* - * The minimum amount of time to spend at a frequency before we can step up. - */ -#define DEFAULT_UP_SAMPLE_TIME 20000 -static unsigned long up_sample_time; - -/* - * The minimum amount of time to spend at a frequency before we can step down. - */ -#define DEFAULT_DOWN_SAMPLE_TIME 40000 -static unsigned long down_sample_time; - -/* - * DEBUG print flags - */ -static unsigned long debug_mode; -enum { - LULZACTIVE_DEBUG_EARLY_SUSPEND=1, - LULZACTIVE_DEBUG_START_STOP=2, - LULZACTIVE_DEBUG_LOAD=4, - LULZACTIVE_DEBUG_SUSPEND=8, -}; -//#define DEFAULT_DEBUG_MODE (LULZACTIVE_DEBUG_EARLY_SUSPEND | LULZACTIVE_DEBUG_START_STOP | LULZACTIVE_DEBUG_SUSPEND) -#define DEFAULT_DEBUG_MODE (0) - -/* - * CPU freq will be increased if measured load > inc_cpu_load; - */ -#define DEFAULT_INC_CPU_LOAD 75 -static unsigned long inc_cpu_load; - -/* - * CPU freq will be decreased if measured load < dec_cpu_load; - * not implemented yet. - */ -#define DEFAULT_DEC_CPU_LOAD 30 -static unsigned long dec_cpu_load; - -/* - * Increasing frequency table index - * zero disables and causes to always jump straight to max frequency. - */ -#define DEFAULT_PUMP_UP_STEP 1 -static unsigned long pump_up_step; - -/* - * Decreasing frequency table index - * zero disables and will calculate frequency according to load heuristic. - */ -#define DEFAULT_PUMP_DOWN_STEP 1 -static unsigned long pump_down_step; - -/* - * Use minimum frequency while suspended. 
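/*
 * Editor's note (illustrative sketch, not part of the original patch):
 * pump_up_step and pump_down_step, documented above, are counted in
 * frequency-table entries. In cpufreq_lulzactive_timer() further down, the
 * current index moves pump_up_step entries towards index 0 once load reaches
 * inc_cpu_load (index 0 being the fastest entry in the tables this code
 * assumes) and pump_down_step entries the other way otherwise, clamped at
 * both ends. The standalone helper below reproduces just that clamped index
 * arithmetic; the table size and tunable values in main() are made up.
 */
#include <stdio.h>

static int lulz_next_index(int cur_index, int load, int inc_cpu_load,
			   int pump_up_step, int pump_down_step, int table_size)
{
	int index = cur_index;

	if (load >= inc_cpu_load)
		index -= pump_up_step;	 /* towards the fast end of the table */
	else
		index += pump_down_step; /* towards the slow end of the table */

	if (index < 0)
		index = 0;
	if (index > table_size - 1)
		index = table_size - 1;

	return index;
}

int main(void)
{
	/* hypothetical 6-entry table, 75% up threshold, single-entry pumps */
	printf("busy: index 3 -> %d\n", lulz_next_index(3, 90, 75, 1, 1, 6));
	printf("idle: index 3 -> %d\n", lulz_next_index(3, 20, 75, 1, 1, 6));
	return 0;
}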
- */ -static unsigned int suspending; -static unsigned int early_suspended; - -#define SCREEN_OFF_LOWEST_STEP (0xffffffff) -#define DEFAULT_SCREEN_OFF_MIN_STEP (SCREEN_OFF_LOWEST_STEP) -static unsigned long screen_off_min_step; - -#define DEBUG 0 -#define BUFSZ 128 - -#if DEBUG -#include - -struct dbgln { - int cpu; - unsigned long jiffy; - unsigned long run; - char buf[BUFSZ]; -}; - -#define NDBGLNS 256 - -static struct dbgln dbgbuf[NDBGLNS]; -static int dbgbufs; -static int dbgbufe; -static struct proc_dir_entry *dbg_proc; -static spinlock_t dbgpr_lock; - -static u64 up_request_time; -static unsigned int up_max_latency; - -static void dbgpr(char *fmt, ...) -{ - va_list args; - int n; - unsigned long flags; - - spin_lock_irqsave(&dbgpr_lock, flags); - n = dbgbufe; - va_start(args, fmt); - vsnprintf(dbgbuf[n].buf, BUFSZ, fmt, args); - va_end(args); - dbgbuf[n].cpu = smp_processor_id(); - dbgbuf[n].run = nr_running(); - dbgbuf[n].jiffy = jiffies; - - if (++dbgbufe >= NDBGLNS) - dbgbufe = 0; - - if (dbgbufe == dbgbufs) - if (++dbgbufs >= NDBGLNS) - dbgbufs = 0; - - spin_unlock_irqrestore(&dbgpr_lock, flags); -} - -static void dbgdump(void) -{ - int i, j; - unsigned long flags; - static struct dbgln prbuf[NDBGLNS]; - - spin_lock_irqsave(&dbgpr_lock, flags); - i = dbgbufs; - j = dbgbufe; - memcpy(prbuf, dbgbuf, sizeof(dbgbuf)); - dbgbufs = 0; - dbgbufe = 0; - spin_unlock_irqrestore(&dbgpr_lock, flags); - - while (i != j) - { - printk("%lu %d %lu %s", - prbuf[i].jiffy, prbuf[i].cpu, prbuf[i].run, - prbuf[i].buf); - if (++i == NDBGLNS) - i = 0; - } -} - -static int dbg_proc_read(char *buffer, char **start, off_t offset, - int count, int *peof, void *dat) -{ - printk("max up_task latency=%uus\n", up_max_latency); - dbgdump(); - *peof = 1; - return 0; -} - - -#else -#define dbgpr(...) 
do {} while (0) -#endif - -static int cpufreq_governor_lulzactive(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_LULZACTIVE -static -#endif -struct cpufreq_governor cpufreq_gov_lulzactive = { - .name = "lulzactive", - .governor = cpufreq_governor_lulzactive, - .max_transition_latency = 9000000, - .owner = THIS_MODULE, -}; - -static unsigned int get_freq_table_size(struct cpufreq_frequency_table *freq_table) { - unsigned int size = 0; - while (freq_table[++size].frequency != CPUFREQ_TABLE_END); - return size; -} - -static inline void fix_screen_off_min_step(struct cpufreq_lulzactive_cpuinfo *pcpu) { - if (pcpu->freq_table_size <= 0) { - screen_off_min_step = 0; - return; - } - - if (DEFAULT_SCREEN_OFF_MIN_STEP == screen_off_min_step) - screen_off_min_step = pcpu->freq_table_size - 3; - - if (screen_off_min_step >= pcpu->freq_table_size) - screen_off_min_step = pcpu->freq_table_size - 3; -} - -static inline unsigned int adjust_screen_off_freq( - struct cpufreq_lulzactive_cpuinfo *pcpu, unsigned int freq) { - - if (early_suspended && freq > pcpu->freq_table[screen_off_min_step].frequency) { - freq = pcpu->freq_table[screen_off_min_step].frequency; - pcpu->target_freq = pcpu->policy->cur; - - if (freq > pcpu->policy->max) - freq = pcpu->policy->max; - if (freq < pcpu->policy->min) - freq = pcpu->policy->min; - } - - return freq; -} - -static void cpufreq_lulzactive_timer(unsigned long data) -{ - unsigned int delta_idle; - unsigned int delta_time; - int cpu_load; - int load_since_change; - u64 time_in_idle; - u64 idle_exit_time; - struct cpufreq_lulzactive_cpuinfo *pcpu = - &per_cpu(cpuinfo, data); - u64 now_idle; - unsigned int new_freq; - int index; - int ret; - - /* - * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time, - * this lets idle exit know the current idle time sample has - * been processed, and idle exit can generate a new sample and - * re-arm the timer. This prevents a concurrent idle - * exit on that CPU from writing a new set of info at the same time - * the timer function runs (the timer function can't use that info - * until more time passes). - */ - time_in_idle = pcpu->time_in_idle; - idle_exit_time = pcpu->idle_exit_time; - now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time); - smp_wmb(); - - /* If we raced with cancelling a timer, skip. */ - if (!idle_exit_time) { - dbgpr("timer %d: no valid idle exit sample\n", (int) data); - goto exit; - } - - /* let it be when s5pv310 contorl the suspending by tegrak */ - //if (suspending) { - // goto rearm; - //} - -#if DEBUG - if ((int) jiffies - (int) pcpu->cpu_timer.expires >= 10) - dbgpr("timer %d: late by %d ticks\n", - (int) data, jiffies - pcpu->cpu_timer.expires); -#endif - - delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle); - delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time, - idle_exit_time); - - /* - * If timer ran less than 1ms after short-term sample started, retry. 
- */ - if (delta_time < 1000) { - dbgpr("timer %d: time delta %u too short exit=%llu now=%llu\n", (int) data, - delta_time, idle_exit_time, pcpu->timer_run_time); - goto rearm; - } - - if (delta_idle > delta_time) - cpu_load = 0; - else - cpu_load = 100 * (delta_time - delta_idle) / delta_time; - - delta_idle = (unsigned int) cputime64_sub(now_idle, - pcpu->freq_change_time_in_idle); - delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time, - pcpu->freq_change_time); - - if (delta_idle > delta_time) - load_since_change = 0; - else - load_since_change = - 100 * (delta_time - delta_idle) / delta_time; - - /* - * Choose greater of short-term load (since last idle timer - * started or timer function re-armed itself) or long-term load - * (since last frequency change). - */ - if (load_since_change > cpu_load) - cpu_load = load_since_change; - - /* - * START lulzactive algorithm section - */ - if (cpu_load >= inc_cpu_load) { - if (pump_up_step && pcpu->policy->cur < pcpu->policy->max) { - ret = cpufreq_frequency_table_target( - pcpu->policy, pcpu->freq_table, - pcpu->policy->cur, CPUFREQ_RELATION_H, - &index); - if (ret < 0) { - goto rearm; - } - - // apply pump_up_step by tegrak - index -= pump_up_step; - if (index < 0) - index = 0; - - new_freq = pcpu->freq_table[index].frequency; - } - else { - new_freq = pcpu->policy->max; - } - } - else { - if (pump_down_step) { - ret = cpufreq_frequency_table_target( - pcpu->policy, pcpu->freq_table, - pcpu->policy->cur, CPUFREQ_RELATION_H, - &index); - if (ret < 0) { - goto rearm; - } - - // apply pump_down_step by tegrak - index += pump_down_step; - if (index >= pcpu->freq_table_size) { - index = pcpu->freq_table_size - 1; - } - - new_freq = (pcpu->policy->cur > pcpu->policy->min) ? - (pcpu->freq_table[index].frequency) : - (pcpu->policy->min); - } - else { - new_freq = pcpu->policy->max * cpu_load / 100; - ret = cpufreq_frequency_table_target( - pcpu->policy, pcpu->freq_table, - new_freq, CPUFREQ_RELATION_H, - &index); - if (ret < 0) { - goto rearm; - } - new_freq = pcpu->freq_table[index].frequency; - } - } - - // adjust freq when screen off - new_freq = adjust_screen_off_freq(pcpu, new_freq); - - if (pcpu->target_freq == new_freq) - { - dbgpr("timer %d: load=%d, already at %d\n", (int) data, cpu_load, new_freq); - goto rearm_if_notmax; - } - - /* - * Do not scale down unless we have been at this frequency for the - * minimum sample time. 
- */ - if (new_freq < pcpu->target_freq) { - if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) < - down_sample_time) { - dbgpr("timer %d: load=%d cur=%d tgt=%d not yet\n", (int) data, cpu_load, pcpu->target_freq, new_freq); - goto rearm; - } - } - else { - if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) < - up_sample_time) { - dbgpr("timer %d: load=%d cur=%d tgt=%d not yet\n", (int) data, cpu_load, pcpu->target_freq, new_freq); - /* don't reset timer */ - goto rearm; - } - } - - if (suspending && debug_mode & LULZACTIVE_DEBUG_SUSPEND) { - LOGI("suspending: cpu_load=%d%% new_freq=%u ppcpu->policy->cur=%u\n", - cpu_load, new_freq, pcpu->policy->cur); - } - if (early_suspended && !suspending && debug_mode & LULZACTIVE_DEBUG_LOAD) { - LOGI("early_suspended: cpu_load=%d%% new_freq=%u ppcpu->policy->cur=%u\n", - cpu_load, new_freq, pcpu->policy->cur); - } - if (debug_mode & LULZACTIVE_DEBUG_LOAD && !early_suspended && !suspending) { - LOGI("cpu_load=%d%% new_freq=%u pcpu->target_freq=%u pcpu->policy->cur=%u\n", - cpu_load, new_freq, pcpu->target_freq, pcpu->policy->cur); - } - - dbgpr("timer %d: load=%d cur=%d tgt=%d queue\n", (int) data, cpu_load, pcpu->target_freq, new_freq); - - if (new_freq < pcpu->target_freq) { - pcpu->target_freq = new_freq; - spin_lock(&down_cpumask_lock); - cpumask_set_cpu(data, &down_cpumask); - spin_unlock(&down_cpumask_lock); - queue_work(down_wq, &freq_scale_down_work); - } else { - pcpu->target_freq = new_freq; -#if DEBUG - up_request_time = ktime_to_us(ktime_get()); -#endif - spin_lock(&up_cpumask_lock); - cpumask_set_cpu(data, &up_cpumask); - spin_unlock(&up_cpumask_lock); - wake_up_process(up_task); - } - -rearm_if_notmax: - /* - * Already set max speed and don't see a need to change that, - * wait until next idle to re-evaluate, don't need timer. - */ - if (pcpu->target_freq == pcpu->policy->max) - goto exit; - -rearm: - if (!timer_pending(&pcpu->cpu_timer)) { - /* - * If already at min: if that CPU is idle, don't set timer. - * Else cancel the timer if that CPU goes idle. We don't - * need to re-evaluate speed until the next idle exit. - */ - if (pcpu->target_freq == pcpu->policy->min) { - smp_rmb(); - - if (pcpu->idling) { - dbgpr("timer %d: cpu idle, don't re-arm\n", (int) data); - goto exit; - } - - pcpu->timer_idlecancel = 1; - } - - pcpu->time_in_idle = get_cpu_idle_time_us( - data, &pcpu->idle_exit_time); - mod_timer(&pcpu->cpu_timer, jiffies + 2); - dbgpr("timer %d: set timer for %lu exit=%llu\n", (int) data, pcpu->cpu_timer.expires, pcpu->idle_exit_time); - } - -exit: - return; -} - -static void cpufreq_lulzactive_idle(void) -{ - struct cpufreq_lulzactive_cpuinfo *pcpu = - &per_cpu(cpuinfo, smp_processor_id()); - int pending; - - if (!pcpu->governor_enabled) { - pm_idle_old(); - return; - } - - pcpu->idling = 1; - smp_wmb(); - pending = timer_pending(&pcpu->cpu_timer); - - if (pcpu->target_freq != pcpu->policy->min) { -#ifdef CONFIG_SMP - /* - * Entering idle while not at lowest speed. On some - * platforms this can hold the other CPU(s) at that speed - * even though the CPU is idle. Set a timer to re-evaluate - * speed so this idle CPU doesn't hold the other CPUs above - * min indefinitely. This should probably be a quirk of - * the CPUFreq driver. 
- */ - if (!pending) { - pcpu->time_in_idle = get_cpu_idle_time_us( - smp_processor_id(), &pcpu->idle_exit_time); - pcpu->timer_idlecancel = 0; - mod_timer(&pcpu->cpu_timer, jiffies + 2); - dbgpr("idle: enter at %d, set timer for %lu exit=%llu\n", - pcpu->target_freq, pcpu->cpu_timer.expires, - pcpu->idle_exit_time); - } -#endif - } else { - /* - * If at min speed and entering idle after load has - * already been evaluated, and a timer has been set just in - * case the CPU suddenly goes busy, cancel that timer. The - * CPU didn't go busy; we'll recheck things upon idle exit. - */ - if (pending && pcpu->timer_idlecancel) { - dbgpr("idle: cancel timer for %lu\n", pcpu->cpu_timer.expires); - del_timer(&pcpu->cpu_timer); - /* - * Ensure last timer run time is after current idle - * sample start time, so next idle exit will always - * start a new idle sampling period. - */ - pcpu->idle_exit_time = 0; - pcpu->timer_idlecancel = 0; - } - } - - pm_idle_old(); - pcpu->idling = 0; - smp_wmb(); - - /* - * Arm the timer for 1-2 ticks later if not already, and if the timer - * function has already processed the previous load sampling - * interval. (If the timer is not pending but has not processed - * the previous interval, it is probably racing with us on another - * CPU. Let it compute load based on the previous sample and then - * re-arm the timer for another interval when it's done, rather - * than updating the interval start time to be "now", which doesn't - * give the timer function enough time to make a decision on this - * run.) - */ - if (timer_pending(&pcpu->cpu_timer) == 0 && - pcpu->timer_run_time >= pcpu->idle_exit_time) { - pcpu->time_in_idle = - get_cpu_idle_time_us(smp_processor_id(), - &pcpu->idle_exit_time); - pcpu->timer_idlecancel = 0; - mod_timer(&pcpu->cpu_timer, jiffies + 2); - dbgpr("idle: exit, set timer for %lu exit=%llu\n", pcpu->cpu_timer.expires, pcpu->idle_exit_time); -#if DEBUG - } else if (timer_pending(&pcpu->cpu_timer) == 0 && - pcpu->timer_run_time < pcpu->idle_exit_time) { - dbgpr("idle: timer not run yet: exit=%llu tmrrun=%llu\n", - pcpu->idle_exit_time, pcpu->timer_run_time); -#endif - } - -} - -static int cpufreq_lulzactive_up_task(void *data) -{ - unsigned int cpu; - cpumask_t tmp_mask; - struct cpufreq_lulzactive_cpuinfo *pcpu; - -#if DEBUG - u64 now; - u64 then; - unsigned int lat; -#endif - - while (1) { - set_current_state(TASK_INTERRUPTIBLE); - spin_lock(&up_cpumask_lock); - - if (cpumask_empty(&up_cpumask)) { - spin_unlock(&up_cpumask_lock); - schedule(); - - if (kthread_should_stop()) - break; - - spin_lock(&up_cpumask_lock); - } - - set_current_state(TASK_RUNNING); - -#if DEBUG - then = up_request_time; - now = ktime_to_us(ktime_get()); - - if (now > then) { - lat = ktime_to_us(ktime_get()) - then; - - if (lat > up_max_latency) - up_max_latency = lat; - } -#endif - - tmp_mask = up_cpumask; - cpumask_clear(&up_cpumask); - spin_unlock(&up_cpumask_lock); - - for_each_cpu(cpu, &tmp_mask) { - pcpu = &per_cpu(cpuinfo, cpu); - - if (nr_running() == 1) { - dbgpr("up %d: tgt=%d nothing else running\n", cpu, - pcpu->target_freq); - } - - __cpufreq_driver_target(pcpu->policy, - pcpu->target_freq, - CPUFREQ_RELATION_H); - pcpu->freq_change_time_in_idle = - get_cpu_idle_time_us(cpu, - &pcpu->freq_change_time); - dbgpr("up %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur); - } - } - - return 0; -} - -static void cpufreq_lulzactive_freq_down(struct work_struct *work) -{ - unsigned int cpu; - cpumask_t tmp_mask; - struct cpufreq_lulzactive_cpuinfo 
*pcpu; - - spin_lock(&down_cpumask_lock); - tmp_mask = down_cpumask; - cpumask_clear(&down_cpumask); - spin_unlock(&down_cpumask_lock); - - for_each_cpu(cpu, &tmp_mask) { - pcpu = &per_cpu(cpuinfo, cpu); - __cpufreq_driver_target(pcpu->policy, - pcpu->target_freq, - CPUFREQ_RELATION_H); - pcpu->freq_change_time_in_idle = - get_cpu_idle_time_us(cpu, - &pcpu->freq_change_time); - dbgpr("down %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur); - } -} - -// inc_cpu_load -static ssize_t show_inc_cpu_load(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", inc_cpu_load); -} - -static ssize_t store_inc_cpu_load(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - ssize_t ret; - if(strict_strtoul(buf, 0, &inc_cpu_load)==-EINVAL) return -EINVAL; - - if (inc_cpu_load > 100) { - inc_cpu_load = 100; - } - else if (inc_cpu_load < 10) { - inc_cpu_load = 10; - } - return count; -} - -static struct global_attr inc_cpu_load_attr = __ATTR(inc_cpu_load, 0666, - show_inc_cpu_load, store_inc_cpu_load); - -// down_sample_time -static ssize_t show_down_sample_time(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", down_sample_time); -} - -static ssize_t store_down_sample_time(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - if(strict_strtoul(buf, 0, &down_sample_time)==-EINVAL) return -EINVAL; - return count; -} - -static struct global_attr down_sample_time_attr = __ATTR(down_sample_time, 0666, - show_down_sample_time, store_down_sample_time); - -// up_sample_time -static ssize_t show_up_sample_time(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", up_sample_time); -} - -static ssize_t store_up_sample_time(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - if(strict_strtoul(buf, 0, &up_sample_time)==-EINVAL) return -EINVAL; - return count; -} - -static struct global_attr up_sample_time_attr = __ATTR(up_sample_time, 0666, - show_up_sample_time, store_up_sample_time); - -// debug_mode -static ssize_t show_debug_mode(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", debug_mode); -} - -static ssize_t store_debug_mode(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - if(strict_strtoul(buf, 0, &debug_mode)==-EINVAL) return -EINVAL; - return count; -} - -static struct global_attr debug_mode_attr = __ATTR(debug_mode, 0666, - show_debug_mode, store_debug_mode); - -// pump_up_step -static ssize_t show_pump_up_step(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", pump_up_step); -} - -static ssize_t store_pump_up_step(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - if(strict_strtoul(buf, 0, &pump_up_step)==-EINVAL) return -EINVAL; - return count; -} - -static struct global_attr pump_up_step_attr = __ATTR(pump_up_step, 0666, - show_pump_up_step, store_pump_up_step); - -// pump_down_step -static ssize_t show_pump_down_step(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", pump_down_step); -} - -static ssize_t store_pump_down_step(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - ssize_t ret; - struct cpufreq_lulzactive_cpuinfo *pcpu; - - if(strict_strtoul(buf, 0, &pump_down_step)==-EINVAL) return -EINVAL; - - pcpu = &per_cpu(cpuinfo, 0); - // fix out of 
bound - if (pcpu->freq_table_size <= pump_down_step) { - pump_down_step = pcpu->freq_table_size - 1; - } - return count; -} - -static struct global_attr pump_down_step_attr = __ATTR(pump_down_step, 0666, - show_pump_down_step, store_pump_down_step); - -// screen_off_min_step -static ssize_t show_screen_off_min_step(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - struct cpufreq_lulzactive_cpuinfo *pcpu; - - pcpu = &per_cpu(cpuinfo, 0); - fix_screen_off_min_step(pcpu); - - return sprintf(buf, "%lu\n", screen_off_min_step); -} - -static ssize_t store_screen_off_min_step(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - struct cpufreq_lulzactive_cpuinfo *pcpu; - ssize_t ret; - - if(strict_strtoul(buf, 0, &screen_off_min_step)==-EINVAL) return -EINVAL; - - pcpu = &per_cpu(cpuinfo, 0); - fix_screen_off_min_step(pcpu); - - return count; -} - -static struct global_attr screen_off_min_step_attr = __ATTR(screen_off_min_step, 0666, - show_screen_off_min_step, store_screen_off_min_step); - -// author -static ssize_t show_author(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%s\n", LULZACTIVE_AUTHOR); -} - -static struct global_attr author_attr = __ATTR(author, 0444, - show_author, NULL); - -// tuner -static ssize_t show_tuner(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%s\n", LULZACTIVE_TUNER); -} - -static struct global_attr tuner_attr = __ATTR(tuner, 0444, - show_tuner, NULL); - -// version -static ssize_t show_version(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", LULZACTIVE_VERSION); -} - -static struct global_attr version_attr = __ATTR(version, 0444, - show_version, NULL); - -// freq_table -static ssize_t show_freq_table(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - struct cpufreq_lulzactive_cpuinfo *pcpu; - char temp[64]; - int i; - - pcpu = &per_cpu(cpuinfo, 0); - - for (i = 0; i < pcpu->freq_table_size; i++) { - sprintf(temp, "%u\n", pcpu->freq_table[i].frequency); - strcat(buf, temp); - } - - return strlen(buf); -} - -static struct global_attr freq_table_attr = __ATTR(freq_table, 0444, - show_freq_table, NULL); - -static struct attribute *lulzactive_attributes[] = { - &inc_cpu_load_attr.attr, - &up_sample_time_attr.attr, - &down_sample_time_attr.attr, - &pump_up_step_attr.attr, - &pump_down_step_attr.attr, - &screen_off_min_step_attr.attr, - &debug_mode_attr.attr, - &author_attr.attr, - &tuner_attr.attr, - &version_attr.attr, - &freq_table_attr.attr, - NULL, -}; - -static struct attribute_group lulzactive_attr_group = { - .attrs = lulzactive_attributes, - .name = "lulzactive", -}; - -static int cpufreq_governor_lulzactive(struct cpufreq_policy *new_policy, - unsigned int event) -{ - int rc; - struct cpufreq_lulzactive_cpuinfo *pcpu = - &per_cpu(cpuinfo, new_policy->cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if (debug_mode & LULZACTIVE_DEBUG_START_STOP) { - LOGI("CPUFREQ_GOV_START\n"); - } - if (!cpu_online(new_policy->cpu)) - return -EINVAL; - - pcpu->policy = new_policy; - pcpu->freq_table = cpufreq_frequency_get_table(new_policy->cpu); - pcpu->target_freq = new_policy->cur; - pcpu->freq_change_time_in_idle = - get_cpu_idle_time_us(new_policy->cpu, - &pcpu->freq_change_time); - pcpu->governor_enabled = 1; - pcpu->freq_table_size = get_freq_table_size(pcpu->freq_table); - - // fix invalid screen_off_min_step - fix_screen_off_min_step(pcpu); - - /* - * Do not register the idle hook and create 
sysfs - * entries if we have already done so. - */ - if (atomic_inc_return(&active_count) > 1) - return 0; - - rc = sysfs_create_group(cpufreq_global_kobject, - &lulzactive_attr_group); - if (rc) - return rc; - - pm_idle_old = pm_idle; - pm_idle = cpufreq_lulzactive_idle; - break; - - case CPUFREQ_GOV_STOP: - if (debug_mode & LULZACTIVE_DEBUG_START_STOP) { - LOGI("CPUFREQ_GOV_STOP\n"); - } - pcpu->governor_enabled = 0; - - if (atomic_dec_return(&active_count) > 0) - return 0; - - sysfs_remove_group(cpufreq_global_kobject, - &lulzactive_attr_group); - - pm_idle = pm_idle_old; - del_timer(&pcpu->cpu_timer); - break; - - case CPUFREQ_GOV_LIMITS: - if (new_policy->max < new_policy->cur) - __cpufreq_driver_target(new_policy, - new_policy->max, CPUFREQ_RELATION_H); - else if (new_policy->min > new_policy->cur) - __cpufreq_driver_target(new_policy, - new_policy->min, CPUFREQ_RELATION_L); - break; - } - return 0; -} - -static void lulzactive_early_suspend(struct early_suspend *handler) { - struct cpufreq_lulzactive_cpuinfo *pcpu; - unsigned int min_freq, max_freq; - - early_suspended = 1; - - if (debug_mode & LULZACTIVE_DEBUG_EARLY_SUSPEND) { - LOGI("%s\n", __func__); - - pcpu = &per_cpu(cpuinfo, 0); - - min_freq = pcpu->policy->min; - - max_freq = min(pcpu->policy->max, pcpu->freq_table[screen_off_min_step].frequency); - max_freq = max(max_freq, min_freq); - - LOGI("lock @%u~@%uMHz\n", min_freq / 1000, max_freq / 1000); - } -} - -static void lulzactive_late_resume(struct early_suspend *handler) { - early_suspended = 0; - if (debug_mode & LULZACTIVE_DEBUG_EARLY_SUSPEND) { - LOGI("%s\n", __func__); - } -} - -static struct early_suspend lulzactive_power_suspend = { - .suspend = lulzactive_early_suspend, - .resume = lulzactive_late_resume, - .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, -}; - -static int lulzactive_pm_notifier_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct cpufreq_policy* policy; - - switch (event) { - case PM_SUSPEND_PREPARE: - suspending = 1; - if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { - LOGI("PM_SUSPEND_PREPARE"); - policy = cpufreq_cpu_get(0); - if (policy) { - LOGI("PM_SUSPEND_PREPARE using @%uMHz\n", policy->cur); - } - } - break; - case PM_POST_SUSPEND: - suspending = 0; - if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { - LOGI("PM_POST_SUSPEND"); - policy = cpufreq_cpu_get(0); - if (policy) { - LOGI("PM_POST_SUSPEND using @%uMHz\n", policy->cur); - } - } - break; - case PM_RESTORE_PREPARE: - if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { - LOGI("PM_RESTORE_PREPARE"); - } - break; - case PM_POST_RESTORE: - if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { - LOGI("PM_POST_RESTORE"); - } - break; - case PM_HIBERNATION_PREPARE: - if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { - LOGI("PM_HIBERNATION_PREPARE"); - } - break; - case PM_POST_HIBERNATION: - if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { - LOGI("PM_POST_HIBERNATION"); - } - break; - } - return NOTIFY_DONE; -} - -static struct notifier_block lulzactive_pm_notifier = { - .notifier_call = lulzactive_pm_notifier_event, -}; - -static int __init cpufreq_lulzactive_init(void) -{ - unsigned int i; - struct cpufreq_lulzactive_cpuinfo *pcpu; - struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; - - up_sample_time = DEFAULT_UP_SAMPLE_TIME; - down_sample_time = DEFAULT_DOWN_SAMPLE_TIME; - debug_mode = DEFAULT_DEBUG_MODE; - inc_cpu_load = DEFAULT_INC_CPU_LOAD; - dec_cpu_load = DEFAULT_DEC_CPU_LOAD; - pump_up_step = DEFAULT_PUMP_UP_STEP; - pump_down_step = DEFAULT_PUMP_DOWN_STEP; - early_suspended = 
0; - suspending = 0; - screen_off_min_step = DEFAULT_SCREEN_OFF_MIN_STEP; - - /* Initalize per-cpu timers */ - for_each_possible_cpu(i) { - pcpu = &per_cpu(cpuinfo, i); - init_timer(&pcpu->cpu_timer); - pcpu->cpu_timer.function = cpufreq_lulzactive_timer; - pcpu->cpu_timer.data = i; - } - - up_task = kthread_create(cpufreq_lulzactive_up_task, NULL, - "klulzactiveup"); - if (IS_ERR(up_task)) - return PTR_ERR(up_task); - - sched_setscheduler_nocheck(up_task, SCHED_FIFO, ¶m); - get_task_struct(up_task); - - /* No rescuer thread, bind to CPU queuing the work for possibly - warm cache (probably doesn't matter much). */ - down_wq = create_workqueue("klulzactive_down"); - - if (! down_wq) - goto err_freeuptask; - - INIT_WORK(&freq_scale_down_work, - cpufreq_lulzactive_freq_down); - -#if DEBUG - spin_lock_init(&dbgpr_lock); - dbg_proc = create_proc_entry("igov", S_IWUSR | S_IRUGO, NULL); - dbg_proc->read_proc = dbg_proc_read; -#endif - spin_lock_init(&down_cpumask_lock); - spin_lock_init(&up_cpumask_lock); - - register_pm_notifier(&lulzactive_pm_notifier); - register_early_suspend(&lulzactive_power_suspend); - - return cpufreq_register_governor(&cpufreq_gov_lulzactive); - -err_freeuptask: - put_task_struct(up_task); - return -ENOMEM; -} - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_LULZACTIVE -fs_initcall(cpufreq_lulzactive_init); -#else -module_init(cpufreq_lulzactive_init); -#endif - -static void __exit cpufreq_lulzactive_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_lulzactive); - unregister_early_suspend(&lulzactive_power_suspend); - unregister_pm_notifier(&lulzactive_pm_notifier); - kthread_stop(up_task); - put_task_struct(up_task); - destroy_workqueue(down_wq); -} - -module_exit(cpufreq_lulzactive_exit); - -MODULE_AUTHOR("Tegrak "); -MODULE_DESCRIPTION("'lulzactive' - improved interactive governor inspired by smartass"); -MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cpufreq_minmax.c b/drivers/cpufreq/cpufreq_minmax.c deleted file mode 100644 index 09dba0d2..00000000 --- a/drivers/cpufreq/cpufreq_minmax.c +++ /dev/null @@ -1,575 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_minmax.c - * - * Copyright (C) 2001 Russell King - * (C) 2003 Venkatesh Pallipadi . - * Jun Nakajima - * (C) 2004 Alexander Clouter - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This governor is an adapatation of the conservative governor. - * See the Documentation/cpu-freq/governors.txt for more information. - * - * Adapatation from conservative by Erasmux. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ - -#define DEF_FREQUENCY_UP_THRESHOLD (92) -#define DEF_FREQUENCY_DOWN_THRESHOLD (27) - -/* - * The polling frequency of this governor depends on the capability of - * the processor. Default polling frequency is 1000 times the transition - * latency of the processor. The governor will work on any processor with - * transition latency <= 10mS, using appropriate sampling - * rate. - * For CPUs with transition latency > 10mS (mostly drivers - * with CPUFREQ_ETERNAL), this governor will not work. - * All times here are in uS. 
- */ -static unsigned int def_sampling_rate; -#define MIN_SAMPLING_RATE_RATIO (2) -/* for correct statistics, we need at least 10 ticks between each measure */ -#define MIN_STAT_SAMPLING_RATE \ - (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(CONFIG_CPU_FREQ_MIN_TICKS)) -#define MIN_SAMPLING_RATE \ - (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) -#define MAX_SAMPLING_RATE (500 * def_sampling_rate) -#define DEF_SAMPLING_DOWN_FACTOR (10) -#define MAX_SAMPLING_DOWN_FACTOR (100) -#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) -#define CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER (500) -#define CONFIG_CPU_FREQ_MIN_TICKS (2) - -static void do_dbs_timer(struct work_struct *work); - -struct cpu_dbs_info_s { - struct cpufreq_policy *cur_policy; - unsigned int prev_cpu_idle_up; - unsigned int prev_cpu_idle_down; - unsigned int enable; - unsigned int down_skip; - unsigned int requested_freq; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); - -static unsigned int dbs_enable; /* number of CPUs using this policy */ - -/* - * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug - * lock and dbs_mutex. cpu_hotplug lock should always be held before - * dbs_mutex. If any function that can potentially take cpu_hotplug lock - * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then - * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock - * is recursive for the same process. -Venki - */ -static DEFINE_MUTEX (dbs_mutex); -static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer); - -struct dbs_tuners { - unsigned int sampling_rate; - unsigned int sampling_down_factor; - unsigned int up_threshold; - unsigned int down_threshold; - unsigned int ignore_nice; -}; - -static struct dbs_tuners dbs_tuners_ins = { - .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, - .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, - .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, - .ignore_nice = 0, -}; - -static inline unsigned int get_cpu_idle_time(unsigned int cpu) -{ - unsigned int add_nice = 0, ret; - - if (dbs_tuners_ins.ignore_nice) - add_nice = kstat_cpu(cpu).cpustat.nice; - - ret = kstat_cpu(cpu).cpustat.idle + - kstat_cpu(cpu).cpustat.iowait + - add_nice; - - return ret; -} - -/* keep track of frequency transitions */ -static int -dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, - void *data) -{ - struct cpufreq_freqs *freq = data; - struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, - freq->cpu); - - if (!this_dbs_info->enable) - return 0; - - this_dbs_info->requested_freq = freq->new; - - return 0; -} - -static struct notifier_block dbs_cpufreq_notifier_block = { - .notifier_call = dbs_cpufreq_notifier -}; - -/************************** sysfs interface ************************/ -static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) -{ - return sprintf (buf, "%u\n", MAX_SAMPLING_RATE); -} - -static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) -{ - return sprintf (buf, "%u\n", MIN_SAMPLING_RATE); -} - -#define define_one_ro(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0444, show_##_name, NULL) - -define_one_ro(sampling_rate_max); -define_one_ro(sampling_rate_min); - -/* cpufreq_minmax Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct cpufreq_policy *unused, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ -} -show_one(sampling_rate, sampling_rate); -show_one(sampling_down_factor, 
sampling_down_factor); -show_one(up_threshold, up_threshold); -show_one(down_threshold, down_threshold); -show_one(ignore_nice_load, ignore_nice); - -static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf (buf, "%u", &input); - if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.sampling_down_factor = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_sampling_rate(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf (buf, "%u", &input); - - mutex_lock(&dbs_mutex); - if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) { - mutex_unlock(&dbs_mutex); - return -EINVAL; - } - - dbs_tuners_ins.sampling_rate = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_up_threshold(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf (buf, "%u", &input); - - mutex_lock(&dbs_mutex); - if (ret != 1 || input > 100 || input <= dbs_tuners_ins.down_threshold) { - mutex_unlock(&dbs_mutex); - return -EINVAL; - } - - dbs_tuners_ins.up_threshold = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_down_threshold(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf (buf, "%u", &input); - - mutex_lock(&dbs_mutex); - if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) { - mutex_unlock(&dbs_mutex); - return -EINVAL; - } - - dbs_tuners_ins.down_threshold = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - unsigned int j; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - if (input > 1) - input = 1; - - mutex_lock(&dbs_mutex); - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - mutex_unlock(&dbs_mutex); - return count; - } - dbs_tuners_ins.ignore_nice = input; - - /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */ - for_each_online_cpu(j) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cpu_dbs_info, j); - j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j); - j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up; - } - mutex_unlock(&dbs_mutex); - - return count; -} - -#define define_one_rw(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0644, show_##_name, store_##_name) - -define_one_rw(sampling_rate); -define_one_rw(sampling_down_factor); -define_one_rw(up_threshold); -define_one_rw(down_threshold); -define_one_rw(ignore_nice_load); - -static struct attribute * dbs_attributes[] = { - &sampling_rate_max.attr, - &sampling_rate_min.attr, - &sampling_rate.attr, - &sampling_down_factor.attr, - &up_threshold.attr, - &down_threshold.attr, - &ignore_nice_load.attr, - NULL -}; - -static struct attribute_group dbs_attr_group = { - .attrs = dbs_attributes, - .name = "minmax", -}; - -/************************** sysfs end ************************/ - -static void dbs_check_cpu(int cpu) -{ - unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; - unsigned int tmp_idle_ticks, total_idle_ticks; - //unsigned int freq_target; - unsigned int freq_down_sampling_rate; - struct cpu_dbs_info_s *this_dbs_info = 
&per_cpu(cpu_dbs_info, cpu); - struct cpufreq_policy *policy; - - if (!this_dbs_info->enable) - return; - - policy = this_dbs_info->cur_policy; - - /* - * The default safe range is 20% to 80% - * Every sampling_rate, we check - * - If current idle time is less than 20%, then we try to - * increase frequency - * Every sampling_rate*sampling_down_factor, we check - * - If current idle time is more than 80%, then we try to - * decrease frequency - * - */ - - this_dbs_info->down_skip++; - - /* Check for frequency increase */ - idle_ticks = UINT_MAX; - - /* Check for frequency increase */ - total_idle_ticks = get_cpu_idle_time(cpu); - tmp_idle_ticks = total_idle_ticks - - this_dbs_info->prev_cpu_idle_up; - this_dbs_info->prev_cpu_idle_up = total_idle_ticks; - - if (tmp_idle_ticks < idle_ticks) - idle_ticks = tmp_idle_ticks; - - /* Scale idle ticks by 100 and compare with up and down ticks */ - idle_ticks *= 100; - up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) * - usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - if (idle_ticks < up_idle_ticks) { - this_dbs_info->down_skip = 0; - this_dbs_info->prev_cpu_idle_down = - this_dbs_info->prev_cpu_idle_up; - - /* if we are already at full speed then break out early */ - if (this_dbs_info->requested_freq == policy->max) - return; - - this_dbs_info->requested_freq = policy->max; - - __cpufreq_driver_target(policy, this_dbs_info->requested_freq, - CPUFREQ_RELATION_H); - return; - } - - /* Check for frequency decrease */ - if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor) - return; - else this_dbs_info->down_skip--; /* just to prevent overflow */ - - - /* Check for frequency decrease */ - total_idle_ticks = this_dbs_info->prev_cpu_idle_up; - tmp_idle_ticks = total_idle_ticks - - this_dbs_info->prev_cpu_idle_down; - this_dbs_info->prev_cpu_idle_down = total_idle_ticks; - - if (tmp_idle_ticks < idle_ticks) - idle_ticks = tmp_idle_ticks; - - /* Scale idle ticks by 100 and compare with up and down ticks */ - idle_ticks *= 100; - - freq_down_sampling_rate = dbs_tuners_ins.sampling_rate * - dbs_tuners_ins.sampling_down_factor; - down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * - usecs_to_jiffies(freq_down_sampling_rate); - - if (idle_ticks > down_idle_ticks) { - /* - * if we are already at the lowest speed then break out early - * or if we 'cannot' reduce the speed as the user might want - * freq_target to be zero - */ - if (this_dbs_info->requested_freq == policy->min) - return; - - this_dbs_info->requested_freq = policy->min; - - __cpufreq_driver_target(policy, this_dbs_info->requested_freq, - CPUFREQ_RELATION_H); - return; - } -} - -static void do_dbs_timer(struct work_struct *work) -{ - int i; - - mutex_lock(&dbs_mutex); - for_each_online_cpu(i) - dbs_check_cpu(i); - schedule_delayed_work(&dbs_work, - usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); - mutex_unlock(&dbs_mutex); -} - -static inline void dbs_timer_init(void) -{ - init_timer_deferrable(&dbs_work.timer); - schedule_delayed_work(&dbs_work, - usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); - return; -} - -static inline void dbs_timer_exit(void) -{ - cancel_delayed_work(&dbs_work); - return; -} - -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int j; - int rc; - - this_dbs_info = &per_cpu(cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - if 
(this_dbs_info->enable) /* Already enabled */ - break; - - mutex_lock(&dbs_mutex); - - rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu); - j_dbs_info->prev_cpu_idle_down - = j_dbs_info->prev_cpu_idle_up; - } - this_dbs_info->enable = 1; - this_dbs_info->down_skip = 0; - this_dbs_info->requested_freq = policy->cur; - - dbs_enable++; - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 1) { - unsigned int latency; - /* policy latency is in nS. Convert it to uS first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - - def_sampling_rate = 10 * latency * - CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER; - - if (def_sampling_rate < MIN_STAT_SAMPLING_RATE) - def_sampling_rate = MIN_STAT_SAMPLING_RATE; - - dbs_tuners_ins.sampling_rate = def_sampling_rate; - - dbs_timer_init(); - cpufreq_register_notifier( - &dbs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - } - - mutex_unlock(&dbs_mutex); - break; - - case CPUFREQ_GOV_STOP: - mutex_lock(&dbs_mutex); - this_dbs_info->enable = 0; - sysfs_remove_group(&policy->kobj, &dbs_attr_group); - dbs_enable--; - /* - * Stop the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 0) { - dbs_timer_exit(); - cpufreq_unregister_notifier( - &dbs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - } - - mutex_unlock(&dbs_mutex); - - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&dbs_mutex); - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target( - this_dbs_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target( - this_dbs_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); - mutex_unlock(&dbs_mutex); - break; - } - return 0; -} - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_MINMAX -static -#endif -struct cpufreq_governor cpufreq_gov_minmax = { - .name = "minmax", - .governor = cpufreq_governor_dbs, - .max_transition_latency = TRANSITION_LATENCY_LIMIT, - .owner = THIS_MODULE, -}; - -static int __init cpufreq_gov_dbs_init(void) -{ - return cpufreq_register_governor(&cpufreq_gov_minmax); -} - -static void __exit cpufreq_gov_dbs_exit(void) -{ - /* Make sure that the scheduled work is indeed not running */ - flush_scheduled_work(); - - cpufreq_unregister_governor(&cpufreq_gov_minmax); -} - -MODULE_AUTHOR ("Erasmux"); -MODULE_DESCRIPTION ("'cpufreq_minmax' - A dynamic cpufreq governor which " - "minimizes the frequecy jumps by always selecting either " - "the minimal or maximal frequency"); -MODULE_LICENSE ("GPL"); - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_MINMAX -fs_initcall(cpufreq_gov_dbs_init); -#else -module_init(cpufreq_gov_dbs_init); -#endif -module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_smartass.c b/drivers/cpufreq/cpufreq_smartass.c deleted file mode 100644 index 0ba3ee61..00000000 --- a/drivers/cpufreq/cpufreq_smartass.c +++ /dev/null @@ -1,642 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_smartass.c - * - * Copyright (C) 2010 Google, Inc. 
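/*
 * Editor's note (illustrative sketch, not part of the original patch):
 * the minmax module description above sums the policy up: the governor only
 * ever requests policy->min or policy->max. The helper below condenses that
 * decision to a busy-percentage check against the default thresholds
 * (up 92%, down 27%, defined earlier in that file); it deliberately ignores
 * the idle-tick bookkeeping and sampling_down_factor handling of the real
 * dbs_check_cpu(), so it is a simplification, not a drop-in replacement.
 */
#include <stdio.h>

struct fake_policy { unsigned int min, max, cur; };

static unsigned int minmax_pick(const struct fake_policy *p, unsigned int busy_pct,
				unsigned int up_threshold, unsigned int down_threshold)
{
	if (busy_pct > up_threshold)
		return p->max;	/* ramp straight to the top */
	if (busy_pct < down_threshold)
		return p->min;	/* drop straight to the bottom */
	return p->cur;		/* otherwise keep the current speed */
}

int main(void)
{
	struct fake_policy p = { .min = 200000, .max = 1200000, .cur = 1200000 };

	printf("busy 95%% -> %u kHz\n", minmax_pick(&p, 95, 92, 27));
	printf("busy 10%% -> %u kHz\n", minmax_pick(&p, 10, 92, 27));
	return 0;
}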
- * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Author: Erasmux - * - * Based on the interactive governor By Mike Chan (mike@android.com) - * which was adaptated to 2.6.29 kernel by Nadlabak (pavel@doshaska.net) - * - * requires to add - * EXPORT_SYMBOL_GPL(nr_running); - * at the end of kernel/sched.c - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static void (*pm_idle_old)(void); -static atomic_t active_count = ATOMIC_INIT(0); - -struct smartass_info_s { - struct cpufreq_policy *cur_policy; - struct timer_list timer; - u64 time_in_idle; - u64 idle_exit_time; - unsigned int force_ramp_up; - unsigned int enable; -}; -static DEFINE_PER_CPU(struct smartass_info_s, smartass_info); - -/* Workqueues handle frequency scaling */ -static struct workqueue_struct *up_wq; -static struct workqueue_struct *down_wq; -static struct work_struct freq_scale_work; - -static u64 freq_change_time; -static u64 freq_change_time_in_idle; - -static cpumask_t work_cpumask; -static unsigned int suspended; - - -/* - * The minimum amount of time to spend at a frequency before we can ramp down, - * default is 45ms. - */ -#define DEFAULT_RAMP_DOWN_RATE_NS 45000; -static unsigned long ramp_down_rate_ns; - -/* - * When ramping up frequency jump to at least this frequency. - */ - -#define DEFAULT_UP_MIN_FREQ (800*1000) -static unsigned int up_min_freq; - -/* - * When sleep_max_freq>0 the frequency when suspended will be capped - * by this frequency. Also will wake up at max frequency of policy - * to minimize wakeup issues. - * Set sleep_max_freq=0 to disable this behavior. - */ -#define DEFAULT_SLEEP_MAX_FREQ (400*1000) -static unsigned int sleep_max_freq; - -/* - * Sampling rate, I highly recommend to leave it at 2. - */ -#define DEFAULT_SAMPLE_RATE_JIFFIES 2 -static unsigned int sample_rate_jiffies; - -/* - * Max freqeuncy delta when ramping up. - */ - -#define DEFAULT_MAX_RAMP_UP (300 * 1000) -static unsigned int max_ramp_up; - -/* - * CPU freq will be increased if measured load > max_cpu_load; - */ -#define DEFAULT_MAX_CPU_LOAD 60 -static unsigned long max_cpu_load; - -/* - * CPU freq will be decreased if measured load < min_cpu_load; - */ -#define DEFAULT_MIN_CPU_LOAD 30 -static unsigned long min_cpu_load; - -//Leave this zero by default, people can tweak it if they so wish. 
-#define DEFAULT_RAMP_UP_RATE_NS 0 -static unsigned long ramp_up_rate_ns; - - -static int cpufreq_governor_smartass(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS -static -#endif -struct cpufreq_governor cpufreq_gov_smartass = { - .name = "smartass", - .governor = cpufreq_governor_smartass, - .max_transition_latency = 9000000, - .owner = THIS_MODULE, -}; - -static void cpufreq_smartass_timer(unsigned long data) -{ - u64 delta_idle; - u64 update_time; - u64 now_idle; - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, data); - struct cpufreq_policy *policy = this_smartass->cur_policy; - - now_idle = get_cpu_idle_time_us(data, &update_time); - - if (update_time == this_smartass->idle_exit_time) - return; - - delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle); - //printk(KERN_INFO "smartass: t=%llu i=%llu\n",cputime64_sub(update_time,this_smartass->idle_exit_time),delta_idle); - - /* Scale up if there were no idle cycles since coming out of idle */ - if (delta_idle == 0 && cputime64_sub(update_time, freq_change_time) > ramp_up_rate_ns) { - if (policy->cur == policy->max) - return; - - if (nr_running() < 1) - return; - - this_smartass->force_ramp_up = 1; - cpumask_set_cpu(data, &work_cpumask); - queue_work(up_wq, &freq_scale_work); - return; - } - - /* - * There is a window where if the cpu utlization can go from low to high - * between the timer expiring, delta_idle will be > 0 and the cpu will - * be 100% busy, preventing idle from running, and this timer from - * firing. So setup another timer to fire to check cpu utlization. - * Do not setup the timer if there is no scheduled work. - */ - if (!timer_pending(&this_smartass->timer) && nr_running() > 0) { - this_smartass->time_in_idle = get_cpu_idle_time_us( - data, &this_smartass->idle_exit_time); - mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies); - } - - if (policy->cur == policy->min) - return; - - /* - * Do not scale down unless we have been at this frequency for the - * minimum sample time. - */ - if (cputime64_sub(update_time, freq_change_time) < ramp_down_rate_ns) - return; - - - cpumask_set_cpu(data, &work_cpumask); - queue_work(down_wq, &freq_scale_work); -} - -static void cpufreq_idle(void) -{ - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); - struct cpufreq_policy *policy = this_smartass->cur_policy; - - pm_idle_old(); - - if (!cpumask_test_cpu(smp_processor_id(), policy->cpus)) - return; - - /* Timer to fire in 1-2 ticks, jiffie aligned. */ - if (timer_pending(&this_smartass->timer) == 0) { - this_smartass->time_in_idle = get_cpu_idle_time_us( - smp_processor_id(), &this_smartass->idle_exit_time); - mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies); - } -} - -/* - * Choose the cpu frequency based off the load. For now choose the minimum - * frequency that will satisfy the load, which is no -t always the lower power. 
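/*
 * Editor's note (illustrative sketch, not part of the original patch):
 * the max_cpu_load / min_cpu_load tunables documented earlier in this file
 * (defaults 60 and 30) split the measured load into three bands, which
 * cpufreq_smartass_calc_freq() below then turns into concrete frequency
 * steps. The helper here only encodes the three-way decision, not the
 * 100/200/400 MHz step selection; names and the sample loads are illustrative.
 */
#include <stdio.h>

enum smartass_action { SMARTASS_STEP_DOWN = -1, SMARTASS_HOLD = 0, SMARTASS_STEP_UP = 1 };

static enum smartass_action smartass_band(unsigned int load,
					  unsigned int max_cpu_load,
					  unsigned int min_cpu_load)
{
	if (load > max_cpu_load)
		return SMARTASS_STEP_UP;	/* busy enough to ramp up */
	if (load < min_cpu_load)
		return SMARTASS_STEP_DOWN;	/* idle enough to ramp down */
	return SMARTASS_HOLD;			/* stay at the current speed */
}

int main(void)
{
	unsigned int loads[] = { 80, 45, 10 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("load %u%% -> %d\n", loads[i], smartass_band(loads[i], 60, 30));
	return 0;
}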
- */ -static unsigned int cpufreq_smartass_calc_freq(unsigned int cpu, struct cpufreq_policy *policy) -{ - unsigned int delta_time; - unsigned int idle_time; - unsigned int cpu_load; - unsigned int new_freq; - u64 current_wall_time; - u64 current_idle_time; - - - current_idle_time = get_cpu_idle_time_us(cpu, ¤t_wall_time); - - idle_time = (unsigned int)( current_idle_time - freq_change_time_in_idle ); - delta_time = (unsigned int)( current_wall_time - freq_change_time ); - - cpu_load = 100 * (delta_time - idle_time) / delta_time; - if (cpu_load < min_cpu_load) { - //if the current frequency is below 1.2ghz, everything is 200mhz steps - if(policy->cur <= 1200000 && policy->cur >= 400000) { -/* catch the extra 200mhz gap between 400 and 800 when scaling down -netarchy */ - if(policy->cur == 800000) { - new_freq = policy->cur - 400000; - return new_freq; - } - else { - new_freq = policy->cur - 200000; - return new_freq; - } - } - //above 1.2ghz though, everything is 100mhz steps - else { - new_freq = policy->cur - 100000; - return new_freq; - } - } - if (cpu_load > max_cpu_load) { - if(policy->cur < 1200000 && policy->cur > 100000) { -/* catch the gap between 400 and 800 when scaling up -netarchy */ - if(policy->cur == 400000) { - new_freq = policy->cur + 400000; - return new_freq; - } - else { - new_freq = policy->cur + 200000; - return new_freq; - } - } - else { - new_freq = policy->cur + 100000; - return new_freq; - } - } - return policy->cur; -} - -/* We use the same work function to sale up and down */ -static void cpufreq_smartass_freq_change_time_work(struct work_struct *work) -{ - unsigned int cpu; - unsigned int new_freq; - struct smartass_info_s *this_smartass; - struct cpufreq_policy *policy; - cpumask_t tmp_mask = work_cpumask; - for_each_cpu(cpu, tmp_mask) { - this_smartass = &per_cpu(smartass_info, cpu); - policy = this_smartass->cur_policy; - - if (this_smartass->force_ramp_up) { - this_smartass->force_ramp_up = 0; - - if (nr_running() == 1) { - cpumask_clear_cpu(cpu, &work_cpumask); - return; - } - - if (policy->cur == policy->max) - return; - - new_freq = policy->cur + max_ramp_up; - - if (suspended && sleep_max_freq) { - if (new_freq > sleep_max_freq) - new_freq = sleep_max_freq; - } else { - if (new_freq < up_min_freq) - new_freq = up_min_freq; - } - - } else { - new_freq = cpufreq_smartass_calc_freq(cpu,policy); - - // in suspend limit to sleep_max_freq and - // jump straight to sleep_max_freq to avoid wakeup problems - if (suspended && sleep_max_freq && - (new_freq > sleep_max_freq || new_freq > policy->cur)) - new_freq = sleep_max_freq; - } - - if (new_freq > policy->max) - new_freq = policy->max; - - if (new_freq < policy->min) - new_freq = policy->min; - - __cpufreq_driver_target(policy, new_freq, - CPUFREQ_RELATION_L); - - freq_change_time_in_idle = get_cpu_idle_time_us(cpu, - &freq_change_time); - - cpumask_clear_cpu(cpu, &work_cpumask); - - } - - -} - -static ssize_t show_ramp_up_rate_ns(struct cpufreq_policy *policy, char *buf) { - return sprintf(buf, "%lu\n", ramp_up_rate_ns); -} - -static ssize_t store_ramp_up_rate_ns(struct cpufreq_policy *policy, const char *buf, size_t count) { - ssize_t ret; - unsigned long input; - ret = strict_strtoul(buf, 0, &input); - if (ret >= 0 && input >= 0 && input <= 100000000) - ramp_up_rate_ns = input; - return ret; -} - -static struct freq_attr ramp_up_rate_ns_attr = __ATTR(ramp_up_rate_ns, 0644, - show_ramp_up_rate_ns, store_ramp_up_rate_ns); - -static ssize_t show_ramp_down_rate_ns(struct cpufreq_policy *policy, char *buf) -{ - 
return sprintf(buf, "%lu\n", ramp_down_rate_ns); -} - -static ssize_t store_ramp_down_rate_ns(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 1000 && input <= 100000000) - ramp_down_rate_ns = input; - return res; -} - -static struct freq_attr ramp_down_rate_ns_attr = __ATTR(ramp_down_rate_ns, 0644, - show_ramp_down_rate_ns, store_ramp_down_rate_ns); - -static ssize_t show_up_min_freq(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%u\n", up_min_freq); -} - -static ssize_t store_up_min_freq(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - up_min_freq = input; - return res; -} - -static struct freq_attr up_min_freq_attr = __ATTR(up_min_freq, 0644, - show_up_min_freq, store_up_min_freq); - -static ssize_t show_sleep_max_freq(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%u\n", sleep_max_freq); -} - -static ssize_t store_sleep_max_freq(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - sleep_max_freq = input; - return res; -} - -static struct freq_attr sleep_max_freq_attr = __ATTR(sleep_max_freq, 0644, - show_sleep_max_freq, store_sleep_max_freq); - -static ssize_t show_sample_rate_jiffies(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%u\n", sample_rate_jiffies); -} - -static ssize_t store_sample_rate_jiffies(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 0 && input <= 1000) - sample_rate_jiffies = input; - return res; -} - -static struct freq_attr sample_rate_jiffies_attr = __ATTR(sample_rate_jiffies, 0644, - show_sample_rate_jiffies, store_sample_rate_jiffies); - -static ssize_t show_max_ramp_up(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%u\n", max_ramp_up); -} - -static ssize_t store_max_ramp_up(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 10000) - max_ramp_up = input; - return res; -} - -static struct freq_attr max_ramp_up_attr = __ATTR(max_ramp_up, 0644, - show_max_ramp_up, store_max_ramp_up); - -static ssize_t show_max_cpu_load(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%lu\n", max_cpu_load); -} - -static ssize_t store_max_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 0 && input <= 100) - max_cpu_load = input; - return res; -} - -static struct freq_attr max_cpu_load_attr = __ATTR(max_cpu_load, 0644, - show_max_cpu_load, store_max_cpu_load); - -static ssize_t show_min_cpu_load(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%lu\n", min_cpu_load); -} - -static ssize_t store_min_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 0 && input < 100) - min_cpu_load = input; - return res; -} - -static struct freq_attr min_cpu_load_attr = __ATTR(min_cpu_load, 0644, - show_min_cpu_load, 
store_min_cpu_load); - -static struct attribute * smartass_attributes[] = { - &ramp_down_rate_ns_attr.attr, - &up_min_freq_attr.attr, - &sleep_max_freq_attr.attr, - &sample_rate_jiffies_attr.attr, - &max_ramp_up_attr.attr, - &max_cpu_load_attr.attr, - &min_cpu_load_attr.attr, - &ramp_up_rate_ns_attr.attr, - NULL, -}; - -static struct attribute_group smartass_attr_group = { - .attrs = smartass_attributes, - .name = "smartass", -}; - -static int cpufreq_governor_smartass(struct cpufreq_policy *new_policy, - unsigned int event) -{ - unsigned int cpu = new_policy->cpu; - int rc; - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!new_policy->cur)) - return -EINVAL; - - if (this_smartass->enable) /* Already enabled */ - break; - - /* - * Do not register the idle hook and create sysfs - * entries if we have already done so. - */ - if (atomic_inc_return(&active_count) > 1) - return 0; - - rc = sysfs_create_group(&new_policy->kobj, &smartass_attr_group); - if (rc) - return rc; - pm_idle_old = pm_idle; - pm_idle = cpufreq_idle; - - this_smartass->cur_policy = new_policy; - this_smartass->enable = 1; - - // notice no break here! - - case CPUFREQ_GOV_LIMITS: - if (this_smartass->cur_policy->cur != new_policy->max) - __cpufreq_driver_target(new_policy, new_policy->max, CPUFREQ_RELATION_H); - - break; - - case CPUFREQ_GOV_STOP: - this_smartass->enable = 0; - - if (atomic_dec_return(&active_count) > 1) - return 0; - sysfs_remove_group(&new_policy->kobj, - &smartass_attr_group); - - pm_idle = pm_idle_old; - del_timer(&this_smartass->timer); - break; - } - - return 0; -} - -static void smartass_suspend(int cpu, int suspend) -{ - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); - struct cpufreq_policy *policy = this_smartass->cur_policy; - unsigned int new_freq; - - if (!this_smartass->enable || sleep_max_freq==0) // disable behavior for sleep_max_freq==0 - return; - - if (suspend) { - if (policy->cur > sleep_max_freq) { - new_freq = sleep_max_freq; - if (new_freq > policy->max) - new_freq = policy->max; - if (new_freq < policy->min) - new_freq = policy->min; - __cpufreq_driver_target(policy, new_freq, - CPUFREQ_RELATION_H); - } - } else { // resume at max speed: - __cpufreq_driver_target(policy, policy->max, - CPUFREQ_RELATION_H); - } - -} - -static void smartass_early_suspend(struct early_suspend *handler) { - int i; - suspended = 1; - for_each_online_cpu(i) - smartass_suspend(i,1); -} - -static void smartass_late_resume(struct early_suspend *handler) { - int i; - suspended = 0; - for_each_online_cpu(i) - smartass_suspend(i,0); -} - -static struct early_suspend smartass_power_suspend = { - .suspend = smartass_early_suspend, - .resume = smartass_late_resume, -}; - -static int __init cpufreq_smartass_init(void) -{ - unsigned int i; - struct smartass_info_s *this_smartass; - ramp_down_rate_ns = DEFAULT_RAMP_DOWN_RATE_NS; - up_min_freq = DEFAULT_UP_MIN_FREQ; - sleep_max_freq = DEFAULT_SLEEP_MAX_FREQ; - sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES; - max_ramp_up = DEFAULT_MAX_RAMP_UP; - max_cpu_load = DEFAULT_MAX_CPU_LOAD; - min_cpu_load = DEFAULT_MIN_CPU_LOAD; - ramp_up_rate_ns = DEFAULT_RAMP_UP_RATE_NS; - - suspended = 0; - - /* Initalize per-cpu data: */ - for_each_possible_cpu(i) { - this_smartass = &per_cpu(smartass_info, i); - this_smartass->enable = 0; - this_smartass->force_ramp_up = 0; - this_smartass->time_in_idle = 0; - this_smartass->idle_exit_time = 0; - // 
intialize timer: - init_timer_deferrable(&this_smartass->timer); - this_smartass->timer.function = cpufreq_smartass_timer; - this_smartass->timer.data = i; - } - - /* Scale up is high priority */ - up_wq = create_workqueue("ksmartass_up"); - down_wq = create_workqueue("ksmartass_down"); - - INIT_WORK(&freq_scale_work, cpufreq_smartass_freq_change_time_work); - - register_early_suspend(&smartass_power_suspend); - - return cpufreq_register_governor(&cpufreq_gov_smartass); -} - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS -pure_initcall(cpufreq_smartass_init); -#else -module_init(cpufreq_smartass_init); -#endif - -static void __exit cpufreq_smartass_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_smartass); - destroy_workqueue(up_wq); - destroy_workqueue(down_wq); -} - -module_exit(cpufreq_smartass_exit); - -MODULE_AUTHOR ("Erasmux"); -MODULE_DESCRIPTION ("'cpufreq_minmax' - A smart cpufreq governor optimized for the hero!"); -MODULE_LICENSE ("GPL"); - diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 8a6a819b..f265babc 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -25,9 +24,6 @@ #define CPUFREQ_NAME_LEN 16 -/********************************************************************* - * CPUFREQ NOTIFIER INTERFACE * - *********************************************************************/ #define CPUFREQ_TRANSITION_NOTIFIER (0) #define CPUFREQ_POLICY_NOTIFIER (1) @@ -50,10 +46,6 @@ static inline int cpufreq_unregister_notifier(struct notifier_block *nb, static inline void disable_cpufreq(void) { } #endif -/* if (cpufreq_driver->target) exists, the ->governor decides what frequency - * within the limits is used. If (cpufreq_driver->setpolicy> exists, these - * two generic policies are available: - */ #define CPUFREQ_POLICY_POWERSAVE (1) #define CPUFREQ_POLICY_PERFORMANCE (2) @@ -63,7 +55,6 @@ static inline void disable_cpufreq(void) { } struct cpufreq_governor; -/* /sys/devices/system/cpu/cpufreq: entry point for global variables */ extern struct kobject *cpufreq_global_kobject; #define CPUFREQ_ETERNAL (-1) @@ -114,7 +105,6 @@ struct cpufreq_policy { #define CPUFREQ_SHARED_TYPE_ALL (2) #define CPUFREQ_SHARED_TYPE_ANY (3) -/******************** cpufreq transition notifiers *******************/ #define CPUFREQ_PRECHANGE (0) #define CPUFREQ_POSTCHANGE (1) @@ -129,15 +119,6 @@ struct cpufreq_freqs { }; -/** - * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch safe) - * @old: old value - * @div: divisor - * @mult: multiplier - * - * - * new = old * mult / div - */ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mult) { #if BITS_PER_LONG == 32 @@ -155,9 +136,6 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mu #endif }; -/********************************************************************* - * CPUFREQ GOVERNORS * - *********************************************************************/ #define CPUFREQ_GOV_START 1 #define CPUFREQ_GOV_STOP 2 @@ -176,9 +154,6 @@ struct cpufreq_governor { struct module *owner; }; -/* - * Pass a target to the cpufreq driver. 
- */ extern int cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation); @@ -236,7 +211,6 @@ struct cpufreq_driver { struct freq_attr **attr; }; -/* flags */ #define CPUFREQ_STICKY 0x01 #define CPUFREQ_CONST_LOOPS 0x02 @@ -300,14 +274,10 @@ static struct global_attr _name = \ __ATTR(_name, 0644, show_##_name, store_##_name) -/********************************************************************* - * CPUFREQ 2.6. INTERFACE * - *********************************************************************/ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); int cpufreq_update_policy(unsigned int cpu); #ifdef CONFIG_CPU_FREQ -/* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */ unsigned int cpufreq_get(unsigned int cpu); #else static inline unsigned int cpufreq_get(unsigned int cpu) @@ -316,7 +286,6 @@ static inline unsigned int cpufreq_get(unsigned int cpu) } #endif -/* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it */ #ifdef CONFIG_CPU_FREQ unsigned int cpufreq_quick_get(unsigned int cpu); unsigned int cpufreq_quick_get_max(unsigned int cpu); @@ -332,15 +301,8 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu) #endif -/********************************************************************* - * CPUFREQ DEFAULT GOVERNOR * - *********************************************************************/ -/* - Performance governor is fallback governor if any other gov failed to - auto load due latency restrictions -*/ #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE extern struct cpufreq_governor cpufreq_gov_performance; #endif @@ -358,12 +320,13 @@ extern struct cpufreq_governor cpufreq_gov_ondemand; #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE) extern struct cpufreq_governor cpufreq_gov_conservative; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative) -#endif#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE) extern struct cpufreq_governor cpufreq_gov_interactive; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_interactive) #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND) extern struct cpufreq_governor cpufreq_gov_intellidemand; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_intellidemand) +<<<<<<< HEAD #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2) extern struct cpufreq_governor cpufreq_gov_smartass2; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_smartass2) @@ -403,12 +366,11 @@ extern struct cpufreq_governor cpufreq_gov_ondemandx; #elif defined(CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX) extern struct cpufreq_governor cpufreq_gov_brazilianwax; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_brazilianwax) +======= +>>>>>>> parent of 7e0f70e... 
Added LulzactiveV2,Lagfree,Smartass,SmartassV2,Minmax,InteractiveX Governor #endif -/********************************************************************* - * FREQUENCY TABLE HELPERS * - *********************************************************************/ #define CPUFREQ_ENTRY_INVALID ~0 #define CPUFREQ_TABLE_END ~1 @@ -431,12 +393,10 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, unsigned int relation, unsigned int *index); -/* the following 3 funtions are for cpufreq core use only */ struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); void cpufreq_cpu_put(struct cpufreq_policy *data); -/* the following are really really optional */ extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, From dda172296518444433caa7b907210e1f0706d627 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 10:22:35 -0400 Subject: [PATCH 15/35] Revert "Revert "Added LulzactiveV2,Lagfree,Smartass,SmartassV2,Minmax,InteractiveX Governor"" This reverts commit 8cee675cd628c83eeec000ed787ead5f8ee5127c. --- drivers/cpufreq/Kconfig | 45 - drivers/cpufreq/Makefile | 19 +- drivers/cpufreq/cpufreq_interactivex.c | 381 ++++++++ drivers/cpufreq/cpufreq_lagfree.c | 662 ++++++++++++++ drivers/cpufreq/cpufreq_lulzactive.c | 1143 ++++++++++++++++++++++++ drivers/cpufreq/cpufreq_minmax.c | 575 ++++++++++++ drivers/cpufreq/cpufreq_smartass.c | 642 +++++++++++++ include/linux/cpufreq.h | 48 +- 8 files changed, 3449 insertions(+), 66 deletions(-) create mode 100644 drivers/cpufreq/cpufreq_interactivex.c create mode 100644 drivers/cpufreq/cpufreq_lagfree.c create mode 100644 drivers/cpufreq/cpufreq_lulzactive.c create mode 100644 drivers/cpufreq/cpufreq_minmax.c create mode 100644 drivers/cpufreq/cpufreq_smartass.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 50e1a72d..e0008dd1 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -230,7 +230,6 @@ config CPU_FREQ_GOV_BADASS module will be called cpufreq_badass. If in doubt, say N. -<<<<<<< HEAD config CPU_FREQ_DEFAULT_GOV_MINMAX bool "minmax" select CPU_FREQ_GOV_MINMAX @@ -240,13 +239,10 @@ config CPU_FREQ_DEFAULT_GOV_MINMAX frequency jumps does by the governor. This is aimed at maximizing both perfomance and battery life. -======= ->>>>>>> parent of 7e0f70e... Added LulzactiveV2,Lagfree,Smartass,SmartassV2,Minmax,InteractiveX Governor config CPU_FREQ_GOV_CONSERVATIVE tristate "'conservative' cpufreq governor" depends on CPU_FREQ help -<<<<<<< HEAD config CPU_FREQ_DEFAULT_GOV_SMARTASS2 bool "smartass2" @@ -295,38 +291,6 @@ config CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND bool "intellidemand" select CPU_FREQ_GOV_INTELLIDEMAND select CPU_FREQ_GOV_PERFORMANCE -======= - 'conservative' - this driver is rather similar to the 'ondemand' - governor both in its source code and its purpose, the difference is - its optimisation for better suitability in a battery powered - environment. The frequency is gracefully increased and decreased - rather than jumping to 100% when speed is required. - - If you have a desktop machine then you should really be considering - the 'ondemand' governor instead, however if you are using a laptop, - PDA or even an AMD64 based computer (due to the unacceptable - step-by-step latency issues between the minimum and maximum frequency - transitions in the CPU) you will probably want to use this governor. 
- - To compile this driver as a module, choose M here: the - module will be called cpufreq_conservative. - - For details, take a look at linux/Documentation/cpu-freq. - - If in doubt, say N. - -config CPU_FREQ_GOV_DANCEDANCE - tristate "'dancedance' cpufreq governor" - depends on CPU_FREQ - -config CPU_FREQ_GOV_NIGHTMARE - tristate "'nightmare' cpufreq governor" - depends on CPU_FREQ - -config CPU_FREQ_GOV_ONDEMAND - tristate "'ondemand' cpufreq policy governor" - select CPU_FREQ_TABLE ->>>>>>> parent of 7e0f70e... Added LulzactiveV2,Lagfree,Smartass,SmartassV2,Minmax,InteractiveX Governor help config CPU_FREQ_DEFAULT_GOV_SCARY @@ -416,7 +380,6 @@ config CPU_FREQ_GOV_USERSPACE If in doubt, say Y. -<<<<<<< HEAD config CPU_FREQ_GOV_ONDEMAND tristate "'ondemand' cpufreq policy governor" select CPU_FREQ_TABLE @@ -464,13 +427,10 @@ config CPU_FREQ_GOV_LULZACTIVE help 'lulzactive' - a new interactive governor by Tegrak! -======= ->>>>>>> parent of 7e0f70e... Added LulzactiveV2,Lagfree,Smartass,SmartassV2,Minmax,InteractiveX Governor config CPU_FREQ_GOV_WHEATLEY tristate "'wheatley' cpufreq governor" depends on CPU_FREQ -<<<<<<< HEAD config CPU_FREQ_GOV_SMARTASS tristate "'smartass' cpufreq governor" depends on CPU_FREQ @@ -521,8 +481,6 @@ config CPU_FREQ_MIN_TICKS help Minimum number of ticks between polling interval for governors. -======= ->>>>>>> parent of 7e0f70e... Added LulzactiveV2,Lagfree,Smartass,SmartassV2,Minmax,InteractiveX Governor config SEC_DVFS bool "DVFS job" default n @@ -533,7 +491,6 @@ config SEC_DVFS_BOOSTER default y depends on SEC_DVFS -<<<<<<< HEAD config CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER int "Sampling rate multiplier for governors." default 1000 @@ -583,8 +540,6 @@ config CPU_FREQ_GOV_BRAZILIANWAX If in doubt, say Y. -======= ->>>>>>> parent of 7e0f70e... Added LulzactiveV2,Lagfree,Smartass,SmartassV2,Minmax,InteractiveX Governor menu "x86 CPU frequency scaling drivers" depends on X86 source "drivers/cpufreq/Kconfig.x86" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 34dad87b..22e5500d 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -20,7 +20,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o -<<<<<<< HEAD obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o obj-$(CONFIG_CPU_FREQ_GOV_MINMAX) += cpufreq_minmax.o @@ -38,8 +37,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o obj-$(CONFIG_CPU_FREQ_GOV_SAVAGEDZEN) += cpufreq_savagedzen.o obj-$(CONFIG_CPU_FREQ_GOV_ONDEMANDX) += cpufreq_ondemandx.o obj-$(CONFIG_CPU_FREQ_GOV_BRAZILIANWAX) += cpufreq_brazilianwax.o -======= ->>>>>>> parent of 7e0f70e... 
Added LulzactiveV2,Lagfree,Smartass,SmartassV2,Minmax,InteractiveX Governor # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o @@ -68,19 +65,7 @@ obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o -################################################################################## +##################################################################################d + # ARM SoC drivers obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o -obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o -obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o -obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o -obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o -obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o -obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o -obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o -obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o - -################################################################################## -# PowerPC platform drivers -obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o -obj-$(CONFIG_MSM_DCVS) += cpufreq_gov_msm.o diff --git a/drivers/cpufreq/cpufreq_interactivex.c b/drivers/cpufreq/cpufreq_interactivex.c new file mode 100644 index 00000000..72ca6291 --- /dev/null +++ b/drivers/cpufreq/cpufreq_interactivex.c @@ -0,0 +1,381 @@ +/* +* drivers/cpufreq/cpufreq_interactivex.c +* +* Copyright (C) 2010 Google, Inc. +* +* This software is licensed under the terms of the GNU General Public +* License version 2, as published by the Free Software Foundation, and +* may be copied, distributed, and modified under those terms. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* Author: Mike Chan (mike@android.com) - modified for suspend/wake by imoseyon +* +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static void (*pm_idle_old)(void); +static atomic_t active_count = ATOMIC_INIT(0); + +static DEFINE_PER_CPU(struct timer_list, cpu_timer); + +static DEFINE_PER_CPU(u64, time_in_idle); +static DEFINE_PER_CPU(u64, idle_exit_time); + +static struct cpufreq_policy *policy; +static unsigned int target_freq; + +/* Workqueues handle frequency scaling */ +static struct workqueue_struct *up_wq; +static struct workqueue_struct *down_wq; +static struct work_struct freq_scale_work; + +static u64 freq_change_time; +static u64 freq_change_time_in_idle; + +static cpumask_t work_cpumask; + +static unsigned int suspended = 0; +static unsigned int enabled = 0; + +/* +* The minimum ammount of time to spend at a frequency before we can ramp down, +* default is 50ms. 
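* (Editorial note, not part of the original patch: min_sample_time is in
* microseconds, so the 50000 default below means that once the governor has
* changed frequency it ignores scale-down requests until at least 50 ms have
* elapsed since freq_change_time.)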
+*/
+#define DEFAULT_MIN_SAMPLE_TIME 50000;
+static unsigned long min_sample_time;
+
+#define FREQ_THRESHOLD 998400;
+#define RESUME_SPEED 998400;
+
+static int cpufreq_governor_interactivex(struct cpufreq_policy *policy,
+unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX
+static
+#endif
+struct cpufreq_governor cpufreq_gov_interactivex = {
+.name = "interactiveX",
+.governor = cpufreq_governor_interactivex,
+#if defined(CONFIG_ARCH_MSM_SCORPION)
+.max_transition_latency = 8000000,
+#else
+.max_transition_latency = 10000000,
+#endif
+.owner = THIS_MODULE,
+};
+
+static void cpufreq_interactivex_timer(unsigned long data)
+{
+u64 delta_idle;
+u64 update_time;
+u64 *cpu_time_in_idle;
+u64 *cpu_idle_exit_time;
+struct timer_list *t;
+
+u64 now_idle = get_cpu_idle_time_us(data,
+&update_time);
+
+
+cpu_time_in_idle = &per_cpu(time_in_idle, data);
+cpu_idle_exit_time = &per_cpu(idle_exit_time, data);
+
+if (update_time == *cpu_idle_exit_time)
+return;
+
+delta_idle = cputime64_sub(now_idle, *cpu_time_in_idle);
+
+/* Scale up if there were no idle cycles since coming out of idle */
+if (delta_idle == 0) {
+if (policy->cur == policy->max)
+return;
+
+if (nr_running() < 1)
+return;
+
+target_freq = policy->max;
+
+cpumask_set_cpu(data, &work_cpumask);
+queue_work(up_wq, &freq_scale_work);
+return;
+}
+
+/*
+* There is a window where if the cpu utilization can go from low to high
+* between the timer expiring, delta_idle will be > 0 and the cpu will
+* be 100% busy, preventing idle from running, and this timer from
+* firing. So setup another timer to fire to check cpu utilization.
+* Do not setup the timer if there is no scheduled work.
+*/
+t = &per_cpu(cpu_timer, data);
+if (!timer_pending(t) && nr_running() > 0) {
+*cpu_time_in_idle = get_cpu_idle_time_us(
+data, cpu_idle_exit_time);
+mod_timer(t, jiffies + 2);
+}
+
+if (policy->cur == policy->min)
+return;
+
+/*
+* Do not scale down unless we have been at this frequency for the
+* minimum sample time.
+*/
+if (cputime64_sub(update_time, freq_change_time) < min_sample_time)
+return;
+
+target_freq = policy->min;
+cpumask_set_cpu(data, &work_cpumask);
+queue_work(down_wq, &freq_scale_work);
+}
+
+static void cpufreq_idle(void)
+{
+struct timer_list *t;
+u64 *cpu_time_in_idle;
+u64 *cpu_idle_exit_time;
+
+pm_idle_old();
+
+if (!cpumask_test_cpu(smp_processor_id(), policy->cpus))
+return;
+
+/* Timer to fire in 1-2 ticks, jiffie aligned. */
+t = &per_cpu(cpu_timer, smp_processor_id());
+cpu_idle_exit_time = &per_cpu(idle_exit_time, smp_processor_id());
+cpu_time_in_idle = &per_cpu(time_in_idle, smp_processor_id());
+
+if (timer_pending(t) == 0) {
+*cpu_time_in_idle = get_cpu_idle_time_us(
+smp_processor_id(), cpu_idle_exit_time);
+mod_timer(t, jiffies + 2);
+}
+}
+
+/*
+* Choose the cpu frequency based off the load. For now choose the minimum
+* frequency that will satisfy the load, which is not always the lowest power.
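* (Editorial example with assumed numbers: if the CPU was idle for 40% of the
* interval since the last frequency change, cpu_load below works out to 60;
* with policy->cur at 998400 kHz the function returns 998400 * 60 / 100 =
* 599040 kHz, which the worker then passes to __cpufreq_driver_target() with
* CPUFREQ_RELATION_L so it is rounded to a supported step. Loads above 98
* jump straight to policy->max.)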
+*/
+static unsigned int cpufreq_interactivex_calc_freq(unsigned int cpu)
+{
+unsigned int delta_time;
+unsigned int idle_time;
+unsigned int cpu_load;
+unsigned int newfreq;
+u64 current_wall_time;
+u64 current_idle_time;
+
+current_idle_time = get_cpu_idle_time_us(cpu, &current_wall_time);
+
+idle_time = (unsigned int) current_idle_time - freq_change_time_in_idle;
+delta_time = (unsigned int) current_wall_time - freq_change_time;
+
+cpu_load = 100 * (delta_time - idle_time) / delta_time;
+
+if (cpu_load > 98) newfreq = policy->max;
+else newfreq = policy->cur * cpu_load / 100;
+
+return newfreq;
+}
+
+
+/* We use the same work function to scale up and down */
+static void cpufreq_interactivex_freq_change_time_work(struct work_struct *work)
+{
+unsigned int cpu;
+unsigned int newtarget;
+cpumask_t tmp_mask = work_cpumask;
+newtarget = FREQ_THRESHOLD;
+
+for_each_cpu(cpu, &tmp_mask) {
+if (!suspended) {
+if (target_freq == policy->max) {
+if (nr_running() == 1) {
+cpumask_clear_cpu(cpu, &work_cpumask);
+return;
+}
+// __cpufreq_driver_target(policy, target_freq, CPUFREQ_RELATION_H);
+__cpufreq_driver_target(policy, newtarget, CPUFREQ_RELATION_H);
+} else {
+target_freq = cpufreq_interactivex_calc_freq(cpu);
+__cpufreq_driver_target(policy, target_freq,
+CPUFREQ_RELATION_L);
+}
+}
+freq_change_time_in_idle = get_cpu_idle_time_us(cpu, &freq_change_time);
+cpumask_clear_cpu(cpu, &work_cpumask);
+}
+
+
+}
+
+static ssize_t show_min_sample_time(struct kobject *kobj,
+struct attribute *attr, char *buf)
+{
+return sprintf(buf, "%lu\n", min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct kobject *kobj,
+struct attribute *attr, const char *buf, size_t count)
+{
+return strict_strtoul(buf, 0, &min_sample_time);
+}
+
+static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
+show_min_sample_time, store_min_sample_time);
+
+static struct attribute *interactivex_attributes[] = {
+&min_sample_time_attr.attr,
+NULL,
+};
+
+static struct attribute_group interactivex_attr_group = {
+.attrs = interactivex_attributes,
+.name = "interactiveX",
+};
+
+static void interactivex_suspend(int suspend)
+{
+unsigned int max_speed;
+
+max_speed = RESUME_SPEED;
+
+if (!enabled) return;
+ if (!suspend) { // resume at max speed:
+suspended = 0;
+ __cpufreq_driver_target(policy, max_speed, CPUFREQ_RELATION_L);
+ pr_info("[imoseyon] interactiveX awake at %d\n", policy->cur);
+ } else {
+suspended = 1;
+ __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+ pr_info("[imoseyon] interactiveX suspended at %d\n", policy->cur);
+ }
+}
+
+static void interactivex_early_suspend(struct early_suspend *handler) {
+ interactivex_suspend(1);
+}
+
+static void interactivex_late_resume(struct early_suspend *handler) {
+ interactivex_suspend(0);
+}
+
+static struct early_suspend interactivex_power_suspend = {
+ .suspend = interactivex_early_suspend,
+ .resume = interactivex_late_resume,
+ .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1,
+};
+
+static int cpufreq_governor_interactivex(struct cpufreq_policy *new_policy,
+unsigned int event)
+{
+int rc;
+switch (event) {
+case CPUFREQ_GOV_START:
+if (!cpu_online(new_policy->cpu))
+return -EINVAL;
+
+/*
+* Do not register the idle hook and create sysfs
+* entries if we have already done so.
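* (Editorial note: active_count is shared by every CPU the governor manages,
* so only the first CPUFREQ_GOV_START actually creates the global sysfs group
* and swaps pm_idle for cpufreq_idle; later starts just bump the counter, and
* CPUFREQ_GOV_STOP undoes the hooks only once the counter drops back.)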
+*/ +if (atomic_inc_return(&active_count) > 1) +return 0; + +rc = sysfs_create_group(cpufreq_global_kobject, +&interactivex_attr_group); +if (rc) +return rc; + +pm_idle_old = pm_idle; +pm_idle = cpufreq_idle; +policy = new_policy; +enabled = 1; + register_early_suspend(&interactivex_power_suspend); + pr_info("[imoseyon] interactiveX active\n"); +break; + +case CPUFREQ_GOV_STOP: +if (atomic_dec_return(&active_count) > 1) +return 0; + +sysfs_remove_group(cpufreq_global_kobject, +&interactivex_attr_group); + +pm_idle = pm_idle_old; +del_timer(&per_cpu(cpu_timer, new_policy->cpu)); +enabled = 0; + unregister_early_suspend(&interactivex_power_suspend); + pr_info("[imoseyon] interactiveX inactive\n"); +break; + +case CPUFREQ_GOV_LIMITS: +if (new_policy->max < new_policy->cur) +__cpufreq_driver_target(new_policy, +new_policy->max, CPUFREQ_RELATION_H); +else if (new_policy->min > new_policy->cur) +__cpufreq_driver_target(new_policy, +new_policy->min, CPUFREQ_RELATION_L); +break; +} +return 0; +} + +static int __init cpufreq_interactivex_init(void) +{ +unsigned int i; +struct timer_list *t; +min_sample_time = DEFAULT_MIN_SAMPLE_TIME; + +/* Initalize per-cpu timers */ +for_each_possible_cpu(i) { +t = &per_cpu(cpu_timer, i); +init_timer_deferrable(t); +t->function = cpufreq_interactivex_timer; +t->data = i; +} + +/* Scale up is high priority */ +up_wq = create_workqueue("kinteractive_up"); +down_wq = create_workqueue("knteractive_down"); + +INIT_WORK(&freq_scale_work, cpufreq_interactivex_freq_change_time_work); + + pr_info("[imoseyon] interactiveX enter\n"); +return cpufreq_register_governor(&cpufreq_gov_interactivex); +} + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX +fs_initcall(cpufreq_interactivex_init); +#else +module_init(cpufreq_interactivex_init); +#endif + +static void __exit cpufreq_interactivex_exit(void) +{ + pr_info("[imoseyon] interactiveX exit\n"); +cpufreq_unregister_governor(&cpufreq_gov_interactivex); +destroy_workqueue(up_wq); +destroy_workqueue(down_wq); +} + +module_exit(cpufreq_interactivex_exit); + +MODULE_AUTHOR("Mike Chan "); +MODULE_DESCRIPTION("'cpufreq_interactiveX' - A cpufreq governor for " +"Latency sensitive workloads"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cpufreq_lagfree.c b/drivers/cpufreq/cpufreq_lagfree.c new file mode 100644 index 00000000..bf274a11 --- /dev/null +++ b/drivers/cpufreq/cpufreq_lagfree.c @@ -0,0 +1,662 @@ +/* + * drivers/cpufreq/cpufreq_lagfree.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * (C) 2004 Alexander Clouter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_UP_THRESHOLD (50) +#define DEF_FREQUENCY_DOWN_THRESHOLD (15) +#define FREQ_STEP_DOWN (160000) +#define FREQ_SLEEP_MAX (320000) +#define FREQ_AWAKE_MIN (480000) +#define FREQ_STEP_UP_SLEEP_PERCENT (20) + +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. 
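 * (Editorial example, assumed values: a driver reporting a transition latency
 * of 10,000 nS is converted to 10 uS when the governor starts, giving
 * def_sampling_rate = 10 * 10 * CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER;
 * with the multiplier at this tree's Kconfig default of 1000 that is
 * 100,000 uS, i.e. roughly one load evaluation every 100 ms, subject to the
 * MIN_STAT_SAMPLING_RATE floor.)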
The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers + * with CPUFREQ_ETERNAL), this governor will not work. + * All times here are in uS. + */ +static unsigned int def_sampling_rate; +unsigned int suspended = 0; +#define MIN_SAMPLING_RATE_RATIO (2) +/* for correct statistics, we need at least 10 ticks between each measure */ +#define MIN_STAT_SAMPLING_RATE \ + (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(CONFIG_CPU_FREQ_MIN_TICKS)) +#define MIN_SAMPLING_RATE \ + (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) +#define MAX_SAMPLING_RATE (500 * def_sampling_rate) +#define DEF_SAMPLING_DOWN_FACTOR (4) +#define MAX_SAMPLING_DOWN_FACTOR (10) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +static void do_dbs_timer(struct work_struct *work); + +struct cpu_dbs_info_s { + struct cpufreq_policy *cur_policy; + unsigned int prev_cpu_idle_up; + unsigned int prev_cpu_idle_down; + unsigned int enable; + unsigned int down_skip; + unsigned int requested_freq; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug + * lock and dbs_mutex. cpu_hotplug lock should always be held before + * dbs_mutex. If any function that can potentially take cpu_hotplug lock + * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then + * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock + * is recursive for the same process. -Venki + */ +static DEFINE_MUTEX (dbs_mutex); +static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer); + +struct dbs_tuners { + unsigned int sampling_rate; + unsigned int sampling_down_factor; + unsigned int up_threshold; + unsigned int down_threshold; + unsigned int ignore_nice; + //unsigned int freq_step; +}; + +static struct dbs_tuners dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .ignore_nice = 1, + //.freq_step = 5, +}; + +static inline unsigned int get_cpu_idle_time(unsigned int cpu) +{ + unsigned int add_nice = 0, ret; + + if (dbs_tuners_ins.ignore_nice) + add_nice = kstat_cpu(cpu).cpustat.nice; + + ret = kstat_cpu(cpu).cpustat.idle + + kstat_cpu(cpu).cpustat.iowait + + add_nice; + + return ret; +} + +/* keep track of frequency transitions */ +static int +dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_freqs *freq = data; + struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, + freq->cpu); + + if (!this_dbs_info->enable) + return 0; + + this_dbs_info->requested_freq = freq->new; + + return 0; +} + +static struct notifier_block dbs_cpufreq_notifier_block = { + .notifier_call = dbs_cpufreq_notifier +}; + +/************************** sysfs interface ************************/ +static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) +{ + return sprintf (buf, "%u\n", MAX_SAMPLING_RATE); +} + +static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) +{ + return sprintf (buf, "%u\n", MIN_SAMPLING_RATE); +} + +#define define_one_ro(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +define_one_ro(sampling_rate_max); +define_one_ro(sampling_rate_min); + +/* cpufreq_lagfree Governor Tunables */ +#define show_one(file_name, 
object) \ +static ssize_t show_##file_name \ +(struct cpufreq_policy *unused, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(sampling_down_factor, sampling_down_factor); +show_one(up_threshold, up_threshold); +show_one(down_threshold, down_threshold); +show_one(ignore_nice_load, ignore_nice); +//show_one(freq_step, freq_step); + +static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf (buf, "%u", &input); + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.sampling_down_factor = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_sampling_rate(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf (buf, "%u", &input); + + mutex_lock(&dbs_mutex); + if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) { + mutex_unlock(&dbs_mutex); + return -EINVAL; + } + + dbs_tuners_ins.sampling_rate = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_up_threshold(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf (buf, "%u", &input); + + mutex_lock(&dbs_mutex); + if (ret != 1 || input > 100 || input <= dbs_tuners_ins.down_threshold) { + mutex_unlock(&dbs_mutex); + return -EINVAL; + } + + dbs_tuners_ins.up_threshold = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_down_threshold(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf (buf, "%u", &input); + + mutex_lock(&dbs_mutex); + if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) { + mutex_unlock(&dbs_mutex); + return -EINVAL; + } + + dbs_tuners_ins.down_threshold = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + mutex_lock(&dbs_mutex); + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + mutex_unlock(&dbs_mutex); + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(cpu_dbs_info, j); + j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j); + j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up; + } + mutex_unlock(&dbs_mutex); + + return count; +} + +/*static ssize_t store_freq_step(struct cpufreq_policy *policy, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input > 100) + input = 100; + + / * no need to test here if freq_step is zero as the user might actually + * want this, they would be crazy though :) * / + mutex_lock(&dbs_mutex); + dbs_tuners_ins.freq_step = input; + mutex_unlock(&dbs_mutex); + + return count; +}*/ + +#define define_one_rw(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0644, show_##_name, store_##_name) + +define_one_rw(sampling_rate); +define_one_rw(sampling_down_factor); +define_one_rw(up_threshold); 
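/*
 * (Editorial illustration, not part of the original patch: for a tunable such
 * as up_threshold, the show_one()/define_one_rw() macros above expand to
 * roughly the following, which is what exposes the value through the
 * "lagfree" sysfs group created at governor start:
 *
 *   static ssize_t show_up_threshold(struct cpufreq_policy *unused, char *buf)
 *   {
 *           return sprintf(buf, "%u\n", dbs_tuners_ins.up_threshold);
 *   }
 *   static struct freq_attr up_threshold =
 *           __ATTR(up_threshold, 0644, show_up_threshold, store_up_threshold);
 * )
 */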
+define_one_rw(down_threshold); +define_one_rw(ignore_nice_load); +//define_one_rw(freq_step); + +static struct attribute * dbs_attributes[] = { + &sampling_rate_max.attr, + &sampling_rate_min.attr, + &sampling_rate.attr, + &sampling_down_factor.attr, + &up_threshold.attr, + &down_threshold.attr, + &ignore_nice_load.attr, + //&freq_step.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "lagfree", +}; + +/************************** sysfs end ************************/ + +static void dbs_check_cpu(int cpu) +{ + unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; + unsigned int tmp_idle_ticks, total_idle_ticks; + unsigned int freq_target; + unsigned int freq_down_sampling_rate; + struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu); + struct cpufreq_policy *policy; + + if (!this_dbs_info->enable) + return; + + policy = this_dbs_info->cur_policy; + + /* + * The default safe range is 20% to 80% + * Every sampling_rate, we check + * - If current idle time is less than 20%, then we try to + * increase frequency + * Every sampling_rate*sampling_down_factor, we check + * - If current idle time is more than 80%, then we try to + * decrease frequency + * + * Any frequency increase takes it to the maximum frequency. + * Frequency reduction happens at minimum steps of + * 5% (default) of max_frequency + */ + + /* Check for frequency increase */ + idle_ticks = UINT_MAX; + + /* Check for frequency increase */ + total_idle_ticks = get_cpu_idle_time(cpu); + tmp_idle_ticks = total_idle_ticks - + this_dbs_info->prev_cpu_idle_up; + this_dbs_info->prev_cpu_idle_up = total_idle_ticks; + + if (tmp_idle_ticks < idle_ticks) + idle_ticks = tmp_idle_ticks; + + /* Scale idle ticks by 100 and compare with up and down ticks */ + idle_ticks *= 100; + up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) * + usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + if (idle_ticks < up_idle_ticks) { + this_dbs_info->down_skip = 0; + this_dbs_info->prev_cpu_idle_down = + this_dbs_info->prev_cpu_idle_up; + + /* if we are already at full speed then break out early */ + if (this_dbs_info->requested_freq == policy->max && !suspended) + return; + + //freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; + if (suspended) + freq_target = (FREQ_STEP_UP_SLEEP_PERCENT * policy->max) / 100; + else + freq_target = policy->max; + + /* max freq cannot be less than 100. But who knows.... 
*/ + if (unlikely(freq_target == 0)) + freq_target = 5; + + this_dbs_info->requested_freq += freq_target; + if (this_dbs_info->requested_freq > policy->max) + this_dbs_info->requested_freq = policy->max; + + //Screen off mode + if (suspended && this_dbs_info->requested_freq > FREQ_SLEEP_MAX) + this_dbs_info->requested_freq = FREQ_SLEEP_MAX; + + //Screen off mode + if (!suspended && this_dbs_info->requested_freq < FREQ_AWAKE_MIN) + this_dbs_info->requested_freq = FREQ_AWAKE_MIN; + + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, + CPUFREQ_RELATION_H); + return; + } + + /* Check for frequency decrease */ + this_dbs_info->down_skip++; + if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor) + return; + + /* Check for frequency decrease */ + total_idle_ticks = this_dbs_info->prev_cpu_idle_up; + tmp_idle_ticks = total_idle_ticks - + this_dbs_info->prev_cpu_idle_down; + this_dbs_info->prev_cpu_idle_down = total_idle_ticks; + + if (tmp_idle_ticks < idle_ticks) + idle_ticks = tmp_idle_ticks; + + /* Scale idle ticks by 100 and compare with up and down ticks */ + idle_ticks *= 100; + this_dbs_info->down_skip = 0; + + freq_down_sampling_rate = dbs_tuners_ins.sampling_rate * + dbs_tuners_ins.sampling_down_factor; + down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * + usecs_to_jiffies(freq_down_sampling_rate); + + if (idle_ticks > down_idle_ticks) { + /* + * if we are already at the lowest speed then break out early + * or if we 'cannot' reduce the speed as the user might want + * freq_target to be zero + */ + if (this_dbs_info->requested_freq == policy->min && suspended + /*|| dbs_tuners_ins.freq_step == 0*/) + return; + + //freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; + freq_target = FREQ_STEP_DOWN; //policy->max; + + /* max freq cannot be less than 100. But who knows.... 
*/ + if (unlikely(freq_target == 0)) + freq_target = 5; + + // prevent going under 0 + if(freq_target > this_dbs_info->requested_freq) + this_dbs_info->requested_freq = policy->min; + else + this_dbs_info->requested_freq -= freq_target; + + if (this_dbs_info->requested_freq < policy->min) + this_dbs_info->requested_freq = policy->min; + + //Screen on mode + if (!suspended && this_dbs_info->requested_freq < FREQ_AWAKE_MIN) + this_dbs_info->requested_freq = FREQ_AWAKE_MIN; + + //Screen off mode + if (suspended && this_dbs_info->requested_freq > FREQ_SLEEP_MAX) + this_dbs_info->requested_freq = FREQ_SLEEP_MAX; + + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, + CPUFREQ_RELATION_H); + return; + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + int i; + mutex_lock(&dbs_mutex); + for_each_online_cpu(i) + dbs_check_cpu(i); + schedule_delayed_work(&dbs_work, + usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); + mutex_unlock(&dbs_mutex); +} + +static inline void dbs_timer_init(void) +{ + init_timer_deferrable(&dbs_work.timer); + schedule_delayed_work(&dbs_work, + usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); + return; +} + +static inline void dbs_timer_exit(void) +{ + cancel_delayed_work(&dbs_work); + return; +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + if (this_dbs_info->enable) /* Already enabled */ + break; + + mutex_lock(&dbs_mutex); + + rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu); + j_dbs_info->prev_cpu_idle_down + = j_dbs_info->prev_cpu_idle_up; + } + this_dbs_info->enable = 1; + this_dbs_info->down_skip = 0; + this_dbs_info->requested_freq = policy->cur; + + dbs_enable++; + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + /* policy latency is in nS. 
Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + + def_sampling_rate = 10 * latency * + CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER; + + if (def_sampling_rate < MIN_STAT_SAMPLING_RATE) + def_sampling_rate = MIN_STAT_SAMPLING_RATE; + + dbs_tuners_ins.sampling_rate = def_sampling_rate; + + dbs_timer_init(); + cpufreq_register_notifier( + &dbs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + + mutex_unlock(&dbs_mutex); + break; + + case CPUFREQ_GOV_STOP: + mutex_lock(&dbs_mutex); + this_dbs_info->enable = 0; + sysfs_remove_group(&policy->kobj, &dbs_attr_group); + dbs_enable--; + /* + * Stop the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 0) { + dbs_timer_exit(); + cpufreq_unregister_notifier( + &dbs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + + mutex_unlock(&dbs_mutex); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&dbs_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target( + this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target( + this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&dbs_mutex); + break; + } + return 0; +} + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_LAGFREE +static +#endif +struct cpufreq_governor cpufreq_gov_lagfree = { + .name = "lagfree", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +static void lagfree_early_suspend(struct early_suspend *handler) { + suspended = 1; +} + +static void lagfree_late_resume(struct early_suspend *handler) { + suspended = 0; +} + +static struct early_suspend lagfree_power_suspend = { + .suspend = lagfree_early_suspend, + .resume = lagfree_late_resume, + .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, +}; + +static int __init cpufreq_gov_dbs_init(void) +{ + register_early_suspend(&lagfree_power_suspend); + return cpufreq_register_governor(&cpufreq_gov_lagfree); +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + /* Make sure that the scheduled work is indeed not running */ + flush_scheduled_work(); + + unregister_early_suspend(&lagfree_power_suspend); + cpufreq_unregister_governor(&cpufreq_gov_lagfree); +} + + +MODULE_AUTHOR ("Emilio López "); +MODULE_DESCRIPTION ("'cpufreq_lagfree' - A dynamic cpufreq governor for " + "Low Latency Frequency Transition capable processors " + "optimised for use in a battery environment" + "Based on conservative by Alexander Clouter"); +MODULE_LICENSE ("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_LAGFREE +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_lulzactive.c b/drivers/cpufreq/cpufreq_lulzactive.c new file mode 100644 index 00000000..ab5506a6 --- /dev/null +++ b/drivers/cpufreq/cpufreq_lulzactive.c @@ -0,0 +1,1143 @@ +/* + * drivers/cpufreq/cpufreq_lulzactive.c + * + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * Author: Mike Chan (mike@android.com) + * Edited: Tegrak (luciferanna@gmail.com) + * + * Driver values in /sys/devices/system/cpu/cpufreq/lulzactive + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define LULZACTIVE_VERSION (2) +#define LULZACTIVE_AUTHOR "tegrak" + +// if you changed some codes for optimization, just write your name here. +#define LULZACTIVE_TUNER "simone201" + +#define LOGI(fmt...) printk(KERN_INFO "[lulzactive] " fmt) +#define LOGW(fmt...) printk(KERN_WARNING "[lulzactive] " fmt) +#define LOGD(fmt...) printk(KERN_DEBUG "[lulzactive] " fmt) + +static void (*pm_idle_old)(void); +static atomic_t active_count = ATOMIC_INIT(0); + +struct cpufreq_lulzactive_cpuinfo { + struct timer_list cpu_timer; + int timer_idlecancel; + u64 time_in_idle; + u64 idle_exit_time; + u64 timer_run_time; + int idling; + u64 freq_change_time; + u64 freq_change_time_in_idle; + struct cpufreq_policy *policy; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_table_size; + unsigned int target_freq; + int governor_enabled; +}; + +static DEFINE_PER_CPU(struct cpufreq_lulzactive_cpuinfo, cpuinfo); + +/* Workqueues handle frequency scaling */ +static struct task_struct *up_task; +static struct workqueue_struct *down_wq; +static struct work_struct freq_scale_down_work; +static cpumask_t up_cpumask; +static spinlock_t up_cpumask_lock; +static cpumask_t down_cpumask; +static spinlock_t down_cpumask_lock; + +/* + * The minimum amount of time to spend at a frequency before we can step up. + */ +#define DEFAULT_UP_SAMPLE_TIME 20000 +static unsigned long up_sample_time; + +/* + * The minimum amount of time to spend at a frequency before we can step down. + */ +#define DEFAULT_DOWN_SAMPLE_TIME 40000 +static unsigned long down_sample_time; + +/* + * DEBUG print flags + */ +static unsigned long debug_mode; +enum { + LULZACTIVE_DEBUG_EARLY_SUSPEND=1, + LULZACTIVE_DEBUG_START_STOP=2, + LULZACTIVE_DEBUG_LOAD=4, + LULZACTIVE_DEBUG_SUSPEND=8, +}; +//#define DEFAULT_DEBUG_MODE (LULZACTIVE_DEBUG_EARLY_SUSPEND | LULZACTIVE_DEBUG_START_STOP | LULZACTIVE_DEBUG_SUSPEND) +#define DEFAULT_DEBUG_MODE (0) + +/* + * CPU freq will be increased if measured load > inc_cpu_load; + */ +#define DEFAULT_INC_CPU_LOAD 75 +static unsigned long inc_cpu_load; + +/* + * CPU freq will be decreased if measured load < dec_cpu_load; + * not implemented yet. + */ +#define DEFAULT_DEC_CPU_LOAD 30 +static unsigned long dec_cpu_load; + +/* + * Increasing frequency table index + * zero disables and causes to always jump straight to max frequency. + */ +#define DEFAULT_PUMP_UP_STEP 1 +static unsigned long pump_up_step; + +/* + * Decreasing frequency table index + * zero disables and will calculate frequency according to load heuristic. + */ +#define DEFAULT_PUMP_DOWN_STEP 1 +static unsigned long pump_down_step; + +/* + * Use minimum frequency while suspended. 
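 * (Editorial note, assuming the descending frequency table used on this SoC:
 * screen_off_min_step below is an index into that table, and when left at its
 * default it is fixed up to freq_table_size - 3, i.e. a step near the low end.
 * While early_suspended is set, adjust_screen_off_freq() caps any requested
 * frequency at that table entry, clamped to the policy min/max.)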
+ */ +static unsigned int suspending; +static unsigned int early_suspended; + +#define SCREEN_OFF_LOWEST_STEP (0xffffffff) +#define DEFAULT_SCREEN_OFF_MIN_STEP (SCREEN_OFF_LOWEST_STEP) +static unsigned long screen_off_min_step; + +#define DEBUG 0 +#define BUFSZ 128 + +#if DEBUG +#include + +struct dbgln { + int cpu; + unsigned long jiffy; + unsigned long run; + char buf[BUFSZ]; +}; + +#define NDBGLNS 256 + +static struct dbgln dbgbuf[NDBGLNS]; +static int dbgbufs; +static int dbgbufe; +static struct proc_dir_entry *dbg_proc; +static spinlock_t dbgpr_lock; + +static u64 up_request_time; +static unsigned int up_max_latency; + +static void dbgpr(char *fmt, ...) +{ + va_list args; + int n; + unsigned long flags; + + spin_lock_irqsave(&dbgpr_lock, flags); + n = dbgbufe; + va_start(args, fmt); + vsnprintf(dbgbuf[n].buf, BUFSZ, fmt, args); + va_end(args); + dbgbuf[n].cpu = smp_processor_id(); + dbgbuf[n].run = nr_running(); + dbgbuf[n].jiffy = jiffies; + + if (++dbgbufe >= NDBGLNS) + dbgbufe = 0; + + if (dbgbufe == dbgbufs) + if (++dbgbufs >= NDBGLNS) + dbgbufs = 0; + + spin_unlock_irqrestore(&dbgpr_lock, flags); +} + +static void dbgdump(void) +{ + int i, j; + unsigned long flags; + static struct dbgln prbuf[NDBGLNS]; + + spin_lock_irqsave(&dbgpr_lock, flags); + i = dbgbufs; + j = dbgbufe; + memcpy(prbuf, dbgbuf, sizeof(dbgbuf)); + dbgbufs = 0; + dbgbufe = 0; + spin_unlock_irqrestore(&dbgpr_lock, flags); + + while (i != j) + { + printk("%lu %d %lu %s", + prbuf[i].jiffy, prbuf[i].cpu, prbuf[i].run, + prbuf[i].buf); + if (++i == NDBGLNS) + i = 0; + } +} + +static int dbg_proc_read(char *buffer, char **start, off_t offset, + int count, int *peof, void *dat) +{ + printk("max up_task latency=%uus\n", up_max_latency); + dbgdump(); + *peof = 1; + return 0; +} + + +#else +#define dbgpr(...) 
do {} while (0) +#endif + +static int cpufreq_governor_lulzactive(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_LULZACTIVE +static +#endif +struct cpufreq_governor cpufreq_gov_lulzactive = { + .name = "lulzactive", + .governor = cpufreq_governor_lulzactive, + .max_transition_latency = 9000000, + .owner = THIS_MODULE, +}; + +static unsigned int get_freq_table_size(struct cpufreq_frequency_table *freq_table) { + unsigned int size = 0; + while (freq_table[++size].frequency != CPUFREQ_TABLE_END); + return size; +} + +static inline void fix_screen_off_min_step(struct cpufreq_lulzactive_cpuinfo *pcpu) { + if (pcpu->freq_table_size <= 0) { + screen_off_min_step = 0; + return; + } + + if (DEFAULT_SCREEN_OFF_MIN_STEP == screen_off_min_step) + screen_off_min_step = pcpu->freq_table_size - 3; + + if (screen_off_min_step >= pcpu->freq_table_size) + screen_off_min_step = pcpu->freq_table_size - 3; +} + +static inline unsigned int adjust_screen_off_freq( + struct cpufreq_lulzactive_cpuinfo *pcpu, unsigned int freq) { + + if (early_suspended && freq > pcpu->freq_table[screen_off_min_step].frequency) { + freq = pcpu->freq_table[screen_off_min_step].frequency; + pcpu->target_freq = pcpu->policy->cur; + + if (freq > pcpu->policy->max) + freq = pcpu->policy->max; + if (freq < pcpu->policy->min) + freq = pcpu->policy->min; + } + + return freq; +} + +static void cpufreq_lulzactive_timer(unsigned long data) +{ + unsigned int delta_idle; + unsigned int delta_time; + int cpu_load; + int load_since_change; + u64 time_in_idle; + u64 idle_exit_time; + struct cpufreq_lulzactive_cpuinfo *pcpu = + &per_cpu(cpuinfo, data); + u64 now_idle; + unsigned int new_freq; + int index; + int ret; + + /* + * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time, + * this lets idle exit know the current idle time sample has + * been processed, and idle exit can generate a new sample and + * re-arm the timer. This prevents a concurrent idle + * exit on that CPU from writing a new set of info at the same time + * the timer function runs (the timer function can't use that info + * until more time passes). + */ + time_in_idle = pcpu->time_in_idle; + idle_exit_time = pcpu->idle_exit_time; + now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time); + smp_wmb(); + + /* If we raced with cancelling a timer, skip. */ + if (!idle_exit_time) { + dbgpr("timer %d: no valid idle exit sample\n", (int) data); + goto exit; + } + + /* let it be when s5pv310 contorl the suspending by tegrak */ + //if (suspending) { + // goto rearm; + //} + +#if DEBUG + if ((int) jiffies - (int) pcpu->cpu_timer.expires >= 10) + dbgpr("timer %d: late by %d ticks\n", + (int) data, jiffies - pcpu->cpu_timer.expires); +#endif + + delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle); + delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time, + idle_exit_time); + + /* + * If timer ran less than 1ms after short-term sample started, retry. 
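 * (Editorial note: delta_time is in microseconds here, so the guard below
 * compares against 1000; when it trips, the function jumps to the rearm
 * label, which re-arms the per-CPU timer two jiffies out if it is not
 * already pending.)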
+ */ + if (delta_time < 1000) { + dbgpr("timer %d: time delta %u too short exit=%llu now=%llu\n", (int) data, + delta_time, idle_exit_time, pcpu->timer_run_time); + goto rearm; + } + + if (delta_idle > delta_time) + cpu_load = 0; + else + cpu_load = 100 * (delta_time - delta_idle) / delta_time; + + delta_idle = (unsigned int) cputime64_sub(now_idle, + pcpu->freq_change_time_in_idle); + delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time, + pcpu->freq_change_time); + + if (delta_idle > delta_time) + load_since_change = 0; + else + load_since_change = + 100 * (delta_time - delta_idle) / delta_time; + + /* + * Choose greater of short-term load (since last idle timer + * started or timer function re-armed itself) or long-term load + * (since last frequency change). + */ + if (load_since_change > cpu_load) + cpu_load = load_since_change; + + /* + * START lulzactive algorithm section + */ + if (cpu_load >= inc_cpu_load) { + if (pump_up_step && pcpu->policy->cur < pcpu->policy->max) { + ret = cpufreq_frequency_table_target( + pcpu->policy, pcpu->freq_table, + pcpu->policy->cur, CPUFREQ_RELATION_H, + &index); + if (ret < 0) { + goto rearm; + } + + // apply pump_up_step by tegrak + index -= pump_up_step; + if (index < 0) + index = 0; + + new_freq = pcpu->freq_table[index].frequency; + } + else { + new_freq = pcpu->policy->max; + } + } + else { + if (pump_down_step) { + ret = cpufreq_frequency_table_target( + pcpu->policy, pcpu->freq_table, + pcpu->policy->cur, CPUFREQ_RELATION_H, + &index); + if (ret < 0) { + goto rearm; + } + + // apply pump_down_step by tegrak + index += pump_down_step; + if (index >= pcpu->freq_table_size) { + index = pcpu->freq_table_size - 1; + } + + new_freq = (pcpu->policy->cur > pcpu->policy->min) ? + (pcpu->freq_table[index].frequency) : + (pcpu->policy->min); + } + else { + new_freq = pcpu->policy->max * cpu_load / 100; + ret = cpufreq_frequency_table_target( + pcpu->policy, pcpu->freq_table, + new_freq, CPUFREQ_RELATION_H, + &index); + if (ret < 0) { + goto rearm; + } + new_freq = pcpu->freq_table[index].frequency; + } + } + + // adjust freq when screen off + new_freq = adjust_screen_off_freq(pcpu, new_freq); + + if (pcpu->target_freq == new_freq) + { + dbgpr("timer %d: load=%d, already at %d\n", (int) data, cpu_load, new_freq); + goto rearm_if_notmax; + } + + /* + * Do not scale down unless we have been at this frequency for the + * minimum sample time. 
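 * (Editorial note, using the defaults defined above: down_sample_time is
 * 40000 us, so a lower target is ignored until the CPU has spent at least
 * 40 ms at the current step; the matching up_sample_time check in the else
 * branch gates increases at 20 ms.)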
+ */ + if (new_freq < pcpu->target_freq) { + if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) < + down_sample_time) { + dbgpr("timer %d: load=%d cur=%d tgt=%d not yet\n", (int) data, cpu_load, pcpu->target_freq, new_freq); + goto rearm; + } + } + else { + if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) < + up_sample_time) { + dbgpr("timer %d: load=%d cur=%d tgt=%d not yet\n", (int) data, cpu_load, pcpu->target_freq, new_freq); + /* don't reset timer */ + goto rearm; + } + } + + if (suspending && debug_mode & LULZACTIVE_DEBUG_SUSPEND) { + LOGI("suspending: cpu_load=%d%% new_freq=%u ppcpu->policy->cur=%u\n", + cpu_load, new_freq, pcpu->policy->cur); + } + if (early_suspended && !suspending && debug_mode & LULZACTIVE_DEBUG_LOAD) { + LOGI("early_suspended: cpu_load=%d%% new_freq=%u ppcpu->policy->cur=%u\n", + cpu_load, new_freq, pcpu->policy->cur); + } + if (debug_mode & LULZACTIVE_DEBUG_LOAD && !early_suspended && !suspending) { + LOGI("cpu_load=%d%% new_freq=%u pcpu->target_freq=%u pcpu->policy->cur=%u\n", + cpu_load, new_freq, pcpu->target_freq, pcpu->policy->cur); + } + + dbgpr("timer %d: load=%d cur=%d tgt=%d queue\n", (int) data, cpu_load, pcpu->target_freq, new_freq); + + if (new_freq < pcpu->target_freq) { + pcpu->target_freq = new_freq; + spin_lock(&down_cpumask_lock); + cpumask_set_cpu(data, &down_cpumask); + spin_unlock(&down_cpumask_lock); + queue_work(down_wq, &freq_scale_down_work); + } else { + pcpu->target_freq = new_freq; +#if DEBUG + up_request_time = ktime_to_us(ktime_get()); +#endif + spin_lock(&up_cpumask_lock); + cpumask_set_cpu(data, &up_cpumask); + spin_unlock(&up_cpumask_lock); + wake_up_process(up_task); + } + +rearm_if_notmax: + /* + * Already set max speed and don't see a need to change that, + * wait until next idle to re-evaluate, don't need timer. + */ + if (pcpu->target_freq == pcpu->policy->max) + goto exit; + +rearm: + if (!timer_pending(&pcpu->cpu_timer)) { + /* + * If already at min: if that CPU is idle, don't set timer. + * Else cancel the timer if that CPU goes idle. We don't + * need to re-evaluate speed until the next idle exit. + */ + if (pcpu->target_freq == pcpu->policy->min) { + smp_rmb(); + + if (pcpu->idling) { + dbgpr("timer %d: cpu idle, don't re-arm\n", (int) data); + goto exit; + } + + pcpu->timer_idlecancel = 1; + } + + pcpu->time_in_idle = get_cpu_idle_time_us( + data, &pcpu->idle_exit_time); + mod_timer(&pcpu->cpu_timer, jiffies + 2); + dbgpr("timer %d: set timer for %lu exit=%llu\n", (int) data, pcpu->cpu_timer.expires, pcpu->idle_exit_time); + } + +exit: + return; +} + +static void cpufreq_lulzactive_idle(void) +{ + struct cpufreq_lulzactive_cpuinfo *pcpu = + &per_cpu(cpuinfo, smp_processor_id()); + int pending; + + if (!pcpu->governor_enabled) { + pm_idle_old(); + return; + } + + pcpu->idling = 1; + smp_wmb(); + pending = timer_pending(&pcpu->cpu_timer); + + if (pcpu->target_freq != pcpu->policy->min) { +#ifdef CONFIG_SMP + /* + * Entering idle while not at lowest speed. On some + * platforms this can hold the other CPU(s) at that speed + * even though the CPU is idle. Set a timer to re-evaluate + * speed so this idle CPU doesn't hold the other CPUs above + * min indefinitely. This should probably be a quirk of + * the CPUFreq driver. 
+ */ + if (!pending) { + pcpu->time_in_idle = get_cpu_idle_time_us( + smp_processor_id(), &pcpu->idle_exit_time); + pcpu->timer_idlecancel = 0; + mod_timer(&pcpu->cpu_timer, jiffies + 2); + dbgpr("idle: enter at %d, set timer for %lu exit=%llu\n", + pcpu->target_freq, pcpu->cpu_timer.expires, + pcpu->idle_exit_time); + } +#endif + } else { + /* + * If at min speed and entering idle after load has + * already been evaluated, and a timer has been set just in + * case the CPU suddenly goes busy, cancel that timer. The + * CPU didn't go busy; we'll recheck things upon idle exit. + */ + if (pending && pcpu->timer_idlecancel) { + dbgpr("idle: cancel timer for %lu\n", pcpu->cpu_timer.expires); + del_timer(&pcpu->cpu_timer); + /* + * Ensure last timer run time is after current idle + * sample start time, so next idle exit will always + * start a new idle sampling period. + */ + pcpu->idle_exit_time = 0; + pcpu->timer_idlecancel = 0; + } + } + + pm_idle_old(); + pcpu->idling = 0; + smp_wmb(); + + /* + * Arm the timer for 1-2 ticks later if not already, and if the timer + * function has already processed the previous load sampling + * interval. (If the timer is not pending but has not processed + * the previous interval, it is probably racing with us on another + * CPU. Let it compute load based on the previous sample and then + * re-arm the timer for another interval when it's done, rather + * than updating the interval start time to be "now", which doesn't + * give the timer function enough time to make a decision on this + * run.) + */ + if (timer_pending(&pcpu->cpu_timer) == 0 && + pcpu->timer_run_time >= pcpu->idle_exit_time) { + pcpu->time_in_idle = + get_cpu_idle_time_us(smp_processor_id(), + &pcpu->idle_exit_time); + pcpu->timer_idlecancel = 0; + mod_timer(&pcpu->cpu_timer, jiffies + 2); + dbgpr("idle: exit, set timer for %lu exit=%llu\n", pcpu->cpu_timer.expires, pcpu->idle_exit_time); +#if DEBUG + } else if (timer_pending(&pcpu->cpu_timer) == 0 && + pcpu->timer_run_time < pcpu->idle_exit_time) { + dbgpr("idle: timer not run yet: exit=%llu tmrrun=%llu\n", + pcpu->idle_exit_time, pcpu->timer_run_time); +#endif + } + +} + +static int cpufreq_lulzactive_up_task(void *data) +{ + unsigned int cpu; + cpumask_t tmp_mask; + struct cpufreq_lulzactive_cpuinfo *pcpu; + +#if DEBUG + u64 now; + u64 then; + unsigned int lat; +#endif + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + spin_lock(&up_cpumask_lock); + + if (cpumask_empty(&up_cpumask)) { + spin_unlock(&up_cpumask_lock); + schedule(); + + if (kthread_should_stop()) + break; + + spin_lock(&up_cpumask_lock); + } + + set_current_state(TASK_RUNNING); + +#if DEBUG + then = up_request_time; + now = ktime_to_us(ktime_get()); + + if (now > then) { + lat = ktime_to_us(ktime_get()) - then; + + if (lat > up_max_latency) + up_max_latency = lat; + } +#endif + + tmp_mask = up_cpumask; + cpumask_clear(&up_cpumask); + spin_unlock(&up_cpumask_lock); + + for_each_cpu(cpu, &tmp_mask) { + pcpu = &per_cpu(cpuinfo, cpu); + + if (nr_running() == 1) { + dbgpr("up %d: tgt=%d nothing else running\n", cpu, + pcpu->target_freq); + } + + __cpufreq_driver_target(pcpu->policy, + pcpu->target_freq, + CPUFREQ_RELATION_H); + pcpu->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu, + &pcpu->freq_change_time); + dbgpr("up %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur); + } + } + + return 0; +} + +static void cpufreq_lulzactive_freq_down(struct work_struct *work) +{ + unsigned int cpu; + cpumask_t tmp_mask; + struct cpufreq_lulzactive_cpuinfo 
*pcpu; + + spin_lock(&down_cpumask_lock); + tmp_mask = down_cpumask; + cpumask_clear(&down_cpumask); + spin_unlock(&down_cpumask_lock); + + for_each_cpu(cpu, &tmp_mask) { + pcpu = &per_cpu(cpuinfo, cpu); + __cpufreq_driver_target(pcpu->policy, + pcpu->target_freq, + CPUFREQ_RELATION_H); + pcpu->freq_change_time_in_idle = + get_cpu_idle_time_us(cpu, + &pcpu->freq_change_time); + dbgpr("down %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur); + } +} + +// inc_cpu_load +static ssize_t show_inc_cpu_load(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", inc_cpu_load); +} + +static ssize_t store_inc_cpu_load(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + ssize_t ret; + if(strict_strtoul(buf, 0, &inc_cpu_load)==-EINVAL) return -EINVAL; + + if (inc_cpu_load > 100) { + inc_cpu_load = 100; + } + else if (inc_cpu_load < 10) { + inc_cpu_load = 10; + } + return count; +} + +static struct global_attr inc_cpu_load_attr = __ATTR(inc_cpu_load, 0666, + show_inc_cpu_load, store_inc_cpu_load); + +// down_sample_time +static ssize_t show_down_sample_time(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", down_sample_time); +} + +static ssize_t store_down_sample_time(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + if(strict_strtoul(buf, 0, &down_sample_time)==-EINVAL) return -EINVAL; + return count; +} + +static struct global_attr down_sample_time_attr = __ATTR(down_sample_time, 0666, + show_down_sample_time, store_down_sample_time); + +// up_sample_time +static ssize_t show_up_sample_time(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", up_sample_time); +} + +static ssize_t store_up_sample_time(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + if(strict_strtoul(buf, 0, &up_sample_time)==-EINVAL) return -EINVAL; + return count; +} + +static struct global_attr up_sample_time_attr = __ATTR(up_sample_time, 0666, + show_up_sample_time, store_up_sample_time); + +// debug_mode +static ssize_t show_debug_mode(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", debug_mode); +} + +static ssize_t store_debug_mode(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + if(strict_strtoul(buf, 0, &debug_mode)==-EINVAL) return -EINVAL; + return count; +} + +static struct global_attr debug_mode_attr = __ATTR(debug_mode, 0666, + show_debug_mode, store_debug_mode); + +// pump_up_step +static ssize_t show_pump_up_step(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", pump_up_step); +} + +static ssize_t store_pump_up_step(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + if(strict_strtoul(buf, 0, &pump_up_step)==-EINVAL) return -EINVAL; + return count; +} + +static struct global_attr pump_up_step_attr = __ATTR(pump_up_step, 0666, + show_pump_up_step, store_pump_up_step); + +// pump_down_step +static ssize_t show_pump_down_step(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", pump_down_step); +} + +static ssize_t store_pump_down_step(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + ssize_t ret; + struct cpufreq_lulzactive_cpuinfo *pcpu; + + if(strict_strtoul(buf, 0, &pump_down_step)==-EINVAL) return -EINVAL; + + pcpu = &per_cpu(cpuinfo, 0); + // fix out of 
bound + if (pcpu->freq_table_size <= pump_down_step) { + pump_down_step = pcpu->freq_table_size - 1; + } + return count; +} + +static struct global_attr pump_down_step_attr = __ATTR(pump_down_step, 0666, + show_pump_down_step, store_pump_down_step); + +// screen_off_min_step +static ssize_t show_screen_off_min_step(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct cpufreq_lulzactive_cpuinfo *pcpu; + + pcpu = &per_cpu(cpuinfo, 0); + fix_screen_off_min_step(pcpu); + + return sprintf(buf, "%lu\n", screen_off_min_step); +} + +static ssize_t store_screen_off_min_step(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + struct cpufreq_lulzactive_cpuinfo *pcpu; + ssize_t ret; + + if(strict_strtoul(buf, 0, &screen_off_min_step)==-EINVAL) return -EINVAL; + + pcpu = &per_cpu(cpuinfo, 0); + fix_screen_off_min_step(pcpu); + + return count; +} + +static struct global_attr screen_off_min_step_attr = __ATTR(screen_off_min_step, 0666, + show_screen_off_min_step, store_screen_off_min_step); + +// author +static ssize_t show_author(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%s\n", LULZACTIVE_AUTHOR); +} + +static struct global_attr author_attr = __ATTR(author, 0444, + show_author, NULL); + +// tuner +static ssize_t show_tuner(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%s\n", LULZACTIVE_TUNER); +} + +static struct global_attr tuner_attr = __ATTR(tuner, 0444, + show_tuner, NULL); + +// version +static ssize_t show_version(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", LULZACTIVE_VERSION); +} + +static struct global_attr version_attr = __ATTR(version, 0444, + show_version, NULL); + +// freq_table +static ssize_t show_freq_table(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct cpufreq_lulzactive_cpuinfo *pcpu; + char temp[64]; + int i; + + pcpu = &per_cpu(cpuinfo, 0); + + for (i = 0; i < pcpu->freq_table_size; i++) { + sprintf(temp, "%u\n", pcpu->freq_table[i].frequency); + strcat(buf, temp); + } + + return strlen(buf); +} + +static struct global_attr freq_table_attr = __ATTR(freq_table, 0444, + show_freq_table, NULL); + +static struct attribute *lulzactive_attributes[] = { + &inc_cpu_load_attr.attr, + &up_sample_time_attr.attr, + &down_sample_time_attr.attr, + &pump_up_step_attr.attr, + &pump_down_step_attr.attr, + &screen_off_min_step_attr.attr, + &debug_mode_attr.attr, + &author_attr.attr, + &tuner_attr.attr, + &version_attr.attr, + &freq_table_attr.attr, + NULL, +}; + +static struct attribute_group lulzactive_attr_group = { + .attrs = lulzactive_attributes, + .name = "lulzactive", +}; + +static int cpufreq_governor_lulzactive(struct cpufreq_policy *new_policy, + unsigned int event) +{ + int rc; + struct cpufreq_lulzactive_cpuinfo *pcpu = + &per_cpu(cpuinfo, new_policy->cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if (debug_mode & LULZACTIVE_DEBUG_START_STOP) { + LOGI("CPUFREQ_GOV_START\n"); + } + if (!cpu_online(new_policy->cpu)) + return -EINVAL; + + pcpu->policy = new_policy; + pcpu->freq_table = cpufreq_frequency_get_table(new_policy->cpu); + pcpu->target_freq = new_policy->cur; + pcpu->freq_change_time_in_idle = + get_cpu_idle_time_us(new_policy->cpu, + &pcpu->freq_change_time); + pcpu->governor_enabled = 1; + pcpu->freq_table_size = get_freq_table_size(pcpu->freq_table); + + // fix invalid screen_off_min_step + fix_screen_off_min_step(pcpu); + + /* + * Do not register the idle hook and create 
sysfs + * entries if we have already done so. + */ + if (atomic_inc_return(&active_count) > 1) + return 0; + + rc = sysfs_create_group(cpufreq_global_kobject, + &lulzactive_attr_group); + if (rc) + return rc; + + pm_idle_old = pm_idle; + pm_idle = cpufreq_lulzactive_idle; + break; + + case CPUFREQ_GOV_STOP: + if (debug_mode & LULZACTIVE_DEBUG_START_STOP) { + LOGI("CPUFREQ_GOV_STOP\n"); + } + pcpu->governor_enabled = 0; + + if (atomic_dec_return(&active_count) > 0) + return 0; + + sysfs_remove_group(cpufreq_global_kobject, + &lulzactive_attr_group); + + pm_idle = pm_idle_old; + del_timer(&pcpu->cpu_timer); + break; + + case CPUFREQ_GOV_LIMITS: + if (new_policy->max < new_policy->cur) + __cpufreq_driver_target(new_policy, + new_policy->max, CPUFREQ_RELATION_H); + else if (new_policy->min > new_policy->cur) + __cpufreq_driver_target(new_policy, + new_policy->min, CPUFREQ_RELATION_L); + break; + } + return 0; +} + +static void lulzactive_early_suspend(struct early_suspend *handler) { + struct cpufreq_lulzactive_cpuinfo *pcpu; + unsigned int min_freq, max_freq; + + early_suspended = 1; + + if (debug_mode & LULZACTIVE_DEBUG_EARLY_SUSPEND) { + LOGI("%s\n", __func__); + + pcpu = &per_cpu(cpuinfo, 0); + + min_freq = pcpu->policy->min; + + max_freq = min(pcpu->policy->max, pcpu->freq_table[screen_off_min_step].frequency); + max_freq = max(max_freq, min_freq); + + LOGI("lock @%u~@%uMHz\n", min_freq / 1000, max_freq / 1000); + } +} + +static void lulzactive_late_resume(struct early_suspend *handler) { + early_suspended = 0; + if (debug_mode & LULZACTIVE_DEBUG_EARLY_SUSPEND) { + LOGI("%s\n", __func__); + } +} + +static struct early_suspend lulzactive_power_suspend = { + .suspend = lulzactive_early_suspend, + .resume = lulzactive_late_resume, + .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, +}; + +static int lulzactive_pm_notifier_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct cpufreq_policy* policy; + + switch (event) { + case PM_SUSPEND_PREPARE: + suspending = 1; + if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { + LOGI("PM_SUSPEND_PREPARE"); + policy = cpufreq_cpu_get(0); + if (policy) { + LOGI("PM_SUSPEND_PREPARE using @%uMHz\n", policy->cur); + } + } + break; + case PM_POST_SUSPEND: + suspending = 0; + if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { + LOGI("PM_POST_SUSPEND"); + policy = cpufreq_cpu_get(0); + if (policy) { + LOGI("PM_POST_SUSPEND using @%uMHz\n", policy->cur); + } + } + break; + case PM_RESTORE_PREPARE: + if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { + LOGI("PM_RESTORE_PREPARE"); + } + break; + case PM_POST_RESTORE: + if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { + LOGI("PM_POST_RESTORE"); + } + break; + case PM_HIBERNATION_PREPARE: + if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { + LOGI("PM_HIBERNATION_PREPARE"); + } + break; + case PM_POST_HIBERNATION: + if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { + LOGI("PM_POST_HIBERNATION"); + } + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block lulzactive_pm_notifier = { + .notifier_call = lulzactive_pm_notifier_event, +}; + +static int __init cpufreq_lulzactive_init(void) +{ + unsigned int i; + struct cpufreq_lulzactive_cpuinfo *pcpu; + struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; + + up_sample_time = DEFAULT_UP_SAMPLE_TIME; + down_sample_time = DEFAULT_DOWN_SAMPLE_TIME; + debug_mode = DEFAULT_DEBUG_MODE; + inc_cpu_load = DEFAULT_INC_CPU_LOAD; + dec_cpu_load = DEFAULT_DEC_CPU_LOAD; + pump_up_step = DEFAULT_PUMP_UP_STEP; + pump_down_step = DEFAULT_PUMP_DOWN_STEP; + early_suspended = 
0; + suspending = 0; + screen_off_min_step = DEFAULT_SCREEN_OFF_MIN_STEP; + + /* Initalize per-cpu timers */ + for_each_possible_cpu(i) { + pcpu = &per_cpu(cpuinfo, i); + init_timer(&pcpu->cpu_timer); + pcpu->cpu_timer.function = cpufreq_lulzactive_timer; + pcpu->cpu_timer.data = i; + } + + up_task = kthread_create(cpufreq_lulzactive_up_task, NULL, + "klulzactiveup"); + if (IS_ERR(up_task)) + return PTR_ERR(up_task); + + sched_setscheduler_nocheck(up_task, SCHED_FIFO, ¶m); + get_task_struct(up_task); + + /* No rescuer thread, bind to CPU queuing the work for possibly + warm cache (probably doesn't matter much). */ + down_wq = create_workqueue("klulzactive_down"); + + if (! down_wq) + goto err_freeuptask; + + INIT_WORK(&freq_scale_down_work, + cpufreq_lulzactive_freq_down); + +#if DEBUG + spin_lock_init(&dbgpr_lock); + dbg_proc = create_proc_entry("igov", S_IWUSR | S_IRUGO, NULL); + dbg_proc->read_proc = dbg_proc_read; +#endif + spin_lock_init(&down_cpumask_lock); + spin_lock_init(&up_cpumask_lock); + + register_pm_notifier(&lulzactive_pm_notifier); + register_early_suspend(&lulzactive_power_suspend); + + return cpufreq_register_governor(&cpufreq_gov_lulzactive); + +err_freeuptask: + put_task_struct(up_task); + return -ENOMEM; +} + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_LULZACTIVE +fs_initcall(cpufreq_lulzactive_init); +#else +module_init(cpufreq_lulzactive_init); +#endif + +static void __exit cpufreq_lulzactive_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_lulzactive); + unregister_early_suspend(&lulzactive_power_suspend); + unregister_pm_notifier(&lulzactive_pm_notifier); + kthread_stop(up_task); + put_task_struct(up_task); + destroy_workqueue(down_wq); +} + +module_exit(cpufreq_lulzactive_exit); + +MODULE_AUTHOR("Tegrak "); +MODULE_DESCRIPTION("'lulzactive' - improved interactive governor inspired by smartass"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cpufreq_minmax.c b/drivers/cpufreq/cpufreq_minmax.c new file mode 100644 index 00000000..09dba0d2 --- /dev/null +++ b/drivers/cpufreq/cpufreq_minmax.c @@ -0,0 +1,575 @@ +/* + * drivers/cpufreq/cpufreq_minmax.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * (C) 2004 Alexander Clouter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This governor is an adapatation of the conservative governor. + * See the Documentation/cpu-freq/governors.txt for more information. + * + * Adapatation from conservative by Erasmux. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_UP_THRESHOLD (92) +#define DEF_FREQUENCY_DOWN_THRESHOLD (27) + +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers + * with CPUFREQ_ETERNAL), this governor will not work. + * All times here are in uS. 
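 *
 * (Worked example added for this write-up, not part of the original
 * source: with a driver-reported transition latency of 10,000 nS the
 * governor start path below computes latency = 10 uS, so
 * def_sampling_rate = 10 * 10 * 500 = 50,000 uS, i.e. roughly one
 * sample every 50 mS unless the sysfs tunable overrides it.)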
+ */ +static unsigned int def_sampling_rate; +#define MIN_SAMPLING_RATE_RATIO (2) +/* for correct statistics, we need at least 10 ticks between each measure */ +#define MIN_STAT_SAMPLING_RATE \ + (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(CONFIG_CPU_FREQ_MIN_TICKS)) +#define MIN_SAMPLING_RATE \ + (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) +#define MAX_SAMPLING_RATE (500 * def_sampling_rate) +#define DEF_SAMPLING_DOWN_FACTOR (10) +#define MAX_SAMPLING_DOWN_FACTOR (100) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) +#define CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER (500) +#define CONFIG_CPU_FREQ_MIN_TICKS (2) + +static void do_dbs_timer(struct work_struct *work); + +struct cpu_dbs_info_s { + struct cpufreq_policy *cur_policy; + unsigned int prev_cpu_idle_up; + unsigned int prev_cpu_idle_down; + unsigned int enable; + unsigned int down_skip; + unsigned int requested_freq; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug + * lock and dbs_mutex. cpu_hotplug lock should always be held before + * dbs_mutex. If any function that can potentially take cpu_hotplug lock + * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then + * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock + * is recursive for the same process. -Venki + */ +static DEFINE_MUTEX (dbs_mutex); +static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer); + +struct dbs_tuners { + unsigned int sampling_rate; + unsigned int sampling_down_factor; + unsigned int up_threshold; + unsigned int down_threshold; + unsigned int ignore_nice; +}; + +static struct dbs_tuners dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .ignore_nice = 0, +}; + +static inline unsigned int get_cpu_idle_time(unsigned int cpu) +{ + unsigned int add_nice = 0, ret; + + if (dbs_tuners_ins.ignore_nice) + add_nice = kstat_cpu(cpu).cpustat.nice; + + ret = kstat_cpu(cpu).cpustat.idle + + kstat_cpu(cpu).cpustat.iowait + + add_nice; + + return ret; +} + +/* keep track of frequency transitions */ +static int +dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_freqs *freq = data; + struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, + freq->cpu); + + if (!this_dbs_info->enable) + return 0; + + this_dbs_info->requested_freq = freq->new; + + return 0; +} + +static struct notifier_block dbs_cpufreq_notifier_block = { + .notifier_call = dbs_cpufreq_notifier +}; + +/************************** sysfs interface ************************/ +static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) +{ + return sprintf (buf, "%u\n", MAX_SAMPLING_RATE); +} + +static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) +{ + return sprintf (buf, "%u\n", MIN_SAMPLING_RATE); +} + +#define define_one_ro(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +define_one_ro(sampling_rate_max); +define_one_ro(sampling_rate_min); + +/* cpufreq_minmax Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct cpufreq_policy *unused, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(sampling_down_factor, 
sampling_down_factor); +show_one(up_threshold, up_threshold); +show_one(down_threshold, down_threshold); +show_one(ignore_nice_load, ignore_nice); + +static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf (buf, "%u", &input); + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.sampling_down_factor = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_sampling_rate(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf (buf, "%u", &input); + + mutex_lock(&dbs_mutex); + if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) { + mutex_unlock(&dbs_mutex); + return -EINVAL; + } + + dbs_tuners_ins.sampling_rate = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_up_threshold(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf (buf, "%u", &input); + + mutex_lock(&dbs_mutex); + if (ret != 1 || input > 100 || input <= dbs_tuners_ins.down_threshold) { + mutex_unlock(&dbs_mutex); + return -EINVAL; + } + + dbs_tuners_ins.up_threshold = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_down_threshold(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf (buf, "%u", &input); + + mutex_lock(&dbs_mutex); + if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) { + mutex_unlock(&dbs_mutex); + return -EINVAL; + } + + dbs_tuners_ins.down_threshold = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + mutex_lock(&dbs_mutex); + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + mutex_unlock(&dbs_mutex); + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(cpu_dbs_info, j); + j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j); + j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up; + } + mutex_unlock(&dbs_mutex); + + return count; +} + +#define define_one_rw(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0644, show_##_name, store_##_name) + +define_one_rw(sampling_rate); +define_one_rw(sampling_down_factor); +define_one_rw(up_threshold); +define_one_rw(down_threshold); +define_one_rw(ignore_nice_load); + +static struct attribute * dbs_attributes[] = { + &sampling_rate_max.attr, + &sampling_rate_min.attr, + &sampling_rate.attr, + &sampling_down_factor.attr, + &up_threshold.attr, + &down_threshold.attr, + &ignore_nice_load.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "minmax", +}; + +/************************** sysfs end ************************/ + +static void dbs_check_cpu(int cpu) +{ + unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; + unsigned int tmp_idle_ticks, total_idle_ticks; + //unsigned int freq_target; + unsigned int freq_down_sampling_rate; + struct cpu_dbs_info_s *this_dbs_info = 
&per_cpu(cpu_dbs_info, cpu); + struct cpufreq_policy *policy; + + if (!this_dbs_info->enable) + return; + + policy = this_dbs_info->cur_policy; + + /* + * The default safe range is 20% to 80% + * Every sampling_rate, we check + * - If current idle time is less than 20%, then we try to + * increase frequency + * Every sampling_rate*sampling_down_factor, we check + * - If current idle time is more than 80%, then we try to + * decrease frequency + * + */ + + this_dbs_info->down_skip++; + + /* Check for frequency increase */ + idle_ticks = UINT_MAX; + + /* Check for frequency increase */ + total_idle_ticks = get_cpu_idle_time(cpu); + tmp_idle_ticks = total_idle_ticks - + this_dbs_info->prev_cpu_idle_up; + this_dbs_info->prev_cpu_idle_up = total_idle_ticks; + + if (tmp_idle_ticks < idle_ticks) + idle_ticks = tmp_idle_ticks; + + /* Scale idle ticks by 100 and compare with up and down ticks */ + idle_ticks *= 100; + up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) * + usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + if (idle_ticks < up_idle_ticks) { + this_dbs_info->down_skip = 0; + this_dbs_info->prev_cpu_idle_down = + this_dbs_info->prev_cpu_idle_up; + + /* if we are already at full speed then break out early */ + if (this_dbs_info->requested_freq == policy->max) + return; + + this_dbs_info->requested_freq = policy->max; + + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, + CPUFREQ_RELATION_H); + return; + } + + /* Check for frequency decrease */ + if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor) + return; + else this_dbs_info->down_skip--; /* just to prevent overflow */ + + + /* Check for frequency decrease */ + total_idle_ticks = this_dbs_info->prev_cpu_idle_up; + tmp_idle_ticks = total_idle_ticks - + this_dbs_info->prev_cpu_idle_down; + this_dbs_info->prev_cpu_idle_down = total_idle_ticks; + + if (tmp_idle_ticks < idle_ticks) + idle_ticks = tmp_idle_ticks; + + /* Scale idle ticks by 100 and compare with up and down ticks */ + idle_ticks *= 100; + + freq_down_sampling_rate = dbs_tuners_ins.sampling_rate * + dbs_tuners_ins.sampling_down_factor; + down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * + usecs_to_jiffies(freq_down_sampling_rate); + + if (idle_ticks > down_idle_ticks) { + /* + * if we are already at the lowest speed then break out early + * or if we 'cannot' reduce the speed as the user might want + * freq_target to be zero + */ + if (this_dbs_info->requested_freq == policy->min) + return; + + this_dbs_info->requested_freq = policy->min; + + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, + CPUFREQ_RELATION_H); + return; + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + int i; + + mutex_lock(&dbs_mutex); + for_each_online_cpu(i) + dbs_check_cpu(i); + schedule_delayed_work(&dbs_work, + usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); + mutex_unlock(&dbs_mutex); +} + +static inline void dbs_timer_init(void) +{ + init_timer_deferrable(&dbs_work.timer); + schedule_delayed_work(&dbs_work, + usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); + return; +} + +static inline void dbs_timer_exit(void) +{ + cancel_delayed_work(&dbs_work); + return; +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + if 
(this_dbs_info->enable) /* Already enabled */ + break; + + mutex_lock(&dbs_mutex); + + rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu); + j_dbs_info->prev_cpu_idle_down + = j_dbs_info->prev_cpu_idle_up; + } + this_dbs_info->enable = 1; + this_dbs_info->down_skip = 0; + this_dbs_info->requested_freq = policy->cur; + + dbs_enable++; + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + /* policy latency is in nS. Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + + def_sampling_rate = 10 * latency * + CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER; + + if (def_sampling_rate < MIN_STAT_SAMPLING_RATE) + def_sampling_rate = MIN_STAT_SAMPLING_RATE; + + dbs_tuners_ins.sampling_rate = def_sampling_rate; + + dbs_timer_init(); + cpufreq_register_notifier( + &dbs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + + mutex_unlock(&dbs_mutex); + break; + + case CPUFREQ_GOV_STOP: + mutex_lock(&dbs_mutex); + this_dbs_info->enable = 0; + sysfs_remove_group(&policy->kobj, &dbs_attr_group); + dbs_enable--; + /* + * Stop the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 0) { + dbs_timer_exit(); + cpufreq_unregister_notifier( + &dbs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + + mutex_unlock(&dbs_mutex); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&dbs_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target( + this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target( + this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&dbs_mutex); + break; + } + return 0; +} + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_MINMAX +static +#endif +struct cpufreq_governor cpufreq_gov_minmax = { + .name = "minmax", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +static int __init cpufreq_gov_dbs_init(void) +{ + return cpufreq_register_governor(&cpufreq_gov_minmax); +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + /* Make sure that the scheduled work is indeed not running */ + flush_scheduled_work(); + + cpufreq_unregister_governor(&cpufreq_gov_minmax); +} + +MODULE_AUTHOR ("Erasmux"); +MODULE_DESCRIPTION ("'cpufreq_minmax' - A dynamic cpufreq governor which " + "minimizes the frequecy jumps by always selecting either " + "the minimal or maximal frequency"); +MODULE_LICENSE ("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_MINMAX +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_smartass.c b/drivers/cpufreq/cpufreq_smartass.c new file mode 100644 index 00000000..0ba3ee61 --- /dev/null +++ b/drivers/cpufreq/cpufreq_smartass.c @@ -0,0 +1,642 @@ +/* + * drivers/cpufreq/cpufreq_smartass.c + * + * Copyright (C) 2010 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Author: Erasmux + * + * Based on the interactive governor By Mike Chan (mike@android.com) + * which was adaptated to 2.6.29 kernel by Nadlabak (pavel@doshaska.net) + * + * requires to add + * EXPORT_SYMBOL_GPL(nr_running); + * at the end of kernel/sched.c + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void (*pm_idle_old)(void); +static atomic_t active_count = ATOMIC_INIT(0); + +struct smartass_info_s { + struct cpufreq_policy *cur_policy; + struct timer_list timer; + u64 time_in_idle; + u64 idle_exit_time; + unsigned int force_ramp_up; + unsigned int enable; +}; +static DEFINE_PER_CPU(struct smartass_info_s, smartass_info); + +/* Workqueues handle frequency scaling */ +static struct workqueue_struct *up_wq; +static struct workqueue_struct *down_wq; +static struct work_struct freq_scale_work; + +static u64 freq_change_time; +static u64 freq_change_time_in_idle; + +static cpumask_t work_cpumask; +static unsigned int suspended; + + +/* + * The minimum amount of time to spend at a frequency before we can ramp down, + * default is 45ms. + */ +#define DEFAULT_RAMP_DOWN_RATE_NS 45000; +static unsigned long ramp_down_rate_ns; + +/* + * When ramping up frequency jump to at least this frequency. + */ + +#define DEFAULT_UP_MIN_FREQ (800*1000) +static unsigned int up_min_freq; + +/* + * When sleep_max_freq>0 the frequency when suspended will be capped + * by this frequency. Also will wake up at max frequency of policy + * to minimize wakeup issues. + * Set sleep_max_freq=0 to disable this behavior. + */ +#define DEFAULT_SLEEP_MAX_FREQ (400*1000) +static unsigned int sleep_max_freq; + +/* + * Sampling rate, I highly recommend to leave it at 2. + */ +#define DEFAULT_SAMPLE_RATE_JIFFIES 2 +static unsigned int sample_rate_jiffies; + +/* + * Max freqeuncy delta when ramping up. + */ + +#define DEFAULT_MAX_RAMP_UP (300 * 1000) +static unsigned int max_ramp_up; + +/* + * CPU freq will be increased if measured load > max_cpu_load; + */ +#define DEFAULT_MAX_CPU_LOAD 60 +static unsigned long max_cpu_load; + +/* + * CPU freq will be decreased if measured load < min_cpu_load; + */ +#define DEFAULT_MIN_CPU_LOAD 30 +static unsigned long min_cpu_load; + +//Leave this zero by default, people can tweak it if they so wish. 
+#define DEFAULT_RAMP_UP_RATE_NS 0 +static unsigned long ramp_up_rate_ns; + + +static int cpufreq_governor_smartass(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS +static +#endif +struct cpufreq_governor cpufreq_gov_smartass = { + .name = "smartass", + .governor = cpufreq_governor_smartass, + .max_transition_latency = 9000000, + .owner = THIS_MODULE, +}; + +static void cpufreq_smartass_timer(unsigned long data) +{ + u64 delta_idle; + u64 update_time; + u64 now_idle; + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, data); + struct cpufreq_policy *policy = this_smartass->cur_policy; + + now_idle = get_cpu_idle_time_us(data, &update_time); + + if (update_time == this_smartass->idle_exit_time) + return; + + delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle); + //printk(KERN_INFO "smartass: t=%llu i=%llu\n",cputime64_sub(update_time,this_smartass->idle_exit_time),delta_idle); + + /* Scale up if there were no idle cycles since coming out of idle */ + if (delta_idle == 0 && cputime64_sub(update_time, freq_change_time) > ramp_up_rate_ns) { + if (policy->cur == policy->max) + return; + + if (nr_running() < 1) + return; + + this_smartass->force_ramp_up = 1; + cpumask_set_cpu(data, &work_cpumask); + queue_work(up_wq, &freq_scale_work); + return; + } + + /* + * There is a window where if the cpu utlization can go from low to high + * between the timer expiring, delta_idle will be > 0 and the cpu will + * be 100% busy, preventing idle from running, and this timer from + * firing. So setup another timer to fire to check cpu utlization. + * Do not setup the timer if there is no scheduled work. + */ + if (!timer_pending(&this_smartass->timer) && nr_running() > 0) { + this_smartass->time_in_idle = get_cpu_idle_time_us( + data, &this_smartass->idle_exit_time); + mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies); + } + + if (policy->cur == policy->min) + return; + + /* + * Do not scale down unless we have been at this frequency for the + * minimum sample time. + */ + if (cputime64_sub(update_time, freq_change_time) < ramp_down_rate_ns) + return; + + + cpumask_set_cpu(data, &work_cpumask); + queue_work(down_wq, &freq_scale_work); +} + +static void cpufreq_idle(void) +{ + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); + struct cpufreq_policy *policy = this_smartass->cur_policy; + + pm_idle_old(); + + if (!cpumask_test_cpu(smp_processor_id(), policy->cpus)) + return; + + /* Timer to fire in 1-2 ticks, jiffie aligned. */ + if (timer_pending(&this_smartass->timer) == 0) { + this_smartass->time_in_idle = get_cpu_idle_time_us( + smp_processor_id(), &this_smartass->idle_exit_time); + mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies); + } +} + +/* + * Choose the cpu frequency based off the load. For now choose the minimum + * frequency that will satisfy the load, which is no +t always the lower power. 
+ */ +static unsigned int cpufreq_smartass_calc_freq(unsigned int cpu, struct cpufreq_policy *policy) +{ + unsigned int delta_time; + unsigned int idle_time; + unsigned int cpu_load; + unsigned int new_freq; + u64 current_wall_time; + u64 current_idle_time; + + + current_idle_time = get_cpu_idle_time_us(cpu, ¤t_wall_time); + + idle_time = (unsigned int)( current_idle_time - freq_change_time_in_idle ); + delta_time = (unsigned int)( current_wall_time - freq_change_time ); + + cpu_load = 100 * (delta_time - idle_time) / delta_time; + if (cpu_load < min_cpu_load) { + //if the current frequency is below 1.2ghz, everything is 200mhz steps + if(policy->cur <= 1200000 && policy->cur >= 400000) { +/* catch the extra 200mhz gap between 400 and 800 when scaling down -netarchy */ + if(policy->cur == 800000) { + new_freq = policy->cur - 400000; + return new_freq; + } + else { + new_freq = policy->cur - 200000; + return new_freq; + } + } + //above 1.2ghz though, everything is 100mhz steps + else { + new_freq = policy->cur - 100000; + return new_freq; + } + } + if (cpu_load > max_cpu_load) { + if(policy->cur < 1200000 && policy->cur > 100000) { +/* catch the gap between 400 and 800 when scaling up -netarchy */ + if(policy->cur == 400000) { + new_freq = policy->cur + 400000; + return new_freq; + } + else { + new_freq = policy->cur + 200000; + return new_freq; + } + } + else { + new_freq = policy->cur + 100000; + return new_freq; + } + } + return policy->cur; +} + +/* We use the same work function to sale up and down */ +static void cpufreq_smartass_freq_change_time_work(struct work_struct *work) +{ + unsigned int cpu; + unsigned int new_freq; + struct smartass_info_s *this_smartass; + struct cpufreq_policy *policy; + cpumask_t tmp_mask = work_cpumask; + for_each_cpu(cpu, tmp_mask) { + this_smartass = &per_cpu(smartass_info, cpu); + policy = this_smartass->cur_policy; + + if (this_smartass->force_ramp_up) { + this_smartass->force_ramp_up = 0; + + if (nr_running() == 1) { + cpumask_clear_cpu(cpu, &work_cpumask); + return; + } + + if (policy->cur == policy->max) + return; + + new_freq = policy->cur + max_ramp_up; + + if (suspended && sleep_max_freq) { + if (new_freq > sleep_max_freq) + new_freq = sleep_max_freq; + } else { + if (new_freq < up_min_freq) + new_freq = up_min_freq; + } + + } else { + new_freq = cpufreq_smartass_calc_freq(cpu,policy); + + // in suspend limit to sleep_max_freq and + // jump straight to sleep_max_freq to avoid wakeup problems + if (suspended && sleep_max_freq && + (new_freq > sleep_max_freq || new_freq > policy->cur)) + new_freq = sleep_max_freq; + } + + if (new_freq > policy->max) + new_freq = policy->max; + + if (new_freq < policy->min) + new_freq = policy->min; + + __cpufreq_driver_target(policy, new_freq, + CPUFREQ_RELATION_L); + + freq_change_time_in_idle = get_cpu_idle_time_us(cpu, + &freq_change_time); + + cpumask_clear_cpu(cpu, &work_cpumask); + + } + + +} + +static ssize_t show_ramp_up_rate_ns(struct cpufreq_policy *policy, char *buf) { + return sprintf(buf, "%lu\n", ramp_up_rate_ns); +} + +static ssize_t store_ramp_up_rate_ns(struct cpufreq_policy *policy, const char *buf, size_t count) { + ssize_t ret; + unsigned long input; + ret = strict_strtoul(buf, 0, &input); + if (ret >= 0 && input >= 0 && input <= 100000000) + ramp_up_rate_ns = input; + return ret; +} + +static struct freq_attr ramp_up_rate_ns_attr = __ATTR(ramp_up_rate_ns, 0644, + show_ramp_up_rate_ns, store_ramp_up_rate_ns); + +static ssize_t show_ramp_down_rate_ns(struct cpufreq_policy *policy, char *buf) +{ + 
return sprintf(buf, "%lu\n", ramp_down_rate_ns); +} + +static ssize_t store_ramp_down_rate_ns(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 1000 && input <= 100000000) + ramp_down_rate_ns = input; + return res; +} + +static struct freq_attr ramp_down_rate_ns_attr = __ATTR(ramp_down_rate_ns, 0644, + show_ramp_down_rate_ns, store_ramp_down_rate_ns); + +static ssize_t show_up_min_freq(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", up_min_freq); +} + +static ssize_t store_up_min_freq(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + up_min_freq = input; + return res; +} + +static struct freq_attr up_min_freq_attr = __ATTR(up_min_freq, 0644, + show_up_min_freq, store_up_min_freq); + +static ssize_t show_sleep_max_freq(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", sleep_max_freq); +} + +static ssize_t store_sleep_max_freq(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input >= 0) + sleep_max_freq = input; + return res; +} + +static struct freq_attr sleep_max_freq_attr = __ATTR(sleep_max_freq, 0644, + show_sleep_max_freq, store_sleep_max_freq); + +static ssize_t show_sample_rate_jiffies(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", sample_rate_jiffies); +} + +static ssize_t store_sample_rate_jiffies(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input <= 1000) + sample_rate_jiffies = input; + return res; +} + +static struct freq_attr sample_rate_jiffies_attr = __ATTR(sample_rate_jiffies, 0644, + show_sample_rate_jiffies, store_sample_rate_jiffies); + +static ssize_t show_max_ramp_up(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", max_ramp_up); +} + +static ssize_t store_max_ramp_up(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 10000) + max_ramp_up = input; + return res; +} + +static struct freq_attr max_ramp_up_attr = __ATTR(max_ramp_up, 0644, + show_max_ramp_up, store_max_ramp_up); + +static ssize_t show_max_cpu_load(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%lu\n", max_cpu_load); +} + +static ssize_t store_max_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input <= 100) + max_cpu_load = input; + return res; +} + +static struct freq_attr max_cpu_load_attr = __ATTR(max_cpu_load, 0644, + show_max_cpu_load, store_max_cpu_load); + +static ssize_t show_min_cpu_load(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%lu\n", min_cpu_load); +} + +static ssize_t store_min_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count) +{ + ssize_t res; + unsigned long input; + res = strict_strtoul(buf, 0, &input); + if (res >= 0 && input > 0 && input < 100) + min_cpu_load = input; + return res; +} + +static struct freq_attr min_cpu_load_attr = __ATTR(min_cpu_load, 0644, + show_min_cpu_load, 
store_min_cpu_load); + +static struct attribute * smartass_attributes[] = { + &ramp_down_rate_ns_attr.attr, + &up_min_freq_attr.attr, + &sleep_max_freq_attr.attr, + &sample_rate_jiffies_attr.attr, + &max_ramp_up_attr.attr, + &max_cpu_load_attr.attr, + &min_cpu_load_attr.attr, + &ramp_up_rate_ns_attr.attr, + NULL, +}; + +static struct attribute_group smartass_attr_group = { + .attrs = smartass_attributes, + .name = "smartass", +}; + +static int cpufreq_governor_smartass(struct cpufreq_policy *new_policy, + unsigned int event) +{ + unsigned int cpu = new_policy->cpu; + int rc; + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!new_policy->cur)) + return -EINVAL; + + if (this_smartass->enable) /* Already enabled */ + break; + + /* + * Do not register the idle hook and create sysfs + * entries if we have already done so. + */ + if (atomic_inc_return(&active_count) > 1) + return 0; + + rc = sysfs_create_group(&new_policy->kobj, &smartass_attr_group); + if (rc) + return rc; + pm_idle_old = pm_idle; + pm_idle = cpufreq_idle; + + this_smartass->cur_policy = new_policy; + this_smartass->enable = 1; + + // notice no break here! + + case CPUFREQ_GOV_LIMITS: + if (this_smartass->cur_policy->cur != new_policy->max) + __cpufreq_driver_target(new_policy, new_policy->max, CPUFREQ_RELATION_H); + + break; + + case CPUFREQ_GOV_STOP: + this_smartass->enable = 0; + + if (atomic_dec_return(&active_count) > 1) + return 0; + sysfs_remove_group(&new_policy->kobj, + &smartass_attr_group); + + pm_idle = pm_idle_old; + del_timer(&this_smartass->timer); + break; + } + + return 0; +} + +static void smartass_suspend(int cpu, int suspend) +{ + struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); + struct cpufreq_policy *policy = this_smartass->cur_policy; + unsigned int new_freq; + + if (!this_smartass->enable || sleep_max_freq==0) // disable behavior for sleep_max_freq==0 + return; + + if (suspend) { + if (policy->cur > sleep_max_freq) { + new_freq = sleep_max_freq; + if (new_freq > policy->max) + new_freq = policy->max; + if (new_freq < policy->min) + new_freq = policy->min; + __cpufreq_driver_target(policy, new_freq, + CPUFREQ_RELATION_H); + } + } else { // resume at max speed: + __cpufreq_driver_target(policy, policy->max, + CPUFREQ_RELATION_H); + } + +} + +static void smartass_early_suspend(struct early_suspend *handler) { + int i; + suspended = 1; + for_each_online_cpu(i) + smartass_suspend(i,1); +} + +static void smartass_late_resume(struct early_suspend *handler) { + int i; + suspended = 0; + for_each_online_cpu(i) + smartass_suspend(i,0); +} + +static struct early_suspend smartass_power_suspend = { + .suspend = smartass_early_suspend, + .resume = smartass_late_resume, +}; + +static int __init cpufreq_smartass_init(void) +{ + unsigned int i; + struct smartass_info_s *this_smartass; + ramp_down_rate_ns = DEFAULT_RAMP_DOWN_RATE_NS; + up_min_freq = DEFAULT_UP_MIN_FREQ; + sleep_max_freq = DEFAULT_SLEEP_MAX_FREQ; + sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES; + max_ramp_up = DEFAULT_MAX_RAMP_UP; + max_cpu_load = DEFAULT_MAX_CPU_LOAD; + min_cpu_load = DEFAULT_MIN_CPU_LOAD; + ramp_up_rate_ns = DEFAULT_RAMP_UP_RATE_NS; + + suspended = 0; + + /* Initalize per-cpu data: */ + for_each_possible_cpu(i) { + this_smartass = &per_cpu(smartass_info, i); + this_smartass->enable = 0; + this_smartass->force_ramp_up = 0; + this_smartass->time_in_idle = 0; + this_smartass->idle_exit_time = 0; + // 
intialize timer: + init_timer_deferrable(&this_smartass->timer); + this_smartass->timer.function = cpufreq_smartass_timer; + this_smartass->timer.data = i; + } + + /* Scale up is high priority */ + up_wq = create_workqueue("ksmartass_up"); + down_wq = create_workqueue("ksmartass_down"); + + INIT_WORK(&freq_scale_work, cpufreq_smartass_freq_change_time_work); + + register_early_suspend(&smartass_power_suspend); + + return cpufreq_register_governor(&cpufreq_gov_smartass); +} + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS +pure_initcall(cpufreq_smartass_init); +#else +module_init(cpufreq_smartass_init); +#endif + +static void __exit cpufreq_smartass_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_smartass); + destroy_workqueue(up_wq); + destroy_workqueue(down_wq); +} + +module_exit(cpufreq_smartass_exit); + +MODULE_AUTHOR ("Erasmux"); +MODULE_DESCRIPTION ("'cpufreq_minmax' - A smart cpufreq governor optimized for the hero!"); +MODULE_LICENSE ("GPL"); + diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index f265babc..8a6a819b 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -24,6 +25,9 @@ #define CPUFREQ_NAME_LEN 16 +/********************************************************************* + * CPUFREQ NOTIFIER INTERFACE * + *********************************************************************/ #define CPUFREQ_TRANSITION_NOTIFIER (0) #define CPUFREQ_POLICY_NOTIFIER (1) @@ -46,6 +50,10 @@ static inline int cpufreq_unregister_notifier(struct notifier_block *nb, static inline void disable_cpufreq(void) { } #endif +/* if (cpufreq_driver->target) exists, the ->governor decides what frequency + * within the limits is used. If (cpufreq_driver->setpolicy> exists, these + * two generic policies are available: + */ #define CPUFREQ_POLICY_POWERSAVE (1) #define CPUFREQ_POLICY_PERFORMANCE (2) @@ -55,6 +63,7 @@ static inline void disable_cpufreq(void) { } struct cpufreq_governor; +/* /sys/devices/system/cpu/cpufreq: entry point for global variables */ extern struct kobject *cpufreq_global_kobject; #define CPUFREQ_ETERNAL (-1) @@ -105,6 +114,7 @@ struct cpufreq_policy { #define CPUFREQ_SHARED_TYPE_ALL (2) #define CPUFREQ_SHARED_TYPE_ANY (3) +/******************** cpufreq transition notifiers *******************/ #define CPUFREQ_PRECHANGE (0) #define CPUFREQ_POSTCHANGE (1) @@ -119,6 +129,15 @@ struct cpufreq_freqs { }; +/** + * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch safe) + * @old: old value + * @div: divisor + * @mult: multiplier + * + * + * new = old * mult / div + */ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mult) { #if BITS_PER_LONG == 32 @@ -136,6 +155,9 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mu #endif }; +/********************************************************************* + * CPUFREQ GOVERNORS * + *********************************************************************/ #define CPUFREQ_GOV_START 1 #define CPUFREQ_GOV_STOP 2 @@ -154,6 +176,9 @@ struct cpufreq_governor { struct module *owner; }; +/* + * Pass a target to the cpufreq driver. 
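 *
 * (Note added for this write-up, not part of the original header: the
 * relation argument biases the frequency-table lookup, with
 * CPUFREQ_RELATION_L picking the lowest frequency at or above the
 * target and CPUFREQ_RELATION_H the highest frequency at or below it,
 * which is how the governors in this series steer their up and down
 * transitions.)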
+ */ extern int cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation); @@ -211,6 +236,7 @@ struct cpufreq_driver { struct freq_attr **attr; }; +/* flags */ #define CPUFREQ_STICKY 0x01 #define CPUFREQ_CONST_LOOPS 0x02 @@ -274,10 +300,14 @@ static struct global_attr _name = \ __ATTR(_name, 0644, show_##_name, store_##_name) +/********************************************************************* + * CPUFREQ 2.6. INTERFACE * + *********************************************************************/ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); int cpufreq_update_policy(unsigned int cpu); #ifdef CONFIG_CPU_FREQ +/* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */ unsigned int cpufreq_get(unsigned int cpu); #else static inline unsigned int cpufreq_get(unsigned int cpu) @@ -286,6 +316,7 @@ static inline unsigned int cpufreq_get(unsigned int cpu) } #endif +/* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it */ #ifdef CONFIG_CPU_FREQ unsigned int cpufreq_quick_get(unsigned int cpu); unsigned int cpufreq_quick_get_max(unsigned int cpu); @@ -301,8 +332,15 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu) #endif +/********************************************************************* + * CPUFREQ DEFAULT GOVERNOR * + *********************************************************************/ +/* + Performance governor is fallback governor if any other gov failed to + auto load due latency restrictions +*/ #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE extern struct cpufreq_governor cpufreq_gov_performance; #endif @@ -320,13 +358,12 @@ extern struct cpufreq_governor cpufreq_gov_ondemand; #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE) extern struct cpufreq_governor cpufreq_gov_conservative; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE) +#endif#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE) extern struct cpufreq_governor cpufreq_gov_interactive; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_interactive) #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND) extern struct cpufreq_governor cpufreq_gov_intellidemand; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_intellidemand) -<<<<<<< HEAD #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2) extern struct cpufreq_governor cpufreq_gov_smartass2; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_smartass2) @@ -366,11 +403,12 @@ extern struct cpufreq_governor cpufreq_gov_ondemandx; #elif defined(CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX) extern struct cpufreq_governor cpufreq_gov_brazilianwax; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_brazilianwax) -======= ->>>>>>> parent of 7e0f70e... 
Added LulzactiveV2,Lagfree,Smartass,SmartassV2,Minmax,InteractiveX Governor #endif +/********************************************************************* + * FREQUENCY TABLE HELPERS * + *********************************************************************/ #define CPUFREQ_ENTRY_INVALID ~0 #define CPUFREQ_TABLE_END ~1 @@ -393,10 +431,12 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, unsigned int relation, unsigned int *index); +/* the following 3 funtions are for cpufreq core use only */ struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); void cpufreq_cpu_put(struct cpufreq_policy *data); +/* the following are really really optional */ extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, From dbf3a235cf04f8e0ed363832ba7b76818263fbf7 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 10:22:53 -0400 Subject: [PATCH 16/35] Revert "Added Brazilianwax governor" This reverts commit 86da7178626c0c32bafbe53b343b7e9e3ddb6849. --- drivers/cpufreq/Kconfig | 46 +- drivers/cpufreq/Makefile | 1 - drivers/cpufreq/cpufreq_brazilianwax.c | 824 ------------------------- include/linux/cpufreq.h | 3 - 4 files changed, 29 insertions(+), 845 deletions(-) delete mode 100644 drivers/cpufreq/cpufreq_brazilianwax.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index e0008dd1..3d6183ff 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -243,6 +243,22 @@ config CPU_FREQ_GOV_CONSERVATIVE tristate "'conservative' cpufreq governor" depends on CPU_FREQ help + 'conservative' - this driver is rather similar to the 'ondemand' + governor both in its source code and its purpose, the difference is + its optimisation for better suitability in a battery powered + environment. The frequency is gracefully increased and decreased + rather than jumping to 100% when speed is required. + + If you have a desktop machine then you should really be considering + the 'ondemand' governor instead, however if you are using a laptop, + PDA or even an AMD64 based computer (due to the unacceptable + step-by-step latency issues between the minimum and maximum frequency + transitions in the CPU) you will probably want to use this governor. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_conservative. + + For details, take a look at linux/Documentation/cpu-freq. config CPU_FREQ_DEFAULT_GOV_SMARTASS2 bool "smartass2" @@ -287,11 +303,14 @@ config CPU_FREQ_DEFAULT_GOV_SMARTASS help Use the CPUFreq governor 'smartass' as default. +endchoice + config CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND bool "intellidemand" select CPU_FREQ_GOV_INTELLIDEMAND select CPU_FREQ_GOV_PERFORMANCE help + Intelligent OnDemand Govneror based on Samsung Patched OnDemand config CPU_FREQ_DEFAULT_GOV_SCARY bool "scary" @@ -320,12 +339,7 @@ config CPU_FREQ_DEFAULT_GOV_ONDEMANDX help Use the CPUFreq governor 'lionheart' as default. -config CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX - bool "brazilianwax" - select CPU_FREQ_GOV_BRAZILIANWAX - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'brazilianwax as default. +endchoice config CPU_FREQ_GOV_PERFORMANCE tristate "'performance' governor" @@ -355,6 +369,7 @@ config CPU_FREQ_DEFAULT_GOV_LAZY select CPU_FREQ_GOV_PERFORMANCE help Use the CPUFreq governor 'lazy' as default. 
+endchoice config CPU_FREQ_GOV_SLP tristate "'slp' cpufreq policy governor" @@ -431,6 +446,8 @@ config CPU_FREQ_GOV_WHEATLEY tristate "'wheatley' cpufreq governor" depends on CPU_FREQ + If in doubt, say N. + config CPU_FREQ_GOV_SMARTASS tristate "'smartass' cpufreq governor" depends on CPU_FREQ @@ -454,9 +471,9 @@ config CPU_FREQ_GOV_SMARTASS2 'smartassV2' - a "smart" optimized governor for the hero! config CPU_FREQ_GOV_INTERACTIVEX - tristate "'interactiveX' cpufreq policy governor" - help - 'interactiveX' - Modified version of interactive with sleep+wake code. +tristate "'interactiveX' cpufreq policy governor" + help + 'interactiveX' - Modified version of interactive with sleep+wake code. config CPU_FREQ_GOV_LAGFREE tristate "'lagfree' cpufreq governor" @@ -532,14 +549,6 @@ config CPU_FREQ_GOV_ONDEMANDX tristate "'ondemandx' cpufreq governor" depends on CPU_FREQ -config CPU_FREQ_GOV_BRAZILIANWAX - tristate "'brazilianwax' cpufreq governor" - depends on CPU_FREQ - help - 'brazilianwax' - a "slightly more agressive smart" optimized governor! - If in doubt, say Y. - - menu "x86 CPU frequency scaling drivers" depends on X86 source "drivers/cpufreq/Kconfig.x86" @@ -554,3 +563,6 @@ menu "PowerPC CPU frequency scaling drivers" depends on PPC32 || PPC64 source "drivers/cpufreq/Kconfig.powerpc" endmenu + +endif +endmenu diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 22e5500d..166a086e 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -36,7 +36,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o obj-$(CONFIG_CPU_FREQ_GOV_SAVAGEDZEN) += cpufreq_savagedzen.o obj-$(CONFIG_CPU_FREQ_GOV_ONDEMANDX) += cpufreq_ondemandx.o -obj-$(CONFIG_CPU_FREQ_GOV_BRAZILIANWAX) += cpufreq_brazilianwax.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o diff --git a/drivers/cpufreq/cpufreq_brazilianwax.c b/drivers/cpufreq/cpufreq_brazilianwax.c deleted file mode 100644 index f7c73442..00000000 --- a/drivers/cpufreq/cpufreq_brazilianwax.c +++ /dev/null @@ -1,824 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_brazilianwax.c - * - * Copyright (C) 2010 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * Author: Erasmux - * - * Based on the interactive governor By Mike Chan (mike@android.com) - * which was adaptated to 2.6.29 kernel by Nadlabak (pavel@doshaska.net) - * - * requires to add - * EXPORT_SYMBOL_GPL(nr_running); - * at the end of kernel/sched.c - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static void (*pm_idle_old)(void); -static atomic_t active_count = ATOMIC_INIT(0); - -struct brazilianwax_info_s { - struct cpufreq_policy *cur_policy; - struct timer_list timer; - u64 time_in_idle; - u64 idle_exit_time; - u64 freq_change_time; - u64 freq_change_time_in_idle; - int cur_cpu_load; - unsigned int force_ramp_up; - unsigned int enable; - int max_speed; - int min_speed; -}; -static DEFINE_PER_CPU(struct brazilianwax_info_s, brazilianwax_info); - -/* Workqueues handle frequency scaling */ -static struct workqueue_struct *up_wq; -static struct workqueue_struct *down_wq; -static struct work_struct freq_scale_work; - -static cpumask_t work_cpumask; -static unsigned int suspended; - -enum { - BRAZILIANWAX_DEBUG_JUMPS=1, - BRAZILIANWAX_DEBUG_LOAD=2 -}; - -/* - * Combination of the above debug flags. - */ -static unsigned long debug_mask; - -/* - * The minimum amount of time to spend at a frequency before we can ramp up. - */ -#define DEFAULT_UP_RATE_US 10000; -static unsigned long up_rate_us; - -/* - * The minimum amount of time to spend at a frequency before we can ramp down. - */ -#define DEFAULT_DOWN_RATE_US 20000; -static unsigned long down_rate_us; - -/* - * When ramping up frequency with no idle cycles jump to at least this frequency. - * Zero disables. Set a very high value to jump to policy max freqeuncy. - */ -#define DEFAULT_UP_MIN_FREQ 2000000 -static unsigned int up_min_freq; - -/* - * When sleep_max_freq>0 the frequency when suspended will be capped - * by this frequency. Also will wake up at max frequency of policy - * to minimize wakeup issues. - * Set sleep_max_freq=0 to disable this behavior. - */ -#define DEFAULT_SLEEP_MAX_FREQ 400000 -static unsigned int sleep_max_freq; - -/* - * The frequency to set when waking up from sleep. - * When sleep_max_freq=0 this will have no effect. - */ -#define DEFAULT_SLEEP_WAKEUP_FREQ 800000 -static unsigned int sleep_wakeup_freq; - -#define UP_THRESHOLD_FREQ 2000000 -static unsigned int threshold_freq; - -/* - * When awake_min_freq>0 the frequency when not suspended will not - * go below this frequency. - * Set awake_min_freq=0 to disable this behavior. - */ -#define DEFAULT_AWAKE_MIN_FREQ 200000 -static unsigned int awake_min_freq; - -static unsigned int suspendfreq = 400000; - -/* - * Sampling rate, I highly recommend to leave it at 2. - */ -#define DEFAULT_SAMPLE_RATE_JIFFIES 2 -static unsigned int sample_rate_jiffies; - -/* - * Minimum Freqeuncy delta when ramping up. - * zero disables and causes to always jump straight to max frequency. - */ -#define DEFAULT_RAMP_UP_STEP 600000 -static unsigned int ramp_up_step; - -/* - * Miminum Freqeuncy delta when ramping down. - * zero disables and will calculate ramp down according to load heuristic. 
- */ -#define DEFAULT_RAMP_DOWN_STEP 400000 -static unsigned int ramp_down_step; - -/* - * CPU freq will be increased if measured load > max_cpu_load; - */ -#define DEFAULT_MAX_CPU_LOAD 45 -static unsigned long max_cpu_load; - -#define DEFAULT_X_CPU_LOAD 70 -static unsigned long x_cpu_load; - -/* - * CPU freq will be decreased if measured load < min_cpu_load; - */ -#define DEFAULT_MIN_CPU_LOAD 25 -static unsigned long min_cpu_load; -#define RAPID_MIN_CPU_LOAD 5 -static unsigned long rapid_min_cpu_load; - - -static int cpufreq_governor_brazilianwax(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX -static -#endif -struct cpufreq_governor cpufreq_gov_brazilianwax = { - .name = "brazilianwax", - .governor = cpufreq_governor_brazilianwax, - .max_transition_latency = 9000000, - .owner = THIS_MODULE, -}; - -static void brazilianwax_update_min_max(struct brazilianwax_info_s *this_brazilianwax, struct cpufreq_policy *policy, int suspend) { - if (suspend) { - this_brazilianwax->min_speed = policy->min; - this_brazilianwax->max_speed = sleep_max_freq; -// this_brazilianwax->max_speed = // sleep_max_freq; but make sure it obeys the policy min/max -// policy->max > sleep_max_freq ? (sleep_max_freq > policy->min ? sleep_max_freq : policy->min) : policy->max; - } else { - this_brazilianwax->min_speed = // awake_min_freq; but make sure it obeys the policy min/max - policy->min < awake_min_freq ? (awake_min_freq < policy->max ? awake_min_freq : policy->max) : policy->min; - this_brazilianwax->max_speed = policy->max; - } -} - -inline static unsigned int validate_freq(struct brazilianwax_info_s *this_brazilianwax, int freq) { - if (freq > this_brazilianwax->max_speed) - return this_brazilianwax->max_speed; - if (freq < this_brazilianwax->min_speed) - return this_brazilianwax->min_speed; - return freq; -} - -static void reset_timer(unsigned long cpu, struct brazilianwax_info_s *this_brazilianwax) { - this_brazilianwax->time_in_idle = get_cpu_idle_time_us(cpu, &this_brazilianwax->idle_exit_time); - mod_timer(&this_brazilianwax->timer, jiffies + sample_rate_jiffies); -} - -static void cpufreq_brazilianwax_timer(unsigned long data) -{ - u64 delta_idle; - u64 delta_time; - int cpu_load; - u64 update_time; - u64 now_idle; - unsigned long new_rate; - - struct brazilianwax_info_s *this_brazilianwax = &per_cpu(brazilianwax_info, data); - struct cpufreq_policy *policy = this_brazilianwax->cur_policy; - - now_idle = get_cpu_idle_time_us(data, &update_time); - - if (this_brazilianwax->idle_exit_time == 0 || update_time == this_brazilianwax->idle_exit_time) - return; - - delta_idle = cputime64_sub(now_idle, this_brazilianwax->time_in_idle); - delta_time = cputime64_sub(update_time, this_brazilianwax->idle_exit_time); - //printk(KERN_INFO "brazilianwaxT: t=%llu i=%llu\n",cputime64_sub(update_time,this_brazilianwax->idle_exit_time),delta_idle); - - // If timer ran less than 1ms after short-term sample started, retry. 
- if (delta_time < 1000) { - if (!timer_pending(&this_brazilianwax->timer)) - reset_timer(data,this_brazilianwax); - return; - } - - if (delta_idle > delta_time) - cpu_load = 0; - else - cpu_load = 100 * (unsigned int)(delta_time - delta_idle) / (unsigned int)delta_time; - - if (debug_mask & BRAZILIANWAX_DEBUG_LOAD) - printk(KERN_INFO "brazilianwaxT @ %d: load %d (delta_time %llu)\n",policy->cur,cpu_load,delta_time); - - this_brazilianwax->cur_cpu_load = cpu_load; - - // Scale up if load is above max or if there where no idle cycles since coming out of idle, - // or when we are above our max speed for a very long time (should only happend if entering sleep - // at high loads) - if ((cpu_load > max_cpu_load || delta_idle == 0) && - !(policy->cur > this_brazilianwax->max_speed && - cputime64_sub(update_time, this_brazilianwax->freq_change_time) > 100*down_rate_us)) { - - if (policy->cur > this_brazilianwax->max_speed) { - reset_timer(data,this_brazilianwax); - } - - if (policy->cur == policy->max) - return; - - if (nr_running() < 1) - return; - - new_rate = up_rate_us; - - // minimize going above 1.8Ghz - if (policy->cur > up_min_freq) new_rate = 75000; - - if (cputime64_sub(update_time, this_brazilianwax->freq_change_time) < new_rate) - return; - - this_brazilianwax->force_ramp_up = 1; - cpumask_set_cpu(data, &work_cpumask); - queue_work(up_wq, &freq_scale_work); - return; - } - - /* - * There is a window where if the cpu utlization can go from low to high - * between the timer expiring, delta_idle will be > 0 and the cpu will - * be 100% busy, preventing idle from running, and this timer from - * firing. So setup another timer to fire to check cpu utlization. - * Do not setup the timer if there is no scheduled work or if at max speed. - */ - if (policy->cur < this_brazilianwax->max_speed && !timer_pending(&this_brazilianwax->timer) && nr_running() > 0) - reset_timer(data,this_brazilianwax); - - if (policy->cur == policy->min) - return; - - /* - * Do not scale down unless we have been at this frequency for the - * minimum sample time. 
- */ - if (cputime64_sub(update_time, this_brazilianwax->freq_change_time) < down_rate_us) - return; - - cpumask_set_cpu(data, &work_cpumask); - queue_work(down_wq, &freq_scale_work); -} - -static void cpufreq_idle(void) -{ - struct brazilianwax_info_s *this_brazilianwax = &per_cpu(brazilianwax_info, smp_processor_id()); - struct cpufreq_policy *policy = this_brazilianwax->cur_policy; - - if (!this_brazilianwax->enable) { - pm_idle_old(); - return; - } - - if (policy->cur == this_brazilianwax->min_speed && timer_pending(&this_brazilianwax->timer)) - del_timer(&this_brazilianwax->timer); - - pm_idle_old(); - - if (!timer_pending(&this_brazilianwax->timer)) - reset_timer(smp_processor_id(), this_brazilianwax); -} - -/* We use the same work function to sale up and down */ -static void cpufreq_brazilianwax_freq_change_time_work(struct work_struct *work) -{ - unsigned int cpu; - int new_freq, old_freq; - unsigned int force_ramp_up; - int cpu_load; - struct brazilianwax_info_s *this_brazilianwax; - struct cpufreq_policy *policy; - unsigned int relation = CPUFREQ_RELATION_L; - cpumask_t tmp_mask = work_cpumask; - for_each_cpu(cpu, &tmp_mask) { - this_brazilianwax = &per_cpu(brazilianwax_info, cpu); - policy = this_brazilianwax->cur_policy; - cpu_load = this_brazilianwax->cur_cpu_load; - force_ramp_up = this_brazilianwax->force_ramp_up && nr_running() > 1; - this_brazilianwax->force_ramp_up = 0; - - if (force_ramp_up || cpu_load > max_cpu_load) { - if (!suspended) { - if (force_ramp_up && up_min_freq && policy->cur < up_min_freq) { - // imoseyon - ramp up faster - new_freq = up_min_freq; - relation = CPUFREQ_RELATION_L; - } else if (ramp_up_step) { - new_freq = policy->cur + ramp_up_step; - relation = CPUFREQ_RELATION_H; - } else { - new_freq = this_brazilianwax->max_speed; - relation = CPUFREQ_RELATION_H; - } - // try to minimize going above 1.8Ghz - if ((new_freq > threshold_freq) && (cpu_load < 95)) { - new_freq = threshold_freq; - relation = CPUFREQ_RELATION_H; - } - } else { - new_freq = policy->cur + 150000; - if (new_freq > suspendfreq) new_freq = suspendfreq; - relation = CPUFREQ_RELATION_H; - } - - } else if (cpu_load < min_cpu_load) { - if (cpu_load < rapid_min_cpu_load) { - new_freq = awake_min_freq; - } else if (ramp_down_step) { - new_freq = policy->cur - ramp_down_step; - } else { - cpu_load += 100 - max_cpu_load; // dummy load. 
- new_freq = policy->cur * cpu_load / 100; - } - relation = CPUFREQ_RELATION_L; - } - else new_freq = policy->cur; - - old_freq = policy->cur; - new_freq = validate_freq(this_brazilianwax,new_freq); - - if (new_freq != policy->cur) { - if (debug_mask & BRAZILIANWAX_DEBUG_JUMPS) - printk(KERN_INFO "SmartassQ: jumping from %d to %d\n",policy->cur,new_freq); - - __cpufreq_driver_target(policy, new_freq, relation); - - this_brazilianwax->freq_change_time_in_idle = - get_cpu_idle_time_us(cpu,&this_brazilianwax->freq_change_time); - - if (relation == CPUFREQ_RELATION_L && old_freq == policy->cur) { - // step down one more time - new_freq = new_freq - 100000; - __cpufreq_driver_target(policy, new_freq, relation); - this_brazilianwax->freq_change_time_in_idle = - get_cpu_idle_time_us(cpu,&this_brazilianwax->freq_change_time); - } - if (relation == CPUFREQ_RELATION_H && old_freq == policy->cur) { - // step up one more time - new_freq = new_freq + 100000; - __cpufreq_driver_target(policy, new_freq, relation); - this_brazilianwax->freq_change_time_in_idle = - get_cpu_idle_time_us(cpu,&this_brazilianwax->freq_change_time); - } - } - - cpumask_clear_cpu(cpu, &work_cpumask); - } -} - -static ssize_t show_debug_mask(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%lu\n", debug_mask); -} - -static ssize_t store_debug_mask(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0) - debug_mask = input; - return res; -} - -static struct freq_attr debug_mask_attr = __ATTR(debug_mask, 0644, - show_debug_mask, store_debug_mask); - -static ssize_t show_up_rate_us(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%lu\n", up_rate_us); -} - -static ssize_t store_up_rate_us(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0 && input <= 100000000) - up_rate_us = input; - return res; -} - -static struct freq_attr up_rate_us_attr = __ATTR(up_rate_us, 0644, - show_up_rate_us, store_up_rate_us); - -static ssize_t show_down_rate_us(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%lu\n", down_rate_us); -} - -static ssize_t store_down_rate_us(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0 && input <= 100000000) - down_rate_us = input; - return res; -} - -static struct freq_attr down_rate_us_attr = __ATTR(down_rate_us, 0644, - show_down_rate_us, store_down_rate_us); - -static ssize_t show_up_min_freq(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%u\n", up_min_freq); -} - -static ssize_t store_up_min_freq(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - up_min_freq = input; - return res; -} - -static struct freq_attr up_min_freq_attr = __ATTR(up_min_freq, 0644, - show_up_min_freq, store_up_min_freq); - -static ssize_t show_sleep_max_freq(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%u\n", sleep_max_freq); -} - -static ssize_t store_sleep_max_freq(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - sleep_max_freq = input; - return res; -} - 
-static struct freq_attr sleep_max_freq_attr = __ATTR(sleep_max_freq, 0644, - show_sleep_max_freq, store_sleep_max_freq); - -static ssize_t show_sleep_wakeup_freq(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%u\n", sleep_wakeup_freq); -} - -static ssize_t store_sleep_wakeup_freq(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - sleep_wakeup_freq = input; - return res; -} - -static struct freq_attr sleep_wakeup_freq_attr = __ATTR(sleep_wakeup_freq, 0644, - show_sleep_wakeup_freq, store_sleep_wakeup_freq); - -static ssize_t show_awake_min_freq(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%u\n", awake_min_freq); -} - -static ssize_t store_awake_min_freq(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - awake_min_freq = input; - return res; -} - -static struct freq_attr awake_min_freq_attr = __ATTR(awake_min_freq, 0644, - show_awake_min_freq, store_awake_min_freq); - -static ssize_t show_sample_rate_jiffies(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%u\n", sample_rate_jiffies); -} - -static ssize_t store_sample_rate_jiffies(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 0 && input <= 1000) - sample_rate_jiffies = input; - return res; -} - -static struct freq_attr sample_rate_jiffies_attr = __ATTR(sample_rate_jiffies, 0644, - show_sample_rate_jiffies, store_sample_rate_jiffies); - -static ssize_t show_ramp_up_step(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%u\n", ramp_up_step); -} - -static ssize_t store_ramp_up_step(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - ramp_up_step = input; - return res; -} - -static struct freq_attr ramp_up_step_attr = __ATTR(ramp_up_step, 0644, - show_ramp_up_step, store_ramp_up_step); - -static ssize_t show_ramp_down_step(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%u\n", ramp_down_step); -} - -static ssize_t store_ramp_down_step(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - ramp_down_step = input; - return res; -} - -static struct freq_attr ramp_down_step_attr = __ATTR(ramp_down_step, 0644, - show_ramp_down_step, store_ramp_down_step); - -static ssize_t show_max_cpu_load(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%lu\n", max_cpu_load); -} - -static ssize_t store_max_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 0 && input <= 100) - max_cpu_load = input; - return res; -} - -static struct freq_attr max_cpu_load_attr = __ATTR(max_cpu_load, 0644, - show_max_cpu_load, store_max_cpu_load); - -static ssize_t show_min_cpu_load(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%lu\n", min_cpu_load); -} - -static ssize_t store_min_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = 
strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 0 && input < 100) - min_cpu_load = input; - return res; -} - -static struct freq_attr min_cpu_load_attr = __ATTR(min_cpu_load, 0644, - show_min_cpu_load, store_min_cpu_load); - -static struct attribute * brazilianwax_attributes[] = { - &debug_mask_attr.attr, - &up_rate_us_attr.attr, - &down_rate_us_attr.attr, - &up_min_freq_attr.attr, - &sleep_max_freq_attr.attr, - &sleep_wakeup_freq_attr.attr, - &awake_min_freq_attr.attr, - &sample_rate_jiffies_attr.attr, - &ramp_up_step_attr.attr, - &ramp_down_step_attr.attr, - &max_cpu_load_attr.attr, - &min_cpu_load_attr.attr, - NULL, -}; - -static struct attribute_group brazilianwax_attr_group = { - .attrs = brazilianwax_attributes, - .name = "brazilianwax", -}; - -static void brazilianwax_suspend(int cpu, int suspend) -{ - struct brazilianwax_info_s *this_brazilianwax = &per_cpu(brazilianwax_info, smp_processor_id()); - struct cpufreq_policy *policy = this_brazilianwax->cur_policy; - unsigned int new_freq; - - if (!this_brazilianwax->enable || sleep_max_freq==0) // disable behavior for sleep_max_freq==0 - return; - - brazilianwax_update_min_max(this_brazilianwax,policy,suspend); - if (!suspend) { // resume at max speed: - suspended=0; - new_freq = validate_freq(this_brazilianwax,sleep_wakeup_freq); - - if (debug_mask & BRAZILIANWAX_DEBUG_JUMPS) - printk(KERN_INFO "SmartassS: awaking at %d\n",new_freq); - - __cpufreq_driver_target(policy, new_freq, - CPUFREQ_RELATION_L); - - if (policy->cur < this_brazilianwax->max_speed && !timer_pending(&this_brazilianwax->timer)) - reset_timer(smp_processor_id(),this_brazilianwax); - pr_info("[imoseyon] brazilianwax awake at %d\n", policy->cur); - } else { - // to avoid wakeup issues with quick sleep/wakeup don't change actual frequency when entering sleep - // to allow some time to settle down. - // we reset the timer, if eventually, even at full load the timer will lower the freqeuncy. - reset_timer(smp_processor_id(),this_brazilianwax); - - this_brazilianwax->freq_change_time_in_idle = - get_cpu_idle_time_us(cpu,&this_brazilianwax->freq_change_time); - - if (debug_mask & BRAZILIANWAX_DEBUG_JUMPS) - printk(KERN_INFO "SmartassS: suspending at %d\n",policy->cur); - __cpufreq_driver_target(policy, suspendfreq, CPUFREQ_RELATION_H); - pr_info("[imoseyon] brazilianwax suspending with %d\n", policy->cur); - suspended=1; - } -} - -static void brazilianwax_early_suspend(struct early_suspend *handler) { - int i; - for_each_online_cpu(i) - brazilianwax_suspend(i,1); -} - -static void brazilianwax_late_resume(struct early_suspend *handler) { - int i; - for_each_online_cpu(i) - brazilianwax_suspend(i,0); -} - -static struct early_suspend brazilianwax_power_suspend = { - .suspend = brazilianwax_early_suspend, - .resume = brazilianwax_late_resume, - .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, -}; - -static int cpufreq_governor_brazilianwax(struct cpufreq_policy *new_policy, - unsigned int event) -{ - unsigned int cpu = new_policy->cpu; - int rc; - struct brazilianwax_info_s *this_brazilianwax = &per_cpu(brazilianwax_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!new_policy->cur)) - return -EINVAL; - - /* - * Do not register the idle hook and create sysfs - * entries if we have already done so. 
- */ - if (atomic_inc_return(&active_count) <= 1) { - rc = sysfs_create_group(&new_policy->kobj, &brazilianwax_attr_group); - if (rc) - return rc; - pm_idle_old = pm_idle; - pm_idle = cpufreq_idle; - } - - this_brazilianwax->cur_policy = new_policy; - this_brazilianwax->enable = 1; - - // imoseyon - should only register for suspend when governor active - register_early_suspend(&brazilianwax_power_suspend); - pr_info("[imoseyon] brazilianwax active\n"); - - // notice no break here! - - case CPUFREQ_GOV_LIMITS: - brazilianwax_update_min_max(this_brazilianwax,new_policy,suspended); - if (this_brazilianwax->cur_policy->cur != this_brazilianwax->max_speed) { - if (debug_mask & BRAZILIANWAX_DEBUG_JUMPS) - printk(KERN_INFO "SmartassI: initializing to %d\n",this_brazilianwax->max_speed); - __cpufreq_driver_target(new_policy, this_brazilianwax->max_speed, CPUFREQ_RELATION_H); - } - break; - - case CPUFREQ_GOV_STOP: - del_timer(&this_brazilianwax->timer); - this_brazilianwax->enable = 0; - - if (atomic_dec_return(&active_count) > 1) - return 0; - sysfs_remove_group(&new_policy->kobj, - &brazilianwax_attr_group); - - pm_idle = pm_idle_old; - // unregister when governor exits - unregister_early_suspend(&brazilianwax_power_suspend); - pr_info("[imoseyon] brazilianwax inactive\n"); - break; - } - - return 0; -} - - -static int __init cpufreq_brazilianwax_init(void) -{ - unsigned int i; - struct brazilianwax_info_s *this_brazilianwax; - debug_mask = 0; - up_rate_us = DEFAULT_UP_RATE_US; - down_rate_us = DEFAULT_DOWN_RATE_US; - up_min_freq = DEFAULT_UP_MIN_FREQ; - sleep_max_freq = DEFAULT_SLEEP_MAX_FREQ; - sleep_wakeup_freq = DEFAULT_SLEEP_WAKEUP_FREQ; - threshold_freq = UP_THRESHOLD_FREQ; - awake_min_freq = DEFAULT_AWAKE_MIN_FREQ; - sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES; - ramp_up_step = DEFAULT_RAMP_UP_STEP; - ramp_down_step = DEFAULT_RAMP_DOWN_STEP; - max_cpu_load = DEFAULT_MAX_CPU_LOAD; - x_cpu_load = DEFAULT_X_CPU_LOAD; - min_cpu_load = DEFAULT_MIN_CPU_LOAD; - rapid_min_cpu_load = RAPID_MIN_CPU_LOAD; - - suspended = 0; - - /* Initalize per-cpu data: */ - for_each_possible_cpu(i) { - this_brazilianwax = &per_cpu(brazilianwax_info, i); - this_brazilianwax->enable = 0; - this_brazilianwax->cur_policy = 0; - this_brazilianwax->force_ramp_up = 0; - this_brazilianwax->max_speed = DEFAULT_SLEEP_WAKEUP_FREQ; - this_brazilianwax->min_speed = DEFAULT_AWAKE_MIN_FREQ; - this_brazilianwax->time_in_idle = 0; - this_brazilianwax->idle_exit_time = 0; - this_brazilianwax->freq_change_time = 0; - this_brazilianwax->freq_change_time_in_idle = 0; - this_brazilianwax->cur_cpu_load = 0; - // intialize timer: - init_timer_deferrable(&this_brazilianwax->timer); - this_brazilianwax->timer.function = cpufreq_brazilianwax_timer; - this_brazilianwax->timer.data = i; - } - - /* Scale up is high priority */ - up_wq = create_workqueue("kbrazilianwax_up"); - down_wq = create_workqueue("kbrazilianwax_down"); - - INIT_WORK(&freq_scale_work, cpufreq_brazilianwax_freq_change_time_work); - - pr_info("[imoseyon] brazilianwax enter\n"); - - return cpufreq_register_governor(&cpufreq_gov_brazilianwax); -} - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX -pure_initcall(cpufreq_brazilianwax_init); -#else -module_init(cpufreq_brazilianwax_init); -#endif - -static void __exit cpufreq_brazilianwax_exit(void) -{ - pr_info("[imoseyon] brazilianwax exit\n"); - cpufreq_unregister_governor(&cpufreq_gov_brazilianwax); - destroy_workqueue(up_wq); - destroy_workqueue(down_wq); -} - -module_exit(cpufreq_brazilianwax_exit); - -MODULE_AUTHOR 
("Erasmux/imoseyon"); -MODULE_DESCRIPTION ("'cpufreq_brazilianwax' - A smart cpufreq governor optimized for the hero!"); -MODULE_LICENSE ("GPL"); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 8a6a819b..2d639de5 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -400,9 +400,6 @@ extern struct cpufreq_governor cpufreq_gov_savagedzen; #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMANDX) extern struct cpufreq_governor cpufreq_gov_ondemandx; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_ondemandx) -#elif defined(CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX) -extern struct cpufreq_governor cpufreq_gov_brazilianwax; -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_brazilianwax) #endif From e1027e5c4ec929e5641326b347229b1d2a23c20f Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 10:23:07 -0400 Subject: [PATCH 17/35] Revert "Added OndemandX governor and cleanup of cpu driver files" This reverts commit 7f1503e80dde3441bb4d788ed5ddcc1f3e143b5d. --- drivers/cpufreq/Kconfig | 80 +-- drivers/cpufreq/Makefile | 6 - drivers/cpufreq/cpufreq_ondemandx.c | 829 ---------------------------- drivers/cpufreq/cpufreq_smartass2.c | 22 +- include/linux/cpufreq.h | 15 - 5 files changed, 31 insertions(+), 921 deletions(-) delete mode 100644 drivers/cpufreq/cpufreq_ondemandx.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 3d6183ff..ba4d2b48 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -305,41 +305,31 @@ config CPU_FREQ_DEFAULT_GOV_SMARTASS endchoice -config CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND - bool "intellidemand" - select CPU_FREQ_GOV_INTELLIDEMAND - select CPU_FREQ_GOV_PERFORMANCE - help - Intelligent OnDemand Govneror based on Samsung Patched OnDemand - -config CPU_FREQ_DEFAULT_GOV_SCARY - bool "scary" - select CPU_FREQ_GOV_SCARY - select CPU_FREQ_GOV_SCARY - help - Use the CPUFreq governor 'scary' as default. +config CPU_FREQ_GOV_DANCEDANCE + tristate "'dancedance' cpufreq governor" + depends on CPU_FREQ -config CPU_FREQ_DEFAULT_GOV_LIONHEART - bool "lionheart" - select CPU_FREQ_GOV_LIONHEART - help - Use the CPUFreq governor 'lionheart' as default. +config CPU_FREQ_GOV_NIGHTMARE + tristate "'nightmare' cpufreq governor" + depends on CPU_FREQ -config CPU_FREQ_DEFAULT_GOV_SAVAGEDZEN - bool "savagedzen" - select CPU_FREQ_GOV_SAVAGEDZEN - select CPU_FREQ_GOV_PERFORMANCE +config CPU_FREQ_GOV_ONDEMAND + tristate "'ondemand' cpufreq policy governor" + select CPU_FREQ_TABLE help - Use the CPUFreq governor 'lionheart' as default. + 'ondemand' - This driver adds a dynamic cpufreq policy governor. + The governor does a periodic polling and + changes frequency based on the CPU utilization. + The support for this governor depends on CPU capability to + do fast frequency switching (i.e, very low latency frequency + transitions). -config CPU_FREQ_DEFAULT_GOV_ONDEMANDX - bool "ondemandx" - select CPU_FREQ_GOV_ONDEMANDX - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lionheart' as default. + To compile this driver as a module, choose M here: the + module will be called cpufreq_ondemand. -endchoice + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. config CPU_FREQ_GOV_PERFORMANCE tristate "'performance' governor" @@ -515,40 +505,10 @@ config CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER Sampling latency rate multiplied by the cpu switch latency. Affects governor polling. 
-config CPU_FREQ_GOV_SCARY - tristate "'scary' cpufreq governor" - depends on CPU_FREQ - help - scary - a governor for cabbages - - If in doubt, say N. - config CPU_FREQ_GOV_LAZY tristate "'lazy' cpufreq governor" depends on CPU_FREQ -config CPU_FREQ_GOV_INTELLIDEMAND - tristate "'intellidemand' cpufreq governor" - depends on CPU_FREQ - help - 'intellidemand' - an intelligent ondemand governor - -config CPU_FREQ_GOV_LIONHEART - tristate "'lionheart' cpufreq governor" - depends on CPU_FREQ - -config CPU_FREQ_GOV_WHEATLEY - tristate "'wheatley' cpufreq governor" - depends on CPU_FREQ - -config CPU_FREQ_GOV_SAVAGEDZEN - tristate "'savagedzen' cpufreq governor" - depends on CPU_FREQ - -config CPU_FREQ_GOV_ONDEMANDX - tristate "'ondemandx' cpufreq governor" - depends on CPU_FREQ - menu "x86 CPU frequency scaling drivers" depends on X86 source "drivers/cpufreq/Kconfig.x86" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 166a086e..da12e97c 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -30,12 +30,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_LAGFREE) += cpufreq_lagfree.o obj-$(CONFIG_CPU_FREQ_GOV_LULZACTIVE) += cpufreq_lulzactive.o obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o obj-$(CONFIG_CPU_FREQ_GOV_LAZY) += cpufreq_lazy.o -obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND)+= cpufreq_intellidemand.o -obj-$(CONFIG_CPU_FREQ_GOV_SCARY) += cpufreq_scary.o -obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o -obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o -obj-$(CONFIG_CPU_FREQ_GOV_SAVAGEDZEN) += cpufreq_savagedzen.o -obj-$(CONFIG_CPU_FREQ_GOV_ONDEMANDX) += cpufreq_ondemandx.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o diff --git a/drivers/cpufreq/cpufreq_ondemandx.c b/drivers/cpufreq/cpufreq_ondemandx.c deleted file mode 100644 index 31e9b99a..00000000 --- a/drivers/cpufreq/cpufreq_ondemandx.c +++ /dev/null @@ -1,829 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_ondemandx.c - * - * Copyright (C) 2001 Russell King - * (C) 2003 Venkatesh Pallipadi . - * Jun Nakajima - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ - -#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (5) -#define DEF_FREQUENCY_UP_THRESHOLD (85) -#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (10) -#define MICRO_FREQUENCY_UP_THRESHOLD (80) -#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) -#define MIN_FREQUENCY_UP_THRESHOLD (11) -#define MAX_FREQUENCY_UP_THRESHOLD (100) -#define DEF_SUSPEND_FREQ (500000) - -/* - * The polling frequency of this governor depends on the capability of - * the processor. Default polling frequency is 1000 times the transition - * latency of the processor. The governor will work on any processor with - * transition latency <= 10mS, using appropriate sampling - * rate. - * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) - * this governor will not work. - * All times here are in uS. 
- */ -#define MIN_SAMPLING_RATE_RATIO (1) - -static unsigned int min_sampling_rate; - -#define LATENCY_MULTIPLIER (1000) -#define MIN_LATENCY_MULTIPLIER (100) -#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) - -static void do_dbs_timer(struct work_struct *work); -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMANDX -static -#endif -struct cpufreq_governor cpufreq_gov_ondemandx = { - .name = "ondemandx", - .governor = cpufreq_governor_dbs, - .max_transition_latency = TRANSITION_LATENCY_LIMIT, - .owner = THIS_MODULE, -}; - -/* Sampling types */ -enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; - -struct cpu_dbs_info_s { - cputime64_t prev_cpu_idle; - cputime64_t prev_cpu_iowait; - cputime64_t prev_cpu_wall; - cputime64_t prev_cpu_nice; - struct cpufreq_policy *cur_policy; - struct delayed_work work; - struct cpufreq_frequency_table *freq_table; - unsigned int freq_lo; - unsigned int freq_lo_jiffies; - unsigned int freq_hi_jiffies; - int cpu; - unsigned int sample_type:1; - /* - * percpu mutex that serializes governor limit change with - * do_dbs_timer invocation. We do not want do_dbs_timer to run - * when user is changing the governor or limits. - */ - struct mutex timer_mutex; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); - -static unsigned int dbs_enable; /* number of CPUs using this policy */ - -/* - * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on - * different CPUs. It protects dbs_enable in governor start/stop. - */ -static DEFINE_MUTEX(dbs_mutex); - -static struct workqueue_struct *kondemandx_wq; - -static struct dbs_tuners { - unsigned int sampling_rate; - unsigned int up_threshold; - unsigned int down_differential; - unsigned int ignore_nice; - unsigned int powersave_bias; - unsigned int io_is_busy; - unsigned int suspend_freq; -} dbs_tuners_ins = { - .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, - .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, - .ignore_nice = 0, - .powersave_bias = 0, - .suspend_freq = DEF_SUSPEND_FREQ, -}; - -// used for imoseyon's mods -static unsigned int suspended = 0; -static void ondemandx_suspend(int suspend) -{ - struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, smp_processor_id()); - if (dbs_enable==0) return; - if (!suspend) { // resume at max speed: - suspended = 0; - __cpufreq_driver_target(dbs_info->cur_policy, dbs_info->cur_policy->max, - CPUFREQ_RELATION_L); - pr_info("[imoseyon] ondemandx awake at %d\n", dbs_info->cur_policy->cur); - } else { - suspended = 1; - // let's give it a little breathing room - __cpufreq_driver_target(dbs_info->cur_policy, dbs_tuners_ins.suspend_freq, CPUFREQ_RELATION_H); - pr_info("[imoseyon] ondemandx suspended at %d\n", dbs_info->cur_policy->cur); - } -} - -static void ondemandx_early_suspend(struct early_suspend *handler) { - ondemandx_suspend(1); -} - -static void ondemandx_late_resume(struct early_suspend *handler) { - ondemandx_suspend(0); -} - -static struct early_suspend ondemandx_power_suspend = { - .suspend = ondemandx_early_suspend, - .resume = ondemandx_late_resume, - .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, -}; - -static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, - cputime64_t *wall) -{ - cputime64_t idle_time; - cputime64_t cur_wall_time; - cputime64_t busy_time; - - cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); - busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, - kstat_cpu(cpu).cpustat.system); - - busy_time = 
cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); - - idle_time = cputime64_sub(cur_wall_time, busy_time); - if (wall) - *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); - - return (cputime64_t)jiffies_to_usecs(idle_time); -} - -static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) -{ - u64 idle_time = get_cpu_idle_time_us(cpu, wall); - - if (idle_time == -1ULL) - return get_cpu_idle_time_jiffy(cpu, wall); - - return idle_time; -} - -static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) -{ - u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); - - if (iowait_time == -1ULL) - return 0; - - return iowait_time; -} - -/* - * Find right freq to be set now with powersave_bias on. - * Returns the freq_hi to be used right now and will set freq_hi_jiffies, - * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. - */ -static unsigned int powersave_bias_target(struct cpufreq_policy *policy, - unsigned int freq_next, - unsigned int relation) -{ - unsigned int freq_req, freq_reduc, freq_avg; - unsigned int freq_hi, freq_lo; - unsigned int index = 0; - unsigned int jiffies_total, jiffies_hi, jiffies_lo; - struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, - policy->cpu); - - if (!dbs_info->freq_table) { - dbs_info->freq_lo = 0; - dbs_info->freq_lo_jiffies = 0; - return freq_next; - } - - cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, - relation, &index); - freq_req = dbs_info->freq_table[index].frequency; - freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000; - freq_avg = freq_req - freq_reduc; - - /* Find freq bounds for freq_avg in freq_table */ - index = 0; - cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, - CPUFREQ_RELATION_H, &index); - freq_lo = dbs_info->freq_table[index].frequency; - index = 0; - cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, - CPUFREQ_RELATION_L, &index); - freq_hi = dbs_info->freq_table[index].frequency; - - /* Find out how long we have to be in hi and lo freqs */ - if (freq_hi == freq_lo) { - dbs_info->freq_lo = 0; - dbs_info->freq_lo_jiffies = 0; - return freq_lo; - } - jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - jiffies_hi = (freq_avg - freq_lo) * jiffies_total; - jiffies_hi += ((freq_hi - freq_lo) / 2); - jiffies_hi /= (freq_hi - freq_lo); - jiffies_lo = jiffies_total - jiffies_hi; - dbs_info->freq_lo = freq_lo; - dbs_info->freq_lo_jiffies = jiffies_lo; - dbs_info->freq_hi_jiffies = jiffies_hi; - return freq_hi; -} - -static void ondemandx_powersave_bias_init_cpu(int cpu) -{ - struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - dbs_info->freq_table = cpufreq_frequency_get_table(cpu); - dbs_info->freq_lo = 0; -} - -static void ondemandx_powersave_bias_init(void) -{ - int i; - for_each_online_cpu(i) { - ondemandx_powersave_bias_init_cpu(i); - } -} - -/************************** sysfs interface ************************/ - -static ssize_t show_sampling_rate_min(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", min_sampling_rate); -} - -define_one_global_ro(sampling_rate_min); - -/* cpufreq_ondemand Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct kobject *kobj, struct 
attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ -} -show_one(sampling_rate, sampling_rate); -show_one(io_is_busy, io_is_busy); -show_one(up_threshold, up_threshold); -show_one(ignore_nice_load, ignore_nice); -show_one(powersave_bias, powersave_bias); -show_one(suspend_freq, suspend_freq); - -static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.io_is_busy = !!input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || - input < MIN_FREQUENCY_UP_THRESHOLD) { - return -EINVAL; - } - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.up_threshold = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - unsigned int j; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - if (input > 1) - input = 1; - - mutex_lock(&dbs_mutex); - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - mutex_unlock(&dbs_mutex); - return count; - } - dbs_tuners_ins.ignore_nice = input; - - /* we need to re-evaluate prev_cpu_idle */ - for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; - dbs_info = &per_cpu(od_cpu_dbs_info, j); - dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; - - } - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - if (input > 1000) - input = 1000; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.powersave_bias = input; - ondemandx_powersave_bias_init(); - mutex_unlock(&dbs_mutex); - return count; -} - -static ssize_t store_suspend_freq(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - if (input > 1000000) - input = 1000000; - - if (input < 200000) - input = 200000; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.suspend_freq = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -define_one_global_rw(sampling_rate); -define_one_global_rw(io_is_busy); -define_one_global_rw(up_threshold); -define_one_global_rw(ignore_nice_load); -define_one_global_rw(powersave_bias); -define_one_global_rw(suspend_freq); - -static struct attribute *dbs_attributes[] = { - &sampling_rate_min.attr, - &sampling_rate.attr, - &up_threshold.attr, - &ignore_nice_load.attr, - &powersave_bias.attr, - &io_is_busy.attr, - &suspend_freq.attr, - NULL -}; - -static 
struct attribute_group dbs_attr_group = { - .attrs = dbs_attributes, - .name = "ondemandx", -}; - -/************************** sysfs end ************************/ - -static inline void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) -{ - if (dbs_tuners_ins.powersave_bias) - freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); - //else if (p->cur == p->max) - // return; - if (suspended && freq > dbs_tuners_ins.suspend_freq) { - freq = dbs_tuners_ins.suspend_freq; - __cpufreq_driver_target(p, freq, CPUFREQ_RELATION_H); - } else - __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ? - CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); -} - -static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) -{ - unsigned int max_load_freq; - - struct cpufreq_policy *policy; - unsigned int j; - - this_dbs_info->freq_lo = 0; - policy = this_dbs_info->cur_policy; - - /* - * Every sampling_rate, we check, if current idle time is less - * than 20% (default), then we try to increase frequency - * Every sampling_rate, we look for a the lowest - * frequency which can sustain the load while keeping idle time over - * 30%. If such a frequency exist, we try to decrease to this frequency. - * - * Any frequency increase takes it to the maximum frequency. - * Frequency reduction happens at minimum steps of - * 5% (default) of current frequency - */ - - /* Get Absolute Load - in terms of freq */ - max_load_freq = 0; - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; - unsigned int idle_time, wall_time, iowait_time; - unsigned int load, load_freq; - int freq_avg; - - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - - cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); - cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); - - wall_time = (unsigned int) cputime64_sub(cur_wall_time, - j_dbs_info->prev_cpu_wall); - j_dbs_info->prev_cpu_wall = cur_wall_time; - - idle_time = (unsigned int) cputime64_sub(cur_idle_time, - j_dbs_info->prev_cpu_idle); - j_dbs_info->prev_cpu_idle = cur_idle_time; - - iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, - j_dbs_info->prev_cpu_iowait); - j_dbs_info->prev_cpu_iowait = cur_iowait_time; - - if (dbs_tuners_ins.ignore_nice) { - cputime64_t cur_nice; - unsigned long cur_nice_jiffies; - - cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, - j_dbs_info->prev_cpu_nice); - /* - * Assumption: nice time between sampling periods will - * be less than 2^32 jiffies for 32 bit sys - */ - cur_nice_jiffies = (unsigned long) - cputime64_to_jiffies64(cur_nice); - - j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; - idle_time += jiffies_to_usecs(cur_nice_jiffies); - } - - /* - * For the purpose of ondemandx, waiting for disk IO is an - * indication that you're performance critical, and not that - * the system is actually idle. So subtract the iowait time - * from the cpu idle time. 
- */ - - if (idle_time >= iowait_time) - idle_time -= iowait_time; - - if (unlikely(!wall_time || wall_time < idle_time)) - continue; - - load = 100 * (wall_time - idle_time) / wall_time; - - freq_avg = __cpufreq_driver_getavg(policy, j); - if (freq_avg <= 0) - freq_avg = policy->cur; - - load_freq = load * freq_avg; - if (load_freq > max_load_freq) - max_load_freq = load_freq; - } - - /* Check for frequency increase */ - if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) { - /* If switching to max speed, apply sampling_down_factor */ - dbs_freq_increase(policy, policy->max); - return; - } - - /* Check for frequency decrease */ - /* if we cannot reduce the frequency anymore, break out early */ - if (policy->cur == policy->min) { - __cpufreq_driver_target(policy, policy->min, - CPUFREQ_RELATION_L); - return; - } - - /* - * The optimal frequency is the frequency that is the lowest that - * can support the current CPU usage without triggering the up - * policy. To be safe, we focus 10 points under the threshold. - */ - if (max_load_freq < - (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * - policy->cur) { - unsigned int freq_next; - freq_next = max_load_freq / - (dbs_tuners_ins.up_threshold - - dbs_tuners_ins.down_differential); - - if (freq_next < policy->min) - freq_next = policy->min; - - if (!dbs_tuners_ins.powersave_bias) { - __cpufreq_driver_target(policy, freq_next, - CPUFREQ_RELATION_L); - } else { - int freq = powersave_bias_target(policy, freq_next, - CPUFREQ_RELATION_L); - __cpufreq_driver_target(policy, freq, - CPUFREQ_RELATION_L); - } - } -} - -static void do_dbs_timer(struct work_struct *work) -{ - struct cpu_dbs_info_s *dbs_info = - container_of(work, struct cpu_dbs_info_s, work.work); - unsigned int cpu = dbs_info->cpu; - int sample_type = dbs_info->sample_type; - - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - -#if 0 - /* Don't care too much about synchronizing the workqueue in both cpus */ - if (num_online_cpus() > 1) - delay -= jiffies % delay; -#endif - - mutex_lock(&dbs_info->timer_mutex); - - /* Common NORMAL_SAMPLE setup */ - dbs_info->sample_type = DBS_NORMAL_SAMPLE; - if (!dbs_tuners_ins.powersave_bias || - sample_type == DBS_NORMAL_SAMPLE) { - dbs_check_cpu(dbs_info); - if (dbs_info->freq_lo) { - /* Setup timer for SUB_SAMPLE */ - dbs_info->sample_type = DBS_SUB_SAMPLE; - delay = dbs_info->freq_hi_jiffies; - } - } else { - if (!suspended) - __cpufreq_driver_target(dbs_info->cur_policy, - dbs_info->freq_lo, CPUFREQ_RELATION_H); - } - queue_delayed_work_on(cpu, kondemandx_wq, &dbs_info->work, delay); - mutex_unlock(&dbs_info->timer_mutex); -} - -static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) -{ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - dbs_info->sample_type = DBS_NORMAL_SAMPLE; - INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); - queue_delayed_work_on(dbs_info->cpu, kondemandx_wq, &dbs_info->work, - delay); -} - -static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) -{ - cancel_delayed_work_sync(&dbs_info->work); -} - -/* - * Not all CPUs want IO time to be accounted as busy; this dependson how - * efficient idling at a higher frequency/voltage is. - * Pavel Machek says this is not so for various generations of AMD and old - * Intel systems. - * Mike Chan (androidlcom) calis this is also not true for ARM. 
- * Because of this, whitelist specific known (series) of CPUs by default, and - * leave all others up to the user. - */ -static int should_io_be_busy(void) -{ -#if defined(CONFIG_X86) - /* - * For Intel, Core 2 (model 15) andl later have an efficient idle. - */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - boot_cpu_data.x86 == 6 && - boot_cpu_data.x86_model >= 15) - return 1; -#endif -#if defined(CONFIG_ARM) - return 1; -#endif - return 0; -} - -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int j; - int rc; - - this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - mutex_lock(&dbs_mutex); - - dbs_enable++; - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) { - j_dbs_info->prev_cpu_nice = - kstat_cpu(j).cpustat.nice; - } - } - this_dbs_info->cpu = cpu; - ondemandx_powersave_bias_init_cpu(cpu); - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 1) { - unsigned int latency; - - rc = sysfs_create_group(cpufreq_global_kobject, - &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - /* policy latency is in nS. Convert it to uS first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - /* Bring kernel and HW constraints together */ - min_sampling_rate = max(min_sampling_rate, - MIN_LATENCY_MULTIPLIER * latency); - dbs_tuners_ins.sampling_rate = - max(min_sampling_rate, - latency * LATENCY_MULTIPLIER); - dbs_tuners_ins.io_is_busy = should_io_be_busy(); - } - mutex_unlock(&dbs_mutex); - - mutex_init(&this_dbs_info->timer_mutex); - dbs_timer_init(this_dbs_info); - register_early_suspend(&ondemandx_power_suspend); - pr_info("[imoseyon] ondemandx active\n"); - break; - - case CPUFREQ_GOV_STOP: - dbs_timer_exit(this_dbs_info); - - mutex_lock(&dbs_mutex); - mutex_destroy(&this_dbs_info->timer_mutex); - dbs_enable--; - mutex_unlock(&dbs_mutex); - if (!dbs_enable) - sysfs_remove_group(cpufreq_global_kobject, - &dbs_attr_group); - unregister_early_suspend(&ondemandx_power_suspend); - pr_info("[imoseyon] ondemandx inactive\n"); - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&this_dbs_info->timer_mutex); - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); - mutex_unlock(&this_dbs_info->timer_mutex); - break; - } - return 0; -} - -static int __init cpufreq_gov_dbs_init(void) -{ - int err; - cputime64_t wall; - u64 idle_time; - int cpu = get_cpu(); - - idle_time = get_cpu_idle_time_us(cpu, &wall); - put_cpu(); - if (idle_time != -1ULL) { - /* Idle micro accounting is supported. Use finer thresholds */ - dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; - dbs_tuners_ins.down_differential = - MICRO_FREQUENCY_DOWN_DIFFERENTIAL; - /* - * In no_hz/micro accounting case we set the minimum frequency - * not depending on HZ, but fixed (very low). 
The deferred - * timer might skip some samples if idle/sleeping as needed. - */ - min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; - } else { - /* For correct statistics, we need 10 ticks for each measure */ - min_sampling_rate = - MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(1); - } - - kondemandx_wq = alloc_workqueue("kondemandx", WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1); - if (!kondemandx_wq) { - printk(KERN_ERR "Creation of kondemandx failed\n"); - return -EFAULT; - } - - err = cpufreq_register_governor(&cpufreq_gov_ondemandx); - if (err) - destroy_workqueue(kondemandx_wq); - - return err; -} - -static void __exit cpufreq_gov_dbs_exit(void) -{ - pr_info("[imoseyon] ondemandx exit\n"); - cpufreq_unregister_governor(&cpufreq_gov_ondemandx); - destroy_workqueue(kondemandx_wq); -} - - -MODULE_AUTHOR("Venkatesh Pallipadi "); -MODULE_AUTHOR("Alexey Starikovskiy "); -MODULE_DESCRIPTION("'cpufreq_ondemandx' - A dynamic cpufreq governor for " - "Low Latency Frequency Transition capable processors"); -MODULE_LICENSE("GPL"); - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMANDX -fs_initcall(cpufreq_gov_dbs_init); -#else -module_init(cpufreq_gov_dbs_init); -#endif -module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_smartass2.c b/drivers/cpufreq/cpufreq_smartass2.c index 09d8e6e2..05c39ded 100644 --- a/drivers/cpufreq/cpufreq_smartass2.c +++ b/drivers/cpufreq/cpufreq_smartass2.c @@ -467,7 +467,7 @@ static ssize_t store_debug_mask(struct kobject *kobj, struct attribute *attr, co res = strict_strtoul(buf, 0, &input); if (res >= 0) debug_mask = input; - return count; + return res; } static ssize_t show_up_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) @@ -482,7 +482,7 @@ static ssize_t store_up_rate_us(struct kobject *kobj, struct attribute *attr, co res = strict_strtoul(buf, 0, &input); if (res >= 0 && input >= 0 && input <= 100000000) up_rate_us = input; - return count; + return res; } static ssize_t show_down_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) @@ -497,7 +497,7 @@ static ssize_t store_down_rate_us(struct kobject *kobj, struct attribute *attr, res = strict_strtoul(buf, 0, &input); if (res >= 0 && input >= 0 && input <= 100000000) down_rate_us = input; - return count; + return res; } static ssize_t show_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) @@ -515,7 +515,7 @@ static ssize_t store_sleep_ideal_freq(struct kobject *kobj, struct attribute *at if (suspended) smartass_update_min_max_allcpus(); } - return count; + return res; } static ssize_t show_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, char *buf) @@ -530,7 +530,7 @@ static ssize_t store_sleep_wakeup_freq(struct kobject *kobj, struct attribute *a res = strict_strtoul(buf, 0, &input); if (res >= 0 && input >= 0) sleep_wakeup_freq = input; - return count; + return res; } static ssize_t show_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) @@ -548,7 +548,7 @@ static ssize_t store_awake_ideal_freq(struct kobject *kobj, struct attribute *at if (!suspended) smartass_update_min_max_allcpus(); } - return count; + return res; } static ssize_t show_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, char *buf) @@ -563,7 +563,7 @@ static ssize_t store_sample_rate_jiffies(struct kobject *kobj, struct attribute res = strict_strtoul(buf, 0, &input); if (res >= 0 && input > 0 && input <= 1000) sample_rate_jiffies = input; - return count; + return res; } static ssize_t show_ramp_up_step(struct kobject *kobj, struct 
attribute *attr, char *buf) @@ -578,7 +578,7 @@ static ssize_t store_ramp_up_step(struct kobject *kobj, struct attribute *attr, res = strict_strtoul(buf, 0, &input); if (res >= 0 && input >= 0) ramp_up_step = input; - return count; + return res; } static ssize_t show_ramp_down_step(struct kobject *kobj, struct attribute *attr, char *buf) @@ -593,7 +593,7 @@ static ssize_t store_ramp_down_step(struct kobject *kobj, struct attribute *attr res = strict_strtoul(buf, 0, &input); if (res >= 0 && input >= 0) ramp_down_step = input; - return count; + return res; } static ssize_t show_max_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) @@ -608,7 +608,7 @@ static ssize_t store_max_cpu_load(struct kobject *kobj, struct attribute *attr, res = strict_strtoul(buf, 0, &input); if (res >= 0 && input > 0 && input <= 100) max_cpu_load = input; - return count; + return res; } static ssize_t show_min_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) @@ -623,7 +623,7 @@ static ssize_t store_min_cpu_load(struct kobject *kobj, struct attribute *attr, res = strict_strtoul(buf, 0, &input); if (res >= 0 && input > 0 && input < 100) min_cpu_load = input; - return count; + return res; } #define define_global_rw_attr(_name) \ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 2d639de5..ec8dea51 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -385,21 +385,6 @@ extern struct cpufreq_governor cpufreq_gov_smartass; #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_LAZY) extern struct cpufreq_governor cpufreq_gov_lazy; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_lazy) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SCARY) -extern struct cpufreq_governor cpufreq_gov_scary; -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_scary) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_LIONHEART) -extern struct cpufreq_governor cpufreq_gov_lionheart; -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_lionheart) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_WHEATLEY) -extern struct cpufreq_governor cpufreq_gov_wheatley; -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_wheatley) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SAVAGEDZEN) -extern struct cpufreq_governor cpufreq_gov_savagedzen; -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_savagedzen) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMANDX) -extern struct cpufreq_governor cpufreq_gov_ondemandx; -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_ondemandx) #endif From 61f29fdf04db2e0d8852e3399b282d6f4781b238 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 10:23:47 -0400 Subject: [PATCH 18/35] Revert "Revert "Added Intellidemand, Scary Governor"" This reverts commit 26eb4d4d647ba9605db70f52b56eb22e57349dda. --- drivers/cpufreq/cpufreq_scary.c | 744 ++++++++++++++++++++++++++++++++ 1 file changed, 744 insertions(+) create mode 100644 drivers/cpufreq/cpufreq_scary.c diff --git a/drivers/cpufreq/cpufreq_scary.c b/drivers/cpufreq/cpufreq_scary.c new file mode 100644 index 00000000..c5670f97 --- /dev/null +++ b/drivers/cpufreq/cpufreq_scary.c @@ -0,0 +1,744 @@ +/* + Scary governor based off of conservatives source with some of smartasses features + + For devs - If you're going to port this driver to other devices, make sure to edit the default sleep frequencies & prev frequencies or else you might be going outside your devices hardware limits. 
+*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_UP_THRESHOLD (80) +#define DEF_FREQUENCY_DOWN_THRESHOLD (45) +#define DEFAULT_SLEEP_MAX_FREQ 245760 +#define DEFAULT_SLEEP_MIN_FREQ 122880 +#define DEFAULT_SLEEP_PREV_FREQ 122880 //This is so that if there are any issues resulting in sleep_prev_freq getting set, there will be a backup freq +#define DEFAULT_PREV_MAX 614400 +static unsigned int suspended; +static unsigned int sleep_max_freq=DEFAULT_SLEEP_MAX_FREQ; +static unsigned int sleep_min_freq=DEFAULT_SLEEP_MIN_FREQ; +static unsigned int sleep_prev_freq=DEFAULT_SLEEP_PREV_FREQ; +static unsigned int sleep_prev_max=DEFAULT_PREV_MAX; + +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. + * All times here are in uS. + */ +#define MIN_SAMPLING_RATE_RATIO (2) + +static unsigned int min_sampling_rate; + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define DEF_SAMPLING_DOWN_FACTOR (1) +#define MAX_SAMPLING_DOWN_FACTOR (10) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + + +static void do_dbs_timer(struct work_struct *work); + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + unsigned int down_skip; + unsigned int requested_freq; + int cpu; + unsigned int enable:1; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on + * different CPUs. It protects dbs_enable in governor start/stop. 
+ */ +static DEFINE_MUTEX(dbs_mutex); + +static struct workqueue_struct *kconservative_wq; + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int sampling_down_factor; + unsigned int up_threshold; + unsigned int down_threshold; + unsigned int ignore_nice; + unsigned int freq_step; +} dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .ignore_nice = 0, + .freq_step = 5, +}; + +static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, + cputime64_t *wall) +{ + cputime64_t idle_time; + cputime64_t cur_wall_time; + cputime64_t busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, + kstat_cpu(cpu).cpustat.system); + + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); + + idle_time = cputime64_sub(cur_wall_time, busy_time); + if (wall) + *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); + + return (cputime64_t)jiffies_to_usecs(idle_time);; +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, wall); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + + return idle_time; +} + +/* keep track of frequency transitions */ +static int +dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_freqs *freq = data; + struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info, + freq->cpu); + + struct cpufreq_policy *policy; + + if (!this_dbs_info->enable) + return 0; + + policy = this_dbs_info->cur_policy; + + /* + * we only care if our internally tracked freq moves outside + * the 'valid' ranges of freqency available to us otherwise + * we do not change it + */ + if (this_dbs_info->requested_freq > policy->max + || this_dbs_info->requested_freq < policy->min) + this_dbs_info->requested_freq = freq->new; + + return 0; +} + +static struct notifier_block dbs_cpufreq_notifier_block = { + .notifier_call = dbs_cpufreq_notifier +}; + +/************************** sysfs interface ************************/ +static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) +{ + printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max " + "sysfs file is deprecated - used by: %s\n", current->comm); + return sprintf(buf, "%u\n", -1U); +} + +static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +#define define_one_ro(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +define_one_ro(sampling_rate_max); +define_one_ro(sampling_rate_min); + +/* cpufreq_conservative Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct cpufreq_policy *unused, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(sampling_down_factor, sampling_down_factor); +show_one(up_threshold, up_threshold); +show_one(down_threshold, down_threshold); +show_one(ignore_nice_load, ignore_nice); +show_one(freq_step, freq_step); + +static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, 
+ const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.sampling_down_factor = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_sampling_rate(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_up_threshold(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + mutex_lock(&dbs_mutex); + if (ret != 1 || input > 100 || + input <= dbs_tuners_ins.down_threshold) { + mutex_unlock(&dbs_mutex); + return -EINVAL; + } + + dbs_tuners_ins.up_threshold = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_down_threshold(struct cpufreq_policy *unused, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + mutex_lock(&dbs_mutex); + /* cannot be lower than 11 otherwise freq will not fall */ + if (ret != 1 || input < 11 || input > 100 || + input >= dbs_tuners_ins.up_threshold) { + mutex_unlock(&dbs_mutex); + return -EINVAL; + } + + dbs_tuners_ins.down_threshold = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + mutex_lock(&dbs_mutex); + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + mutex_unlock(&dbs_mutex); + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(cs_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; + } + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_freq_step(struct cpufreq_policy *policy, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input > 100) + input = 100; + + /* no need to test here if freq_step is zero as the user might actually + * want this, they would be crazy though :) */ + mutex_lock(&dbs_mutex); + dbs_tuners_ins.freq_step = input; + mutex_unlock(&dbs_mutex); + + return count; +} + +#define define_one_rw(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0644, show_##_name, store_##_name) + +define_one_rw(sampling_rate); +define_one_rw(sampling_down_factor); +define_one_rw(up_threshold); +define_one_rw(down_threshold); +define_one_rw(ignore_nice_load); +define_one_rw(freq_step); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_max.attr, + &sampling_rate_min.attr, + &sampling_rate.attr, + &sampling_down_factor.attr, + &up_threshold.attr, + &down_threshold.attr, + &ignore_nice_load.attr, + &freq_step.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "scary", +}; + 
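The sysfs block above is entirely macro-generated: show_one() emits a read accessor for each dbs_tuners_ins field, define_one_ro()/define_one_rw() wrap the accessors in struct freq_attr objects, and dbs_attr_group collects them under the name "scary" so they become a per-policy tunable directory once sysfs_create_group() runs at governor start. A hand-expanded sketch of what the preprocessor produces for the up_threshold tunable, for illustration only and not part of the patch:

    /* Expansion of show_one(up_threshold, up_threshold) and
     * define_one_rw(up_threshold) from the file above. */
    static ssize_t show_up_threshold(struct cpufreq_policy *unused, char *buf)
    {
            return sprintf(buf, "%u\n", dbs_tuners_ins.up_threshold);
    }

    static struct freq_attr up_threshold =
            __ATTR(up_threshold, 0644, show_up_threshold, store_up_threshold);

With the group registered against &policy->kobj in CPUFREQ_GOV_START, the tunables typically surface as /sys/devices/system/cpu/cpu0/cpufreq/scary/up_threshold and friends.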
+/************************** sysfs end ************************/ + +/********** Porting smartass code for suspension**********/ +static void smartass_suspend(int cpu, int suspend) +{ + struct cpu_dbs_info_s *this_smartass = &per_cpu(cs_cpu_dbs_info, smp_processor_id()); + struct cpufreq_policy *policy = this_smartass->cur_policy; + unsigned int new_freq; + + if (!this_smartass->enable || sleep_max_freq==0) // disable behavior for sleep_max_freq==0 + return; + + if (suspend) + { + //If the current min speed is greater than the max sleep, we reset the min to 120mhz, for battery savings + if (policy->min >= sleep_max_freq) + { + sleep_prev_freq=policy->min; + policy->min= sleep_min_freq; + } + if (policy->max > sleep_max_freq) + { + sleep_prev_max=policy->max; + policy->max=sleep_max_freq; + } + if (policy->cur > sleep_max_freq) + { + new_freq = sleep_max_freq; + if (new_freq > policy->max) + new_freq = policy->max; + if (new_freq < policy->min) + new_freq = policy->min; + __cpufreq_driver_target(policy, new_freq,CPUFREQ_RELATION_H); + } + + } + else //Resetting the min speed + { + if (policy->min < sleep_prev_freq) + policy->min=sleep_prev_freq; + if (policy->max < sleep_prev_max) + policy->max=sleep_prev_max; + } + +} + +static void smartass_early_suspend(struct early_suspend *handler) +{ + int i; + suspended = 1; + for_each_online_cpu(i) + smartass_suspend(i,1); +} + +static void smartass_late_resume(struct early_suspend *handler) +{ + int i; + suspended = 0; + for_each_online_cpu(i) + smartass_suspend(i,0); +} + +static struct early_suspend smartass_power_suspend = +{ + .suspend = smartass_early_suspend, + .resume = smartass_late_resume, +}; + + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + //Current freq +// unsigned int new_freq; + unsigned int load = 0; + unsigned int freq_target; + + struct cpufreq_policy *policy; + unsigned int j; + + policy = this_dbs_info->cur_policy; + + /* + * Every sampling_rate, we check, if current idle time is less + * than 20% (default), then we try to increase frequency + * Every sampling_rate*sampling_down_factor, we check, if current + * idle time is more than 80%, then we try to decrease frequency + * + * Any frequency increase takes it to the maximum frequency. 
+ * Frequency reduction happens at minimum steps of + * 5% (default) of maximum frequency + */ + + /* Get Absolute Load */ + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time; + unsigned int idle_time, wall_time; + + j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + if (dbs_tuners_ins.ignore_nice) { + cputime64_t cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, + j_dbs_info->prev_cpu_nice); + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + } + + /* + * break out if we 'cannot' reduce the speed as the user might + * want freq_step to be zero + */ + if (dbs_tuners_ins.freq_step == 0) + return; + + /* Check for frequency increase */ + if (load > dbs_tuners_ins.up_threshold) + { + this_dbs_info->down_skip = 0; + + /* if we are already at full speed then break out early */ + if (this_dbs_info->requested_freq == policy->max) + return; + freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; + /* max freq cannot be less than 100. but who knows.... */ + if (unlikely(freq_target == 0)) + freq_target = 5; + + this_dbs_info->requested_freq += freq_target; + if (this_dbs_info->requested_freq > policy->max) + this_dbs_info->requested_freq = policy->max; + + __cpufreq_driver_target(policy, this_dbs_info->requested_freq,CPUFREQ_RELATION_H); + + return; + } + + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus 10 points under the threshold. 
+ */ + if (load < (dbs_tuners_ins.down_threshold - 10)) { + freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; + + this_dbs_info->requested_freq -= freq_target; + if (this_dbs_info->requested_freq < policy->min) + this_dbs_info->requested_freq = policy->min; + + /* + * if we cannot reduce the frequency anymore, break out early + */ + if (policy->cur == policy->min) + return; + + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, + CPUFREQ_RELATION_H); + return; + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + delay -= jiffies % delay; + + mutex_lock(&dbs_info->timer_mutex); + + dbs_check_cpu(dbs_info); + + queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + delay -= jiffies % delay; + + dbs_info->enable = 1; + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work, + delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + dbs_info->enable = 0; + cancel_delayed_work_sync(&dbs_info->work); +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + suspended=0; + + this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_mutex); + + rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kstat_cpu(j).cpustat.nice; + } + } + this_dbs_info->down_skip = 0; + this_dbs_info->requested_freq = policy->cur; + + mutex_init(&this_dbs_info->timer_mutex); + dbs_enable++; + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + /* policy latency is in nS. 
Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + + /* + * conservative does not implement micro like ondemand + * governor, thus we are bound to jiffes/HZ + */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = + max(min_sampling_rate, + latency * LATENCY_MULTIPLIER); + + cpufreq_register_notifier( + &dbs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + mutex_unlock(&dbs_mutex); + + dbs_timer_init(this_dbs_info); + + break; + + case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + sysfs_remove_group(&policy->kobj, &dbs_attr_group); + dbs_enable--; + mutex_destroy(&this_dbs_info->timer_mutex); + + /* + * Stop the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 0) + cpufreq_unregister_notifier( + &dbs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + mutex_unlock(&dbs_mutex); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target( + this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target( + this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&this_dbs_info->timer_mutex); + + break; + } + return 0; +} + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_Scary +static +#endif +struct cpufreq_governor cpufreq_gov_scary = { + .name = "Scary", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +static int __init cpufreq_gov_dbs_init(void) +{ + int err; + + kconservative_wq = create_workqueue("kconservative"); + if (!kconservative_wq) { + printk(KERN_ERR "Creation of kconservative failed\n"); + return -EFAULT; + } + register_early_suspend(&smartass_power_suspend); + err = cpufreq_register_governor(&cpufreq_gov_scary); + if (err) + destroy_workqueue(kconservative_wq); + + return err; +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_scary); + destroy_workqueue(kconservative_wq); +} + + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCARY +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); + From 68b536a2cdbca4f78c224cd592cfc0fd6775be08 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 10:24:02 -0400 Subject: [PATCH 19/35] Revert "Added Intellidemand, Scary Governor" This reverts commit ea71572ec635fa9907b87b6b0892cf52ee6804bf. --- drivers/cpufreq/cpufreq_scary.c | 744 -------------------------------- 1 file changed, 744 deletions(-) delete mode 100644 drivers/cpufreq/cpufreq_scary.c diff --git a/drivers/cpufreq/cpufreq_scary.c b/drivers/cpufreq/cpufreq_scary.c deleted file mode 100644 index c5670f97..00000000 --- a/drivers/cpufreq/cpufreq_scary.c +++ /dev/null @@ -1,744 +0,0 @@ -/* - Scary governor based off of conservatives source with some of smartasses features - - For devs - If you're going to port this driver to other devices, make sure to edit the default sleep frequencies & prev frequencies or else you might be going outside your devices hardware limits. 
-*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ - -#define DEF_FREQUENCY_UP_THRESHOLD (80) -#define DEF_FREQUENCY_DOWN_THRESHOLD (45) -#define DEFAULT_SLEEP_MAX_FREQ 245760 -#define DEFAULT_SLEEP_MIN_FREQ 122880 -#define DEFAULT_SLEEP_PREV_FREQ 122880 //This is so that if there are any issues resulting in sleep_prev_freq getting set, there will be a backup freq -#define DEFAULT_PREV_MAX 614400 -static unsigned int suspended; -static unsigned int sleep_max_freq=DEFAULT_SLEEP_MAX_FREQ; -static unsigned int sleep_min_freq=DEFAULT_SLEEP_MIN_FREQ; -static unsigned int sleep_prev_freq=DEFAULT_SLEEP_PREV_FREQ; -static unsigned int sleep_prev_max=DEFAULT_PREV_MAX; - -/* - * The polling frequency of this governor depends on the capability of - * the processor. Default polling frequency is 1000 times the transition - * latency of the processor. The governor will work on any processor with - * transition latency <= 10mS, using appropriate sampling - * rate. - * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) - * this governor will not work. - * All times here are in uS. - */ -#define MIN_SAMPLING_RATE_RATIO (2) - -static unsigned int min_sampling_rate; - -#define LATENCY_MULTIPLIER (1000) -#define MIN_LATENCY_MULTIPLIER (100) -#define DEF_SAMPLING_DOWN_FACTOR (1) -#define MAX_SAMPLING_DOWN_FACTOR (10) -#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) - - -static void do_dbs_timer(struct work_struct *work); - -struct cpu_dbs_info_s { - cputime64_t prev_cpu_idle; - cputime64_t prev_cpu_wall; - cputime64_t prev_cpu_nice; - struct cpufreq_policy *cur_policy; - struct delayed_work work; - unsigned int down_skip; - unsigned int requested_freq; - int cpu; - unsigned int enable:1; - /* - * percpu mutex that serializes governor limit change with - * do_dbs_timer invocation. We do not want do_dbs_timer to run - * when user is changing the governor or limits. - */ - struct mutex timer_mutex; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info); - -static unsigned int dbs_enable; /* number of CPUs using this policy */ - -/* - * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on - * different CPUs. It protects dbs_enable in governor start/stop. 
- */ -static DEFINE_MUTEX(dbs_mutex); - -static struct workqueue_struct *kconservative_wq; - -static struct dbs_tuners { - unsigned int sampling_rate; - unsigned int sampling_down_factor; - unsigned int up_threshold; - unsigned int down_threshold; - unsigned int ignore_nice; - unsigned int freq_step; -} dbs_tuners_ins = { - .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, - .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, - .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, - .ignore_nice = 0, - .freq_step = 5, -}; - -static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, - cputime64_t *wall) -{ - cputime64_t idle_time; - cputime64_t cur_wall_time; - cputime64_t busy_time; - - cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); - busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, - kstat_cpu(cpu).cpustat.system); - - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); - - idle_time = cputime64_sub(cur_wall_time, busy_time); - if (wall) - *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); - - return (cputime64_t)jiffies_to_usecs(idle_time);; -} - -static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) -{ - u64 idle_time = get_cpu_idle_time_us(cpu, wall); - - if (idle_time == -1ULL) - return get_cpu_idle_time_jiffy(cpu, wall); - - return idle_time; -} - -/* keep track of frequency transitions */ -static int -dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, - void *data) -{ - struct cpufreq_freqs *freq = data; - struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info, - freq->cpu); - - struct cpufreq_policy *policy; - - if (!this_dbs_info->enable) - return 0; - - policy = this_dbs_info->cur_policy; - - /* - * we only care if our internally tracked freq moves outside - * the 'valid' ranges of freqency available to us otherwise - * we do not change it - */ - if (this_dbs_info->requested_freq > policy->max - || this_dbs_info->requested_freq < policy->min) - this_dbs_info->requested_freq = freq->new; - - return 0; -} - -static struct notifier_block dbs_cpufreq_notifier_block = { - .notifier_call = dbs_cpufreq_notifier -}; - -/************************** sysfs interface ************************/ -static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) -{ - printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max " - "sysfs file is deprecated - used by: %s\n", current->comm); - return sprintf(buf, "%u\n", -1U); -} - -static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%u\n", min_sampling_rate); -} - -#define define_one_ro(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0444, show_##_name, NULL) - -define_one_ro(sampling_rate_max); -define_one_ro(sampling_rate_min); - -/* cpufreq_conservative Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct cpufreq_policy *unused, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ -} -show_one(sampling_rate, sampling_rate); -show_one(sampling_down_factor, sampling_down_factor); -show_one(up_threshold, up_threshold); -show_one(down_threshold, down_threshold); -show_one(ignore_nice_load, ignore_nice); -show_one(freq_step, freq_step); - -static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, 
- const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.sampling_down_factor = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_sampling_rate(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_up_threshold(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - mutex_lock(&dbs_mutex); - if (ret != 1 || input > 100 || - input <= dbs_tuners_ins.down_threshold) { - mutex_unlock(&dbs_mutex); - return -EINVAL; - } - - dbs_tuners_ins.up_threshold = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_down_threshold(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - mutex_lock(&dbs_mutex); - /* cannot be lower than 11 otherwise freq will not fall */ - if (ret != 1 || input < 11 || input > 100 || - input >= dbs_tuners_ins.up_threshold) { - mutex_unlock(&dbs_mutex); - return -EINVAL; - } - - dbs_tuners_ins.down_threshold = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - unsigned int j; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - if (input > 1) - input = 1; - - mutex_lock(&dbs_mutex); - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - mutex_unlock(&dbs_mutex); - return count; - } - dbs_tuners_ins.ignore_nice = input; - - /* we need to re-evaluate prev_cpu_idle */ - for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; - dbs_info = &per_cpu(cs_cpu_dbs_info, j); - dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; - } - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_freq_step(struct cpufreq_policy *policy, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - if (input > 100) - input = 100; - - /* no need to test here if freq_step is zero as the user might actually - * want this, they would be crazy though :) */ - mutex_lock(&dbs_mutex); - dbs_tuners_ins.freq_step = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -#define define_one_rw(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0644, show_##_name, store_##_name) - -define_one_rw(sampling_rate); -define_one_rw(sampling_down_factor); -define_one_rw(up_threshold); -define_one_rw(down_threshold); -define_one_rw(ignore_nice_load); -define_one_rw(freq_step); - -static struct attribute *dbs_attributes[] = { - &sampling_rate_max.attr, - &sampling_rate_min.attr, - &sampling_rate.attr, - &sampling_down_factor.attr, - &up_threshold.attr, - &down_threshold.attr, - &ignore_nice_load.attr, - &freq_step.attr, - NULL -}; - -static struct attribute_group dbs_attr_group = { - .attrs = dbs_attributes, - .name = "scary", -}; - 
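For reference while reading the dbs_check_cpu() hunks in this file: per-CPU load is computed purely from the deltas of wall time and idle time between two samples, and samples with zero or inconsistent wall time are skipped. A minimal, self-contained sketch of that arithmetic, using invented sample values rather than anything measured:

    /* Load calculation as in dbs_check_cpu(); the figures are invented. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int prev_wall = 100000, cur_wall = 180000; /* usecs */
            unsigned int prev_idle =  60000, cur_idle = 100000; /* usecs */
            unsigned int wall_time = cur_wall - prev_wall;      /* 80000 */
            unsigned int idle_time = cur_idle - prev_idle;      /* 40000 */

            if (!wall_time || wall_time < idle_time)
                    return 0;   /* the governor skips such samples */

            printf("load = %u%%\n", 100 * (wall_time - idle_time) / wall_time);
            return 0;           /* prints: load = 50% */
    }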
-/************************** sysfs end ************************/ - -/********** Porting smartass code for suspension**********/ -static void smartass_suspend(int cpu, int suspend) -{ - struct cpu_dbs_info_s *this_smartass = &per_cpu(cs_cpu_dbs_info, smp_processor_id()); - struct cpufreq_policy *policy = this_smartass->cur_policy; - unsigned int new_freq; - - if (!this_smartass->enable || sleep_max_freq==0) // disable behavior for sleep_max_freq==0 - return; - - if (suspend) - { - //If the current min speed is greater than the max sleep, we reset the min to 120mhz, for battery savings - if (policy->min >= sleep_max_freq) - { - sleep_prev_freq=policy->min; - policy->min= sleep_min_freq; - } - if (policy->max > sleep_max_freq) - { - sleep_prev_max=policy->max; - policy->max=sleep_max_freq; - } - if (policy->cur > sleep_max_freq) - { - new_freq = sleep_max_freq; - if (new_freq > policy->max) - new_freq = policy->max; - if (new_freq < policy->min) - new_freq = policy->min; - __cpufreq_driver_target(policy, new_freq,CPUFREQ_RELATION_H); - } - - } - else //Resetting the min speed - { - if (policy->min < sleep_prev_freq) - policy->min=sleep_prev_freq; - if (policy->max < sleep_prev_max) - policy->max=sleep_prev_max; - } - -} - -static void smartass_early_suspend(struct early_suspend *handler) -{ - int i; - suspended = 1; - for_each_online_cpu(i) - smartass_suspend(i,1); -} - -static void smartass_late_resume(struct early_suspend *handler) -{ - int i; - suspended = 0; - for_each_online_cpu(i) - smartass_suspend(i,0); -} - -static struct early_suspend smartass_power_suspend = -{ - .suspend = smartass_early_suspend, - .resume = smartass_late_resume, -}; - - -static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) -{ - //Current freq -// unsigned int new_freq; - unsigned int load = 0; - unsigned int freq_target; - - struct cpufreq_policy *policy; - unsigned int j; - - policy = this_dbs_info->cur_policy; - - /* - * Every sampling_rate, we check, if current idle time is less - * than 20% (default), then we try to increase frequency - * Every sampling_rate*sampling_down_factor, we check, if current - * idle time is more than 80%, then we try to decrease frequency - * - * Any frequency increase takes it to the maximum frequency. 
- * Frequency reduction happens at minimum steps of - * 5% (default) of maximum frequency - */ - - /* Get Absolute Load */ - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - cputime64_t cur_wall_time, cur_idle_time; - unsigned int idle_time, wall_time; - - j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); - - cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); - - wall_time = (unsigned int) cputime64_sub(cur_wall_time, - j_dbs_info->prev_cpu_wall); - j_dbs_info->prev_cpu_wall = cur_wall_time; - - idle_time = (unsigned int) cputime64_sub(cur_idle_time, - j_dbs_info->prev_cpu_idle); - j_dbs_info->prev_cpu_idle = cur_idle_time; - - if (dbs_tuners_ins.ignore_nice) { - cputime64_t cur_nice; - unsigned long cur_nice_jiffies; - - cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, - j_dbs_info->prev_cpu_nice); - /* - * Assumption: nice time between sampling periods will - * be less than 2^32 jiffies for 32 bit sys - */ - cur_nice_jiffies = (unsigned long) - cputime64_to_jiffies64(cur_nice); - - j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; - idle_time += jiffies_to_usecs(cur_nice_jiffies); - } - - if (unlikely(!wall_time || wall_time < idle_time)) - continue; - - load = 100 * (wall_time - idle_time) / wall_time; - } - - /* - * break out if we 'cannot' reduce the speed as the user might - * want freq_step to be zero - */ - if (dbs_tuners_ins.freq_step == 0) - return; - - /* Check for frequency increase */ - if (load > dbs_tuners_ins.up_threshold) - { - this_dbs_info->down_skip = 0; - - /* if we are already at full speed then break out early */ - if (this_dbs_info->requested_freq == policy->max) - return; - freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; - /* max freq cannot be less than 100. but who knows.... */ - if (unlikely(freq_target == 0)) - freq_target = 5; - - this_dbs_info->requested_freq += freq_target; - if (this_dbs_info->requested_freq > policy->max) - this_dbs_info->requested_freq = policy->max; - - __cpufreq_driver_target(policy, this_dbs_info->requested_freq,CPUFREQ_RELATION_H); - - return; - } - - /* - * The optimal frequency is the frequency that is the lowest that - * can support the current CPU usage without triggering the up - * policy. To be safe, we focus 10 points under the threshold. 
- */ - if (load < (dbs_tuners_ins.down_threshold - 10)) { - freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; - - this_dbs_info->requested_freq -= freq_target; - if (this_dbs_info->requested_freq < policy->min) - this_dbs_info->requested_freq = policy->min; - - /* - * if we cannot reduce the frequency anymore, break out early - */ - if (policy->cur == policy->min) - return; - - __cpufreq_driver_target(policy, this_dbs_info->requested_freq, - CPUFREQ_RELATION_H); - return; - } -} - -static void do_dbs_timer(struct work_struct *work) -{ - struct cpu_dbs_info_s *dbs_info = - container_of(work, struct cpu_dbs_info_s, work.work); - unsigned int cpu = dbs_info->cpu; - - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - delay -= jiffies % delay; - - mutex_lock(&dbs_info->timer_mutex); - - dbs_check_cpu(dbs_info); - - queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay); - mutex_unlock(&dbs_info->timer_mutex); -} - -static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) -{ - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - delay -= jiffies % delay; - - dbs_info->enable = 1; - INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); - queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work, - delay); -} - -static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) -{ - dbs_info->enable = 0; - cancel_delayed_work_sync(&dbs_info->work); -} - -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int j; - int rc; - suspended=0; - - this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - mutex_lock(&dbs_mutex); - - rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) { - j_dbs_info->prev_cpu_nice = - kstat_cpu(j).cpustat.nice; - } - } - this_dbs_info->down_skip = 0; - this_dbs_info->requested_freq = policy->cur; - - mutex_init(&this_dbs_info->timer_mutex); - dbs_enable++; - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 1) { - unsigned int latency; - /* policy latency is in nS. 
Convert it to uS first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - - /* - * conservative does not implement micro like ondemand - * governor, thus we are bound to jiffes/HZ - */ - min_sampling_rate = - MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); - /* Bring kernel and HW constraints together */ - min_sampling_rate = max(min_sampling_rate, - MIN_LATENCY_MULTIPLIER * latency); - dbs_tuners_ins.sampling_rate = - max(min_sampling_rate, - latency * LATENCY_MULTIPLIER); - - cpufreq_register_notifier( - &dbs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - } - mutex_unlock(&dbs_mutex); - - dbs_timer_init(this_dbs_info); - - break; - - case CPUFREQ_GOV_STOP: - dbs_timer_exit(this_dbs_info); - - mutex_lock(&dbs_mutex); - sysfs_remove_group(&policy->kobj, &dbs_attr_group); - dbs_enable--; - mutex_destroy(&this_dbs_info->timer_mutex); - - /* - * Stop the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 0) - cpufreq_unregister_notifier( - &dbs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - - mutex_unlock(&dbs_mutex); - - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&this_dbs_info->timer_mutex); - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target( - this_dbs_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target( - this_dbs_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); - mutex_unlock(&this_dbs_info->timer_mutex); - - break; - } - return 0; -} - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_Scary -static -#endif -struct cpufreq_governor cpufreq_gov_scary = { - .name = "Scary", - .governor = cpufreq_governor_dbs, - .max_transition_latency = TRANSITION_LATENCY_LIMIT, - .owner = THIS_MODULE, -}; - -static int __init cpufreq_gov_dbs_init(void) -{ - int err; - - kconservative_wq = create_workqueue("kconservative"); - if (!kconservative_wq) { - printk(KERN_ERR "Creation of kconservative failed\n"); - return -EFAULT; - } - register_early_suspend(&smartass_power_suspend); - err = cpufreq_register_governor(&cpufreq_gov_scary); - if (err) - destroy_workqueue(kconservative_wq); - - return err; -} - -static void __exit cpufreq_gov_dbs_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_scary); - destroy_workqueue(kconservative_wq); -} - - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCARY -fs_initcall(cpufreq_gov_dbs_init); -#else -module_init(cpufreq_gov_dbs_init); -#endif -module_exit(cpufreq_gov_dbs_exit); - From d3955e414c7a4bd5df4bf683c9a2763a90733f20 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 10:24:22 -0400 Subject: [PATCH 20/35] Revert "Fixed compile error (Lazy)" This reverts commit 0c43aba71595c7056ac8039839c428b3ef4d8e39. 
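With the defaults quoted above (up_threshold 80, down_threshold 45, freq_step 5), scary ramps in fixed fractions of the policy maximum: above 80% load it adds 5% of policy->max to requested_freq, below 45 - 10 = 35% it takes the same step back down, and the result is clamped to the policy limits before being handed to __cpufreq_driver_target(). A compact sketch of that decision; the 1512000 kHz maximum is an assumed example value, not something taken from the patch:

    /* Step decision as in cpufreq_scary.c above; max is an assumed value. */
    static unsigned int scary_next_freq(unsigned int load, unsigned int requested,
                                        unsigned int min, unsigned int max)
    {
            const unsigned int up_threshold = 80, down_threshold = 45, freq_step = 5;
            unsigned int step = freq_step * max / 100;  /* 75600 kHz for max = 1512000 */

            if (load > up_threshold) {
                    requested += step;
                    if (requested > max)
                            requested = max;
            } else if (load < down_threshold - 10) {
                    if (requested > min + step)
                            requested -= step;
                    else
                            requested = min;    /* the in-tree code subtracts, then clamps */
            }
            return requested;   /* passed to __cpufreq_driver_target(..., CPUFREQ_RELATION_H) */
    }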
--- drivers/cpufreq/Makefile | 3 +++ include/linux/cpufreq.h | 7 +++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index da12e97c..b529a80f 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -22,14 +22,17 @@ obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o +<<<<<<< HEAD obj-$(CONFIG_CPU_FREQ_GOV_MINMAX) += cpufreq_minmax.o obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS2) += cpufreq_smartass2.o obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS) += cpufreq_smartass.o obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVEX) += cpufreq_interactivex.o obj-$(CONFIG_CPU_FREQ_GOV_LAGFREE) += cpufreq_lagfree.o obj-$(CONFIG_CPU_FREQ_GOV_LULZACTIVE) += cpufreq_lulzactive.o +======= obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o obj-$(CONFIG_CPU_FREQ_GOV_LAZY) += cpufreq_lazy.o +>>>>>>> 17381f7... Added CPUfreq governor 'lazy'. # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index ec8dea51..dceb54ca 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -396,9 +396,8 @@ extern struct cpufreq_governor cpufreq_gov_lazy; #define CPUFREQ_TABLE_END ~1 struct cpufreq_frequency_table { - unsigned int index; /* any */ - unsigned int frequency; /* kHz - doesn't need to be in ascending - * order */ + unsigned int index; + unsigned int frequency; }; int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, @@ -427,4 +426,4 @@ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, void cpufreq_frequency_table_put_attr(unsigned int cpu); -#endif /* _LINUX_CPUFREQ_H */ +#endif From f209f0a43228a4cb59f26d1d0c5cb03cd76b00f5 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 10:24:54 -0400 Subject: [PATCH 21/35] Revert "Added CPUfreq governor 'lazy'." This reverts commit 198875804c0e0037dd74072f4d083b8ae06df7c0. --- drivers/cpufreq/Kconfig | 56 --- drivers/cpufreq/Makefile | 5 - drivers/cpufreq/cpufreq_lazy.c | 822 --------------------------------- include/linux/cpufreq.h | 3 - 4 files changed, 886 deletions(-) delete mode 100644 drivers/cpufreq/cpufreq_lazy.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index ba4d2b48..d794fc4d 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -234,7 +234,6 @@ config CPU_FREQ_DEFAULT_GOV_MINMAX bool "minmax" select CPU_FREQ_GOV_MINMAX select CPU_FREQ_GOV_PERFORMANCE - help Use the CPUFreq governor 'minmax' as default. This minimizes the frequency jumps does by the governor. This is aimed at maximizing both perfomance and battery life. @@ -263,7 +262,6 @@ config CPU_FREQ_GOV_CONSERVATIVE config CPU_FREQ_DEFAULT_GOV_SMARTASS2 bool "smartass2" select CPU_FREQ_GOV_SMARTASS2 - help Use the CPUFreq governor 'smartassV2' as default. config CPU_FREQ_DEFAULT_GOV_LAGFREE @@ -277,7 +275,6 @@ config CPU_FREQ_DEFAULT_GOV_LAGFREE Be aware that not all cpufreq drivers support the lagfree governor. If unsure have a look at the help section of the driver. Fallback governor will be the performance governor. - config CPU_FREQ_DEFAULT_GOV_INTERACTIVEX bool "interactiveX" select CPU_FREQ_GOV_INTERACTIVEX @@ -353,14 +350,6 @@ config CPU_FREQ_GOV_POWERSAVE If in doubt, say Y. 
-config CPU_FREQ_DEFAULT_GOV_LAZY - bool "lazy" - select CPU_FREQ_GOV_LAZY - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lazy' as default. -endchoice - config CPU_FREQ_GOV_SLP tristate "'slp' cpufreq policy governor" @@ -385,47 +374,6 @@ config CPU_FREQ_GOV_USERSPACE If in doubt, say Y. -config CPU_FREQ_GOV_ONDEMAND - tristate "'ondemand' cpufreq policy governor" - select CPU_FREQ_TABLE - help - 'ondemand' - This driver adds a dynamic cpufreq policy governor. - The governor does a periodic polling and - changes frequency based on the CPU utilization. - The support for this governor depends on CPU capability to - do fast frequency switching (i.e, very low latency frequency - transitions). - - To compile this driver as a module, choose M here: the - module will be called cpufreq_ondemand. - - For details, take a look at linux/Documentation/cpu-freq. - - If in doubt, say N. - -config CPU_FREQ_GOV_CONSERVATIVE - tristate "'conservative' cpufreq governor" - depends on CPU_FREQ - help - 'conservative' - this driver is rather similar to the 'ondemand' - governor both in its source code and its purpose, the difference is - its optimisation for better suitability in a battery powered - environment. The frequency is gracefully increased and decreased - rather than jumping to 100% when speed is required. - - If you have a desktop machine then you should really be considering - the 'ondemand' governor instead, however if you are using a laptop, - PDA or even an AMD64 based computer (due to the unacceptable - step-by-step latency issues between the minimum and maximum frequency - transitions in the CPU) you will probably want to use this governor. - - To compile this driver as a module, choose M here: the - module will be called cpufreq_conservative. - - For details, take a look at linux/Documentation/cpu-freq. - - If in doubt, say N. - config CPU_FREQ_GOV_LULZACTIVE tristate "'lulzactive' cpufreq governor" depends on CPU_FREQ @@ -505,10 +453,6 @@ config CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER Sampling latency rate multiplied by the cpu switch latency. Affects governor polling. -config CPU_FREQ_GOV_LAZY - tristate "'lazy' cpufreq governor" - depends on CPU_FREQ - menu "x86 CPU frequency scaling drivers" depends on X86 source "drivers/cpufreq/Kconfig.x86" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index b529a80f..d9355369 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -22,17 +22,12 @@ obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o -<<<<<<< HEAD obj-$(CONFIG_CPU_FREQ_GOV_MINMAX) += cpufreq_minmax.o obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS2) += cpufreq_smartass2.o obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS) += cpufreq_smartass.o obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVEX) += cpufreq_interactivex.o obj-$(CONFIG_CPU_FREQ_GOV_LAGFREE) += cpufreq_lagfree.o obj-$(CONFIG_CPU_FREQ_GOV_LULZACTIVE) += cpufreq_lulzactive.o -======= -obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o -obj-$(CONFIG_CPU_FREQ_GOV_LAZY) += cpufreq_lazy.o ->>>>>>> 17381f7... Added CPUfreq governor 'lazy'. 
# CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o diff --git a/drivers/cpufreq/cpufreq_lazy.c b/drivers/cpufreq/cpufreq_lazy.c deleted file mode 100644 index 82bfe8a9..00000000 --- a/drivers/cpufreq/cpufreq_lazy.c +++ /dev/null @@ -1,822 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_lazy.c - * - * Copyright (C) 2001 Russell King - * (C) 2003 Venkatesh Pallipadi . - * Jun Nakajima - * (C) 2011 Ezekeel - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef CONFIG_HAS_EARLYSUSPEND -#include -#endif - -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ - -#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) -#define DEF_FREQUENCY_UP_THRESHOLD (80) -#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) -#define MICRO_FREQUENCY_UP_THRESHOLD (90) -#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) -#define MIN_FREQUENCY_UP_THRESHOLD (11) -#define MAX_FREQUENCY_UP_THRESHOLD (100) - -/* - * The polling frequency of this governor depends on the capability of - * the processor. Default polling frequency is 1000 times the transition - * latency of the processor. The governor will work on any processor with - * transition latency <= 10mS, using appropriate sampling - * rate. - * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) - * this governor will not work. - * All times here are in uS. - */ -#define MIN_SAMPLING_RATE_RATIO (2) - -static unsigned int min_sampling_rate, current_sampling_rate; - -#define LATENCY_MULTIPLIER (1000) -#define MIN_LATENCY_MULTIPLIER (100) -#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) - -static void do_dbs_timer(struct work_struct *work); -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_LAZY -static -#endif -struct cpufreq_governor cpufreq_gov_lazy = { - .name = "lazy", - .governor = cpufreq_governor_dbs, - .max_transition_latency = TRANSITION_LATENCY_LIMIT, - .owner = THIS_MODULE, -}; - -/* Sampling types */ -enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; - -struct cpu_dbs_info_s { - cputime64_t prev_cpu_idle; - cputime64_t prev_cpu_iowait; - cputime64_t prev_cpu_wall; - cputime64_t prev_cpu_nice; - struct cpufreq_policy *cur_policy; - struct delayed_work work; - struct cpufreq_frequency_table *freq_table; - unsigned int freq_lo; - unsigned int freq_lo_jiffies; - unsigned int freq_hi_jiffies; - int cpu; - unsigned int sample_type:1; - /* - * percpu mutex that serializes governor limit change with - * do_dbs_timer invocation. We do not want do_dbs_timer to run - * when user is changing the governor or limits. - */ - struct mutex timer_mutex; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); - -static unsigned int dbs_enable; /* number of CPUs using this policy */ - -/* - * dbs_mutex protects protects dbs_enable in governor start/stop. 
- */ -static DEFINE_MUTEX(dbs_mutex); - -static struct dbs_tuners { - unsigned int sampling_rate; - unsigned int up_threshold; - unsigned int down_differential; - unsigned int ignore_nice; - unsigned int powersave_bias; - unsigned int io_is_busy; - unsigned int min_timeinstate; -#ifdef CONFIG_HAS_EARLYSUSPEND - bool screenoff_maxfreq; -#endif -} dbs_tuners_ins = { - .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, - .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, - .ignore_nice = 0, - .powersave_bias = 0, -#ifdef CONFIG_HAS_EARLYSUSPEND - .screenoff_maxfreq = false, -#endif -}; - -#ifdef CONFIG_HAS_EARLYSUSPEND -static bool suspended = false; - -static void lazy_early_suspend(struct early_suspend *handler) -{ - suspended = true; - - return; -} - -static void lazy_late_resume(struct early_suspend *handler) -{ - suspended = false; - - return; -} - -static struct early_suspend lazy_suspend = { - .suspend = lazy_early_suspend, - .resume = lazy_late_resume, - .level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1, -}; -#endif - -static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, - cputime64_t *wall) -{ - cputime64_t idle_time; - cputime64_t cur_wall_time; - cputime64_t busy_time; - - cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); - busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, - kstat_cpu(cpu).cpustat.system); - - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); - - idle_time = cputime64_sub(cur_wall_time, busy_time); - if (wall) - *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); - - return (cputime64_t)jiffies_to_usecs(idle_time); -} - -static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) -{ - u64 idle_time = get_cpu_idle_time_us(cpu, wall); - - if (idle_time == -1ULL) - return get_cpu_idle_time_jiffy(cpu, wall); - - return idle_time; -} - -static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) -{ - u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); - - if (iowait_time == -1ULL) - return 0; - - return iowait_time; -} - -/* - * Find right freq to be set now with powersave_bias on. - * Returns the freq_hi to be used right now and will set freq_hi_jiffies, - * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. 
- */ -static unsigned int powersave_bias_target(struct cpufreq_policy *policy, - unsigned int freq_next, - unsigned int relation) -{ - unsigned int freq_req, freq_reduc, freq_avg; - unsigned int freq_hi, freq_lo; - unsigned int index = 0; - unsigned int jiffies_total, jiffies_hi, jiffies_lo; - struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, - policy->cpu); - - if (!dbs_info->freq_table) { - dbs_info->freq_lo = 0; - dbs_info->freq_lo_jiffies = 0; - return freq_next; - } - - cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, - relation, &index); - freq_req = dbs_info->freq_table[index].frequency; - freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000; - freq_avg = freq_req - freq_reduc; - - /* Find freq bounds for freq_avg in freq_table */ - index = 0; - cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, - CPUFREQ_RELATION_H, &index); - freq_lo = dbs_info->freq_table[index].frequency; - index = 0; - cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, - CPUFREQ_RELATION_L, &index); - freq_hi = dbs_info->freq_table[index].frequency; - - /* Find out how long we have to be in hi and lo freqs */ - if (freq_hi == freq_lo) { - dbs_info->freq_lo = 0; - dbs_info->freq_lo_jiffies = 0; - return freq_lo; - } - jiffies_total = usecs_to_jiffies(current_sampling_rate); - jiffies_hi = (freq_avg - freq_lo) * jiffies_total; - jiffies_hi += ((freq_hi - freq_lo) / 2); - jiffies_hi /= (freq_hi - freq_lo); - jiffies_lo = jiffies_total - jiffies_hi; - dbs_info->freq_lo = freq_lo; - dbs_info->freq_lo_jiffies = jiffies_lo; - dbs_info->freq_hi_jiffies = jiffies_hi; - return freq_hi; -} - -static void lazy_powersave_bias_init_cpu(int cpu) -{ - struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - dbs_info->freq_table = cpufreq_frequency_get_table(cpu); - dbs_info->freq_lo = 0; -} - -static void lazy_powersave_bias_init(void) -{ - int i; - for_each_online_cpu(i) { - lazy_powersave_bias_init_cpu(i); - } -} - -/************************** sysfs interface ************************/ - -static ssize_t show_sampling_rate_min(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", min_sampling_rate); -} - -define_one_global_ro(sampling_rate_min); - -/* cpufreq_lazy Governor Tunables */ -#define show_one(file_name, object) \ - static ssize_t show_##file_name \ - (struct kobject *kobj, struct attribute *attr, char *buf) \ - { \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ - } -show_one(sampling_rate, sampling_rate); -show_one(io_is_busy, io_is_busy); -show_one(up_threshold, up_threshold); -show_one(ignore_nice_load, ignore_nice); -show_one(powersave_bias, powersave_bias); -show_one(min_timeinstate, min_timeinstate); -#ifdef CONFIG_HAS_EARLYSUSPEND -show_one(screenoff_maxfreq, screenoff_maxfreq); -#endif - -static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); - return count; -} - -static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.io_is_busy = !!input; - return count; -} - -static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int 
input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || - input < MIN_FREQUENCY_UP_THRESHOLD) { - return -EINVAL; - } - dbs_tuners_ins.up_threshold = input; - return count; -} - -static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - unsigned int j; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - if (input > 1) - input = 1; - - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - mutex_unlock(&dbs_mutex); - return count; - } - dbs_tuners_ins.ignore_nice = input; - - /* we need to re-evaluate prev_cpu_idle */ - for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; - dbs_info = &per_cpu(od_cpu_dbs_info, j); - dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; - - } - return count; -} - -static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - if (input > 1000) - input = 1000; - - dbs_tuners_ins.powersave_bias = input; - lazy_powersave_bias_init(); - return count; -} - -static ssize_t store_min_timeinstate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - dbs_tuners_ins.min_timeinstate = max(input, min_sampling_rate); - return count; -} - -#ifdef CONFIG_HAS_EARLYSUSPEND -static ssize_t store_screenoff_maxfreq(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1 || input > 1) - return -EINVAL; - dbs_tuners_ins.screenoff_maxfreq = input; - return count; -} -#endif - -define_one_global_rw(sampling_rate); -define_one_global_rw(io_is_busy); -define_one_global_rw(up_threshold); -define_one_global_rw(ignore_nice_load); -define_one_global_rw(powersave_bias); -define_one_global_rw(min_timeinstate); -#ifdef CONFIG_HAS_EARLYSUSPEND -define_one_global_rw(screenoff_maxfreq); -#endif - -static struct attribute *dbs_attributes[] = { - &sampling_rate_min.attr, - &sampling_rate.attr, - &up_threshold.attr, - &ignore_nice_load.attr, - &powersave_bias.attr, - &io_is_busy.attr, - &min_timeinstate.attr, -#ifdef CONFIG_HAS_EARLYSUSPEND - &screenoff_maxfreq.attr, -#endif - NULL -}; - -static struct attribute_group dbs_attr_group = { - .attrs = dbs_attributes, - .name = "lazy", -}; - -/************************** sysfs end ************************/ - -static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) -{ - unsigned int max_load_freq; - - struct cpufreq_policy *policy; - unsigned int j; - - this_dbs_info->freq_lo = 0; - policy = this_dbs_info->cur_policy; - - current_sampling_rate = dbs_tuners_ins.sampling_rate; - -#ifdef CONFIG_HAS_EARLYSUSPEND - if (suspended && dbs_tuners_ins.screenoff_maxfreq) { - /* if we are already at full speed then break out early */ - if (!dbs_tuners_ins.powersave_bias) { - if (policy->cur == policy->max) - return; - - __cpufreq_driver_target(policy, policy->max, - CPUFREQ_RELATION_H); - } else { - int freq = powersave_bias_target(policy, policy->max, - CPUFREQ_RELATION_H); - __cpufreq_driver_target(policy, freq, - CPUFREQ_RELATION_L); - } - current_sampling_rate = 
dbs_tuners_ins.min_timeinstate; - return; - } -#endif - - /* - * Every sampling_rate, we check, if current idle time is less - * than 20% (default), then we try to increase frequency - * Every sampling_rate, we look for a the lowest - * frequency which can sustain the load while keeping idle time over - * 30%. If such a frequency exist, we try to decrease to this frequency. - * - * Any frequency increase takes it to the maximum frequency. - * Frequency reduction happens at minimum steps of - * 5% (default) of current frequency - */ - - /* Get Absolute Load - in terms of freq */ - max_load_freq = 0; - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; - unsigned int idle_time, wall_time, iowait_time; - unsigned int load, load_freq; - int freq_avg; - - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - - cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); - cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); - - wall_time = (unsigned int) cputime64_sub(cur_wall_time, - j_dbs_info->prev_cpu_wall); - j_dbs_info->prev_cpu_wall = cur_wall_time; - - idle_time = (unsigned int) cputime64_sub(cur_idle_time, - j_dbs_info->prev_cpu_idle); - j_dbs_info->prev_cpu_idle = cur_idle_time; - - iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, - j_dbs_info->prev_cpu_iowait); - j_dbs_info->prev_cpu_iowait = cur_iowait_time; - - if (dbs_tuners_ins.ignore_nice) { - cputime64_t cur_nice; - unsigned long cur_nice_jiffies; - - cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, - j_dbs_info->prev_cpu_nice); - /* - * Assumption: nice time between sampling periods will - * be less than 2^32 jiffies for 32 bit sys - */ - cur_nice_jiffies = (unsigned long) - cputime64_to_jiffies64(cur_nice); - - j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; - idle_time += jiffies_to_usecs(cur_nice_jiffies); - } - - /* - * For the purpose of lazy, waiting for disk IO is an - * indication that you're performance critical, and not that - * the system is actually idle. So subtract the iowait time - * from the cpu idle time. - */ - - if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) - idle_time -= iowait_time; - - if (unlikely(!wall_time || wall_time < idle_time)) - continue; - - load = 100 * (wall_time - idle_time) / wall_time; - - freq_avg = __cpufreq_driver_getavg(policy, j); - if (freq_avg <= 0) - freq_avg = policy->cur; - - load_freq = load * freq_avg; - if (load_freq > max_load_freq) - max_load_freq = load_freq; - } - - /* Check for frequency increase */ - if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) { - /* if we are already at full speed then break out early */ - if (!dbs_tuners_ins.powersave_bias) { - if (policy->cur == policy->max) - return; - - __cpufreq_driver_target(policy, policy->max, - CPUFREQ_RELATION_H); - } else { - int freq = powersave_bias_target(policy, policy->max, - CPUFREQ_RELATION_H); - __cpufreq_driver_target(policy, freq, - CPUFREQ_RELATION_L); - } - current_sampling_rate = dbs_tuners_ins.min_timeinstate; - return; - } - - /* Check for frequency decrease */ - /* if we cannot reduce the frequency anymore, break out early */ - if (policy->cur == policy->min) - return; - - /* - * The optimal frequency is the frequency that is the lowest that - * can support the current CPU usage without triggering the up - * policy. To be safe, we focus 10 points under the threshold. 
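Both decisions sketched in these comments (ramp to the maximum when the load-weighted frequency crosses up_threshold, otherwise look for the lowest frequency that stays a few points under it) reduce to simple integer arithmetic; a standalone illustration with invented tunable and load values follows.

#include <stdio.h>

/* Illustrative integer arithmetic only; the governor reads these values
 * from its tunables and from per-CPU load accounting. */
static void decide(unsigned int load, unsigned int cur,
                   unsigned int up_threshold, unsigned int down_differential)
{
        unsigned int max_load_freq = load * cur;   /* load in %, cur in kHz */

        if (max_load_freq > up_threshold * cur) {
                printf("load %u%%: ramp to policy->max\n", load);
        } else if (max_load_freq < (up_threshold - down_differential) * cur) {
                /* lowest frequency that keeps us a few points under the
                 * up threshold */
                unsigned int freq_next =
                        max_load_freq / (up_threshold - down_differential);
                printf("load %u%%: aim for %u kHz\n", load, freq_next);
        } else {
                printf("load %u%%: stay at %u kHz\n", load, cur);
        }
}

int main(void)
{
        decide(90, 600000, 80, 10);   /* 90*600000 > 80*600000 -> max    */
        decide(35, 600000, 80, 10);   /* 35*600000 < 70*600000 -> 300000 */
        return 0;
}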
- */ - if (max_load_freq < - (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * - policy->cur) { - unsigned int freq_next; - freq_next = max_load_freq / - (dbs_tuners_ins.up_threshold - - dbs_tuners_ins.down_differential); - - if (freq_next < policy->min) - freq_next = policy->min; - - if (!dbs_tuners_ins.powersave_bias) { - __cpufreq_driver_target(policy, freq_next, - CPUFREQ_RELATION_L); - } else { - int freq = powersave_bias_target(policy, freq_next, - CPUFREQ_RELATION_L); - __cpufreq_driver_target(policy, freq, - CPUFREQ_RELATION_L); - } - current_sampling_rate = dbs_tuners_ins.min_timeinstate; - } -} - -static void do_dbs_timer(struct work_struct *work) -{ - struct cpu_dbs_info_s *dbs_info = - container_of(work, struct cpu_dbs_info_s, work.work); - unsigned int cpu = dbs_info->cpu; - int delay; - int sample_type = dbs_info->sample_type; - - mutex_lock(&dbs_info->timer_mutex); - - /* Common NORMAL_SAMPLE setup */ - dbs_info->sample_type = DBS_NORMAL_SAMPLE; - if (!dbs_tuners_ins.powersave_bias || - sample_type == DBS_NORMAL_SAMPLE) { - dbs_check_cpu(dbs_info); - if (dbs_info->freq_lo) { - /* Setup timer for SUB_SAMPLE */ - dbs_info->sample_type = DBS_SUB_SAMPLE; - delay = dbs_info->freq_hi_jiffies; - } else { - delay = usecs_to_jiffies(current_sampling_rate); - if (num_online_cpus() > 1) - delay -= jiffies % delay; - } - } else { - __cpufreq_driver_target(dbs_info->cur_policy, - dbs_info->freq_lo, CPUFREQ_RELATION_H); - delay = usecs_to_jiffies(current_sampling_rate); - if (num_online_cpus() > 1) - delay -= jiffies % delay; - } - schedule_delayed_work_on(cpu, &dbs_info->work, delay); - mutex_unlock(&dbs_info->timer_mutex); -} - -static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) -{ - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(current_sampling_rate); - delay -= jiffies % delay; - - dbs_info->sample_type = DBS_NORMAL_SAMPLE; - INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); - schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); -} - -static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) -{ - cancel_delayed_work_sync(&dbs_info->work); -} - -/* - * Not all CPUs want IO time to be accounted as busy; this dependson how - * efficient idling at a higher frequency/voltage is. - * Pavel Machek says this is not so for various generations of AMD and old - * Intel systems. - * Mike Chan (androidlcom) calis this is also not true for ARM. - * Because of this, whitelist specific known (series) of CPUs by default, and - * leave all others up to the user. - */ -static int should_io_be_busy(void) -{ -#if defined(CONFIG_X86) - /* - * For Intel, Core 2 (model 15) andl later have an efficient idle. 
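The delay -= jiffies % delay adjustment in do_dbs_timer above is only there to phase-align sampling across CPUs; a standalone sketch with arbitrary numbers shows the effect.

#include <stdio.h>

/* Phase-aligning a sampling delay so that all CPUs wake on roughly the
 * same jiffy boundary. Values are arbitrary. */
int main(void)
{
        unsigned long jiffies_now = 100037;  /* pretend current jiffies    */
        unsigned long delay = 20;            /* sampling period in jiffies */

        /* Shorten the first period so the timer expires on a multiple of
         * the period: 100037 % 20 = 17, so fire after 3 jiffies, i.e. at
         * jiffy 100040, and every CPU doing the same lands together. */
        delay -= jiffies_now % delay;

        printf("first expiry in %lu jiffies\n", delay); /* prints 3 */
        return 0;
}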
- */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - boot_cpu_data.x86 == 6 && - boot_cpu_data.x86_model >= 15) - return 1; -#endif - return 0; -} - -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int j; - int rc; - - this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - mutex_lock(&dbs_mutex); - - dbs_enable++; - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) { - j_dbs_info->prev_cpu_nice = - kstat_cpu(j).cpustat.nice; - } - } - this_dbs_info->cpu = cpu; - lazy_powersave_bias_init_cpu(cpu); - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 1) { - unsigned int latency; - - rc = sysfs_create_group(cpufreq_global_kobject, - &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - /* policy latency is in nS. Convert it to uS first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - /* Bring kernel and HW constraints together */ - min_sampling_rate = max(min_sampling_rate, - MIN_LATENCY_MULTIPLIER * latency); - dbs_tuners_ins.sampling_rate = min_sampling_rate; - current_sampling_rate = dbs_tuners_ins.sampling_rate; - dbs_tuners_ins.min_timeinstate = latency * LATENCY_MULTIPLIER; - dbs_tuners_ins.io_is_busy = should_io_be_busy(); - } - mutex_unlock(&dbs_mutex); - - mutex_init(&this_dbs_info->timer_mutex); - dbs_timer_init(this_dbs_info); - break; - - case CPUFREQ_GOV_STOP: - dbs_timer_exit(this_dbs_info); - - mutex_lock(&dbs_mutex); - mutex_destroy(&this_dbs_info->timer_mutex); - dbs_enable--; - mutex_unlock(&dbs_mutex); - if (!dbs_enable) - sysfs_remove_group(cpufreq_global_kobject, - &dbs_attr_group); - - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&this_dbs_info->timer_mutex); - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); - mutex_unlock(&this_dbs_info->timer_mutex); - break; - } - return 0; -} - -static int __init cpufreq_gov_dbs_init(void) -{ - cputime64_t wall; - u64 idle_time; - int cpu = get_cpu(); - - idle_time = get_cpu_idle_time_us(cpu, &wall); - put_cpu(); - if (idle_time != -1ULL) { - /* Idle micro accounting is supported. Use finer thresholds */ - dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; - dbs_tuners_ins.down_differential = - MICRO_FREQUENCY_DOWN_DIFFERENTIAL; - /* - * In no_hz/micro accounting case we set the minimum frequency - * not depending on HZ, but fixed (very low). The deferred - * timer might skip some samples if idle/sleeping as needed. 
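The latency handling in the CPUFREQ_GOV_START branch above can be followed with a standalone sketch; the two multiplier constants here are placeholders, not the values this driver actually defines.

#include <stdio.h>

#define MIN_LATENCY_MULTIPLIER_SKETCH 100   /* placeholder value */
#define LATENCY_MULTIPLIER_SKETCH     1000  /* placeholder value */

static unsigned int max_u(unsigned int a, unsigned int b)
{
        return a > b ? a : b;
}

int main(void)
{
        unsigned int transition_latency_ns = 300000; /* from the driver    */
        unsigned int min_sampling_rate = 10000;      /* current floor, us  */

        /* policy latency is reported in ns; work in us like the governor */
        unsigned int latency_us = transition_latency_ns / 1000;
        if (latency_us == 0)
                latency_us = 1;

        min_sampling_rate = max_u(min_sampling_rate,
                                  MIN_LATENCY_MULTIPLIER_SKETCH * latency_us);
        unsigned int min_timeinstate = latency_us * LATENCY_MULTIPLIER_SKETCH;

        printf("sampling floor %u us, min time in state %u us\n",
               min_sampling_rate, min_timeinstate); /* 30000 and 300000 */
        return 0;
}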
- */ - min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; - } else { - /* For correct statistics, we need 10 ticks for each measure */ - min_sampling_rate = - MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); - } - -#ifdef CONFIG_HAS_EARLYSUSPEND - register_early_suspend(&lazy_suspend); -#endif - - return cpufreq_register_governor(&cpufreq_gov_lazy); -} - -static void __exit cpufreq_gov_dbs_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_lazy); -} - - -MODULE_AUTHOR("Venkatesh Pallipadi "); -MODULE_AUTHOR("Alexey Starikovskiy "); -MODULE_AUTHOR("Ezekeel "); -MODULE_DESCRIPTION("'cpufreq_lazy' - A dynamic cpufreq governor for " - "Low Latency Frequency Transition capable processors"); -MODULE_LICENSE("GPL"); - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_LAZY -fs_initcall(cpufreq_gov_dbs_init); -#else -module_init(cpufreq_gov_dbs_init); -#endif -module_exit(cpufreq_gov_dbs_exit); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index dceb54ca..a301beed 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -382,9 +382,6 @@ extern struct cpufreq_governor cpufreq_gov_lulzactive; #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS) extern struct cpufreq_governor cpufreq_gov_smartass; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_smartass) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_LAZY) -extern struct cpufreq_governor cpufreq_gov_lazy; -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_lazy) #endif From 27e15b5d5cbfb9a5dca3a6faebe975efc71c9f81 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 10:25:10 -0400 Subject: [PATCH 22/35] Revert "Added LulzactiveV2,Lagfree,Smartass,SmartassV2,Minmax,InteractiveX Governor" This reverts commit 7e0f70e6304a15249e64bd4d1fb5da7f5fdef83f. --- drivers/cpufreq/Kconfig | 116 +-- drivers/cpufreq/Makefile | 24 +- drivers/cpufreq/cpufreq_interactivex.c | 381 -------- drivers/cpufreq/cpufreq_lagfree.c | 662 -------------- drivers/cpufreq/cpufreq_lulzactive.c | 1143 ------------------------ drivers/cpufreq/cpufreq_minmax.c | 575 ------------ drivers/cpufreq/cpufreq_smartass.c | 642 ------------- drivers/cpufreq/cpufreq_smartass2.c | 868 ------------------ include/linux/cpufreq.h | 63 +- 9 files changed, 16 insertions(+), 4458 deletions(-) delete mode 100644 drivers/cpufreq/cpufreq_interactivex.c delete mode 100644 drivers/cpufreq/cpufreq_lagfree.c delete mode 100644 drivers/cpufreq/cpufreq_lulzactive.c delete mode 100644 drivers/cpufreq/cpufreq_minmax.c delete mode 100644 drivers/cpufreq/cpufreq_smartass.c delete mode 100644 drivers/cpufreq/cpufreq_smartass2.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index d794fc4d..2e07ae54 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -230,14 +230,6 @@ config CPU_FREQ_GOV_BADASS module will be called cpufreq_badass. If in doubt, say N. -config CPU_FREQ_DEFAULT_GOV_MINMAX - bool "minmax" - select CPU_FREQ_GOV_MINMAX - select CPU_FREQ_GOV_PERFORMANCE - Use the CPUFreq governor 'minmax' as default. This minimizes the - frequency jumps does by the governor. This is aimed at maximizing - both perfomance and battery life. - config CPU_FREQ_GOV_CONSERVATIVE tristate "'conservative' cpufreq governor" depends on CPU_FREQ @@ -257,51 +249,10 @@ config CPU_FREQ_GOV_CONSERVATIVE To compile this driver as a module, choose M here: the module will be called cpufreq_conservative. - For details, take a look at linux/Documentation/cpu-freq. 
- -config CPU_FREQ_DEFAULT_GOV_SMARTASS2 - bool "smartass2" - select CPU_FREQ_GOV_SMARTASS2 - Use the CPUFreq governor 'smartassV2' as default. - -config CPU_FREQ_DEFAULT_GOV_LAGFREE - bool "lagfree" - select CPU_FREQ_GOV_LAGFREE - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lagfree' as default. This allows - you to get a full dynamic frequency capable system by simply - loading your cpufreq low-level hardware driver. - Be aware that not all cpufreq drivers support the lagfree - governor. If unsure have a look at the help section of the - driver. Fallback governor will be the performance governor. -config CPU_FREQ_DEFAULT_GOV_INTERACTIVEX - bool "interactiveX" - select CPU_FREQ_GOV_INTERACTIVEX - help - Use the CPUFreq governor 'interactivex' as default. This allows - you to get a full dynamic cpu frequency capable system by simply - loading your cpufreq low-level hardware driver, using the - 'interactivex' governor for latency-sensitive workloads. + For details, take a look at linux/Documentation/cpu-freq. If in doubt, say N. -config CPU_FREQ_DEFAULT_GOV_LULZACTIVE - bool "lulzactive" - select CPU_FREQ_GOV_LULZACTIVE - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lulzactive' as default. - -config CPU_FREQ_DEFAULT_GOV_SMARTASS - bool "smartass" - select CPU_FREQ_GOV_SMARTASS - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'smartass' as default. - -endchoice - config CPU_FREQ_GOV_DANCEDANCE tristate "'dancedance' cpufreq governor" depends on CPU_FREQ @@ -374,68 +325,10 @@ config CPU_FREQ_GOV_USERSPACE If in doubt, say Y. -config CPU_FREQ_GOV_LULZACTIVE - tristate "'lulzactive' cpufreq governor" - depends on CPU_FREQ - help - 'lulzactive' - a new interactive governor by Tegrak! - config CPU_FREQ_GOV_WHEATLEY tristate "'wheatley' cpufreq governor" depends on CPU_FREQ - If in doubt, say N. - -config CPU_FREQ_GOV_SMARTASS - tristate "'smartass' cpufreq governor" - depends on CPU_FREQ - help - 'smartass' - a "smart" optimized governor for the hero! - - If in doubt, say N. - -config CPU_FREQ_GOV_MINMAX - tristate "'minmax' cpufreq governor" - depends on CPU_FREQ - help - 'minmax' - this driver tries to minimize the frequency jumps by limiting - the the selected frequencies to either the min or the max frequency of - the policy. The frequency is selected according to the load. - -config CPU_FREQ_GOV_SMARTASS2 - tristate "'smartassV2' cpufreq governor" - depends on CPU_FREQ - help - 'smartassV2' - a "smart" optimized governor for the hero! - -config CPU_FREQ_GOV_INTERACTIVEX -tristate "'interactiveX' cpufreq policy governor" - help - 'interactiveX' - Modified version of interactive with sleep+wake code. - -config CPU_FREQ_GOV_LAGFREE - tristate "'lagfree' cpufreq governor" - depends on CPU_FREQ - help - 'lagfree' - this driver is rather similar to the 'ondemand' - governor both in its source code and its purpose, the difference is - its optimisation for better suitability in a battery powered - environment. The frequency is gracefully increased and decreased - rather than jumping to 100% when speed is required. - - To compile this driver as a module, choose M here: the - module will be called cpufreq_lagfree. - - For details, take a look at linux/Documentation/cpu-freq. - - If in doubt, say N. - -config CPU_FREQ_MIN_TICKS - int "Ticks between governor polling interval." - default 10 - help - Minimum number of ticks between polling interval for governors. 
- config SEC_DVFS bool "DVFS job" default n @@ -446,13 +339,6 @@ config SEC_DVFS_BOOSTER default y depends on SEC_DVFS -config CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER - int "Sampling rate multiplier for governors." - default 1000 - help - Sampling latency rate multiplied by the cpu switch latency. - Affects governor polling. - menu "x86 CPU frequency scaling drivers" depends on X86 source "drivers/cpufreq/Kconfig.x86" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index d9355369..be135afd 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -20,14 +20,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o -obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o -obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o -obj-$(CONFIG_CPU_FREQ_GOV_MINMAX) += cpufreq_minmax.o -obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS2) += cpufreq_smartass2.o -obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS) += cpufreq_smartass.o -obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVEX) += cpufreq_interactivex.o -obj-$(CONFIG_CPU_FREQ_GOV_LAGFREE) += cpufreq_lagfree.o -obj-$(CONFIG_CPU_FREQ_GOV_LULZACTIVE) += cpufreq_lulzactive.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o @@ -56,7 +48,19 @@ obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o -##################################################################################d - +################################################################################## # ARM SoC drivers obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o +obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o +obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o +obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o +obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o +obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o +obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o +obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o +obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o + +################################################################################## +# PowerPC platform drivers +obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o +obj-$(CONFIG_MSM_DCVS) += cpufreq_gov_msm.o diff --git a/drivers/cpufreq/cpufreq_interactivex.c b/drivers/cpufreq/cpufreq_interactivex.c deleted file mode 100644 index 72ca6291..00000000 --- a/drivers/cpufreq/cpufreq_interactivex.c +++ /dev/null @@ -1,381 +0,0 @@ -/* -* drivers/cpufreq/cpufreq_interactivex.c -* -* Copyright (C) 2010 Google, Inc. -* -* This software is licensed under the terms of the GNU General Public -* License version 2, as published by the Free Software Foundation, and -* may be copied, distributed, and modified under those terms. -* -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -* GNU General Public License for more details. 
-* -* Author: Mike Chan (mike@android.com) - modified for suspend/wake by imoseyon -* -*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -static void (*pm_idle_old)(void); -static atomic_t active_count = ATOMIC_INIT(0); - -static DEFINE_PER_CPU(struct timer_list, cpu_timer); - -static DEFINE_PER_CPU(u64, time_in_idle); -static DEFINE_PER_CPU(u64, idle_exit_time); - -static struct cpufreq_policy *policy; -static unsigned int target_freq; - -/* Workqueues handle frequency scaling */ -static struct workqueue_struct *up_wq; -static struct workqueue_struct *down_wq; -static struct work_struct freq_scale_work; - -static u64 freq_change_time; -static u64 freq_change_time_in_idle; - -static cpumask_t work_cpumask; - -static unsigned int suspended = 0; -static unsigned int enabled = 0; - -/* -* The minimum ammount of time to spend at a frequency before we can ramp down, -* default is 50ms. -*/ -#define DEFAULT_MIN_SAMPLE_TIME 50000; -static unsigned long min_sample_time; - -#define FREQ_THRESHOLD 998400; -#define RESUME_SPEED 998400; - -static int cpufreq_governor_interactivex(struct cpufreq_policy *policy, -unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX -static -#endif -struct cpufreq_governor cpufreq_gov_interactivex = { -.name = "interactiveX", -.governor = cpufreq_governor_interactivex, -#if defined(CONFIG_ARCH_MSM_SCORPION) -.max_transition_latency = 8000000, -#else -.max_transition_latency = 10000000, -#endif -.owner = THIS_MODULE, -}; - -static void cpufreq_interactivex_timer(unsigned long data) -{ -u64 delta_idle; -u64 update_time; -u64 *cpu_time_in_idle; -u64 *cpu_idle_exit_time; -struct timer_list *t; - -u64 now_idle = get_cpu_idle_time_us(data, -&update_time); - - -cpu_time_in_idle = &per_cpu(time_in_idle, data); -cpu_idle_exit_time = &per_cpu(idle_exit_time, data); - -if (update_time == *cpu_idle_exit_time) -return; - -delta_idle = cputime64_sub(now_idle, *cpu_time_in_idle); - -/* Scale up if there were no idle cycles since coming out of idle */ -if (delta_idle == 0) { -if (policy->cur == policy->max) -return; - -if (nr_running() < 1) -return; - -target_freq = policy->max; - -cpumask_set_cpu(data, &work_cpumask); -queue_work(up_wq, &freq_scale_work); -return; -} - -/* -* There is a window where if the cpu utlization can go from low to high -* between the timer expiring, delta_idle will be > 0 and the cpu will -* be 100% busy, preventing idle from running, and this timer from -* firing. So setup another timer to fire to check cpu utlization. -* Do not setup the timer if there is no scheduled work. -*/ -t = &per_cpu(cpu_timer, data); -if (!timer_pending(t) && nr_running() > 0) { -*cpu_time_in_idle = get_cpu_idle_time_us( -data, cpu_idle_exit_time); -mod_timer(t, jiffies + 2); -} - -if (policy->cur == policy->min) -return; - -/* -* Do not scale down unless we have been at this frequency for the -* minimum sample time. -*/ -if (cputime64_sub(update_time, freq_change_time) < min_sample_time) -return; - -target_freq = policy->min; -cpumask_set_cpu(data, &work_cpumask); -queue_work(down_wq, &freq_scale_work); -} - -static void cpufreq_idle(void) -{ -struct timer_list *t; -u64 *cpu_time_in_idle; -u64 *cpu_idle_exit_time; - -pm_idle_old(); - -if (!cpumask_test_cpu(smp_processor_id(), policy->cpus)) -return; - -/* Timer to fire in 1-2 ticks, jiffie aligned. 
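Condensed, the timer above ramps straight to the maximum whenever the CPU accumulated no idle time at all since it last left idle, and it only considers dropping after min_sample_time at the current frequency; a standalone model of that decision with invented timestamps follows.

#include <stdio.h>

/* Condensed model of the interactiveX timer decision. The real code
 * works on per-CPU idle timestamps; these are invented numbers. */
int main(void)
{
        unsigned long long idle_at_exit = 5000000;  /* us of idle then      */
        unsigned long long idle_now     = 5000000;  /* us of idle now       */
        unsigned long long min_sample_time = 50000; /* us, 50 ms default    */
        unsigned long long at_cur_freq  = 20000;    /* us spent at cur freq */
        unsigned int runnable_tasks = 2;

        if (idle_now - idle_at_exit == 0 && runnable_tasks >= 1) {
                /* CPU never idled since leaving idle: go straight to max */
                printf("queue scale-up to policy->max\n");
        } else if (at_cur_freq >= min_sample_time) {
                /* only consider dropping after min_sample_time at this freq */
                printf("queue scale-down towards policy->min\n");
        }
        return 0;
}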
*/ -t = &per_cpu(cpu_timer, smp_processor_id()); -cpu_idle_exit_time = &per_cpu(idle_exit_time, smp_processor_id()); -cpu_time_in_idle = &per_cpu(time_in_idle, smp_processor_id()); - -if (timer_pending(t) == 0) { -*cpu_time_in_idle = get_cpu_idle_time_us( -smp_processor_id(), cpu_idle_exit_time); -mod_timer(t, jiffies + 2); -} -} - -/* -* Choose the cpu frequency based off the load. For now choose the minimum -* frequency that will satisfy the load, which is not always the lower power. -*/ -static unsigned int cpufreq_interactivex_calc_freq(unsigned int cpu) -{ -unsigned int delta_time; -unsigned int idle_time; -unsigned int cpu_load; -unsigned int newfreq; -u64 current_wall_time; -u64 current_idle_time;; - -current_idle_time = get_cpu_idle_time_us(cpu, ¤t_wall_time); - -idle_time = (unsigned int) current_idle_time - freq_change_time_in_idle; -delta_time = (unsigned int) current_wall_time - freq_change_time; - -cpu_load = 100 * (delta_time - idle_time) / delta_time; - -if (cpu_load > 98) newfreq = policy->max; -else newfreq = policy->cur * cpu_load / 100; - -return newfreq; -} - - -/* We use the same work function to sale up and down */ -static void cpufreq_interactivex_freq_change_time_work(struct work_struct *work) -{ -unsigned int cpu; -unsigned int newtarget; -cpumask_t tmp_mask = work_cpumask; -newtarget = FREQ_THRESHOLD; - -for_each_cpu(cpu, &tmp_mask) { -if (!suspended) { -if (target_freq == policy->max) { -if (nr_running() == 1) { -cpumask_clear_cpu(cpu, &work_cpumask); -return; -} -// __cpufreq_driver_target(policy, target_freq, CPUFREQ_RELATION_H); -__cpufreq_driver_target(policy, newtarget, CPUFREQ_RELATION_H); -} else { -target_freq = cpufreq_interactivex_calc_freq(cpu); -__cpufreq_driver_target(policy, target_freq, -CPUFREQ_RELATION_L); -} -} -freq_change_time_in_idle = get_cpu_idle_time_us(cpu, &freq_change_time); -cpumask_clear_cpu(cpu, &work_cpumask); -} - - -} - -static ssize_t show_min_sample_time(struct kobject *kobj, -struct attribute *attr, char *buf) -{ -return sprintf(buf, "%lu\n", min_sample_time); -} - -static ssize_t store_min_sample_time(struct kobject *kobj, -struct attribute *attr, const char *buf, size_t count) -{ -return strict_strtoul(buf, 0, &min_sample_time); -} - -static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644, -show_min_sample_time, store_min_sample_time); - -static struct attribute *interactivex_attributes[] = { -&min_sample_time_attr.attr, -NULL, -}; - -static struct attribute_group interactivex_attr_group = { -.attrs = interactivex_attributes, -.name = "interactiveX", -}; - -static void interactivex_suspend(int suspend) -{ -unsigned int max_speed; - -max_speed = RESUME_SPEED; - -if (!enabled) return; - if (!suspend) { // resume at max speed: -suspended = 0; - __cpufreq_driver_target(policy, max_speed, CPUFREQ_RELATION_L); - pr_info("[imoseyon] interactiveX awake at %d\n", policy->cur); - } else { -suspended = 1; - __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); - pr_info("[imoseyon] interactiveX suspended at %d\n", policy->cur); - } -} - -static void interactivex_early_suspend(struct early_suspend *handler) { - interactivex_suspend(1); -} - -static void interactivex_late_resume(struct early_suspend *handler) { - interactivex_suspend(0); -} - -static struct early_suspend interactivex_power_suspend = { - .suspend = interactivex_early_suspend, - .resume = interactivex_late_resume, - .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, -}; - -static int cpufreq_governor_interactivex(struct cpufreq_policy 
*new_policy, -unsigned int event) -{ -int rc; -switch (event) { -case CPUFREQ_GOV_START: -if (!cpu_online(new_policy->cpu)) -return -EINVAL; - -/* -* Do not register the idle hook and create sysfs -* entries if we have already done so. -*/ -if (atomic_inc_return(&active_count) > 1) -return 0; - -rc = sysfs_create_group(cpufreq_global_kobject, -&interactivex_attr_group); -if (rc) -return rc; - -pm_idle_old = pm_idle; -pm_idle = cpufreq_idle; -policy = new_policy; -enabled = 1; - register_early_suspend(&interactivex_power_suspend); - pr_info("[imoseyon] interactiveX active\n"); -break; - -case CPUFREQ_GOV_STOP: -if (atomic_dec_return(&active_count) > 1) -return 0; - -sysfs_remove_group(cpufreq_global_kobject, -&interactivex_attr_group); - -pm_idle = pm_idle_old; -del_timer(&per_cpu(cpu_timer, new_policy->cpu)); -enabled = 0; - unregister_early_suspend(&interactivex_power_suspend); - pr_info("[imoseyon] interactiveX inactive\n"); -break; - -case CPUFREQ_GOV_LIMITS: -if (new_policy->max < new_policy->cur) -__cpufreq_driver_target(new_policy, -new_policy->max, CPUFREQ_RELATION_H); -else if (new_policy->min > new_policy->cur) -__cpufreq_driver_target(new_policy, -new_policy->min, CPUFREQ_RELATION_L); -break; -} -return 0; -} - -static int __init cpufreq_interactivex_init(void) -{ -unsigned int i; -struct timer_list *t; -min_sample_time = DEFAULT_MIN_SAMPLE_TIME; - -/* Initalize per-cpu timers */ -for_each_possible_cpu(i) { -t = &per_cpu(cpu_timer, i); -init_timer_deferrable(t); -t->function = cpufreq_interactivex_timer; -t->data = i; -} - -/* Scale up is high priority */ -up_wq = create_workqueue("kinteractive_up"); -down_wq = create_workqueue("knteractive_down"); - -INIT_WORK(&freq_scale_work, cpufreq_interactivex_freq_change_time_work); - - pr_info("[imoseyon] interactiveX enter\n"); -return cpufreq_register_governor(&cpufreq_gov_interactivex); -} - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX -fs_initcall(cpufreq_interactivex_init); -#else -module_init(cpufreq_interactivex_init); -#endif - -static void __exit cpufreq_interactivex_exit(void) -{ - pr_info("[imoseyon] interactiveX exit\n"); -cpufreq_unregister_governor(&cpufreq_gov_interactivex); -destroy_workqueue(up_wq); -destroy_workqueue(down_wq); -} - -module_exit(cpufreq_interactivex_exit); - -MODULE_AUTHOR("Mike Chan "); -MODULE_DESCRIPTION("'cpufreq_interactiveX' - A cpufreq governor for " -"Latency sensitive workloads"); -MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cpufreq_lagfree.c b/drivers/cpufreq/cpufreq_lagfree.c deleted file mode 100644 index bf274a11..00000000 --- a/drivers/cpufreq/cpufreq_lagfree.c +++ /dev/null @@ -1,662 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_lagfree.c - * - * Copyright (C) 2001 Russell King - * (C) 2003 Venkatesh Pallipadi . - * Jun Nakajima - * (C) 2004 Alexander Clouter - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ - -#define DEF_FREQUENCY_UP_THRESHOLD (50) -#define DEF_FREQUENCY_DOWN_THRESHOLD (15) -#define FREQ_STEP_DOWN (160000) -#define FREQ_SLEEP_MAX (320000) -#define FREQ_AWAKE_MIN (480000) -#define FREQ_STEP_UP_SLEEP_PERCENT (20) - -/* - * The polling frequency of this governor depends on the capability of - * the processor. Default polling frequency is 1000 times the transition - * latency of the processor. The governor will work on any processor with - * transition latency <= 10mS, using appropriate sampling - * rate. - * For CPUs with transition latency > 10mS (mostly drivers - * with CPUFREQ_ETERNAL), this governor will not work. - * All times here are in uS. - */ -static unsigned int def_sampling_rate; -unsigned int suspended = 0; -#define MIN_SAMPLING_RATE_RATIO (2) -/* for correct statistics, we need at least 10 ticks between each measure */ -#define MIN_STAT_SAMPLING_RATE \ - (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(CONFIG_CPU_FREQ_MIN_TICKS)) -#define MIN_SAMPLING_RATE \ - (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) -#define MAX_SAMPLING_RATE (500 * def_sampling_rate) -#define DEF_SAMPLING_DOWN_FACTOR (4) -#define MAX_SAMPLING_DOWN_FACTOR (10) -#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) - -static void do_dbs_timer(struct work_struct *work); - -struct cpu_dbs_info_s { - struct cpufreq_policy *cur_policy; - unsigned int prev_cpu_idle_up; - unsigned int prev_cpu_idle_down; - unsigned int enable; - unsigned int down_skip; - unsigned int requested_freq; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); - -static unsigned int dbs_enable; /* number of CPUs using this policy */ - -/* - * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug - * lock and dbs_mutex. cpu_hotplug lock should always be held before - * dbs_mutex. If any function that can potentially take cpu_hotplug lock - * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then - * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock - * is recursive for the same process. 
-Venki - */ -static DEFINE_MUTEX (dbs_mutex); -static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer); - -struct dbs_tuners { - unsigned int sampling_rate; - unsigned int sampling_down_factor; - unsigned int up_threshold; - unsigned int down_threshold; - unsigned int ignore_nice; - //unsigned int freq_step; -}; - -static struct dbs_tuners dbs_tuners_ins = { - .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, - .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, - .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, - .ignore_nice = 1, - //.freq_step = 5, -}; - -static inline unsigned int get_cpu_idle_time(unsigned int cpu) -{ - unsigned int add_nice = 0, ret; - - if (dbs_tuners_ins.ignore_nice) - add_nice = kstat_cpu(cpu).cpustat.nice; - - ret = kstat_cpu(cpu).cpustat.idle + - kstat_cpu(cpu).cpustat.iowait + - add_nice; - - return ret; -} - -/* keep track of frequency transitions */ -static int -dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, - void *data) -{ - struct cpufreq_freqs *freq = data; - struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, - freq->cpu); - - if (!this_dbs_info->enable) - return 0; - - this_dbs_info->requested_freq = freq->new; - - return 0; -} - -static struct notifier_block dbs_cpufreq_notifier_block = { - .notifier_call = dbs_cpufreq_notifier -}; - -/************************** sysfs interface ************************/ -static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) -{ - return sprintf (buf, "%u\n", MAX_SAMPLING_RATE); -} - -static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) -{ - return sprintf (buf, "%u\n", MIN_SAMPLING_RATE); -} - -#define define_one_ro(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0444, show_##_name, NULL) - -define_one_ro(sampling_rate_max); -define_one_ro(sampling_rate_min); - -/* cpufreq_lagfree Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct cpufreq_policy *unused, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ -} -show_one(sampling_rate, sampling_rate); -show_one(sampling_down_factor, sampling_down_factor); -show_one(up_threshold, up_threshold); -show_one(down_threshold, down_threshold); -show_one(ignore_nice_load, ignore_nice); -//show_one(freq_step, freq_step); - -static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf (buf, "%u", &input); - if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.sampling_down_factor = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_sampling_rate(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf (buf, "%u", &input); - - mutex_lock(&dbs_mutex); - if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) { - mutex_unlock(&dbs_mutex); - return -EINVAL; - } - - dbs_tuners_ins.sampling_rate = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_up_threshold(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf (buf, "%u", &input); - - mutex_lock(&dbs_mutex); - if (ret != 1 || input > 100 || input <= dbs_tuners_ins.down_threshold) { - mutex_unlock(&dbs_mutex); - return -EINVAL; - } - - dbs_tuners_ins.up_threshold = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static 
ssize_t store_down_threshold(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf (buf, "%u", &input); - - mutex_lock(&dbs_mutex); - if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) { - mutex_unlock(&dbs_mutex); - return -EINVAL; - } - - dbs_tuners_ins.down_threshold = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - unsigned int j; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - if (input > 1) - input = 1; - - mutex_lock(&dbs_mutex); - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - mutex_unlock(&dbs_mutex); - return count; - } - dbs_tuners_ins.ignore_nice = input; - - /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */ - for_each_online_cpu(j) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cpu_dbs_info, j); - j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j); - j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up; - } - mutex_unlock(&dbs_mutex); - - return count; -} - -/*static ssize_t store_freq_step(struct cpufreq_policy *policy, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - if (input > 100) - input = 100; - - / * no need to test here if freq_step is zero as the user might actually - * want this, they would be crazy though :) * / - mutex_lock(&dbs_mutex); - dbs_tuners_ins.freq_step = input; - mutex_unlock(&dbs_mutex); - - return count; -}*/ - -#define define_one_rw(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0644, show_##_name, store_##_name) - -define_one_rw(sampling_rate); -define_one_rw(sampling_down_factor); -define_one_rw(up_threshold); -define_one_rw(down_threshold); -define_one_rw(ignore_nice_load); -//define_one_rw(freq_step); - -static struct attribute * dbs_attributes[] = { - &sampling_rate_max.attr, - &sampling_rate_min.attr, - &sampling_rate.attr, - &sampling_down_factor.attr, - &up_threshold.attr, - &down_threshold.attr, - &ignore_nice_load.attr, - //&freq_step.attr, - NULL -}; - -static struct attribute_group dbs_attr_group = { - .attrs = dbs_attributes, - .name = "lagfree", -}; - -/************************** sysfs end ************************/ - -static void dbs_check_cpu(int cpu) -{ - unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; - unsigned int tmp_idle_ticks, total_idle_ticks; - unsigned int freq_target; - unsigned int freq_down_sampling_rate; - struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu); - struct cpufreq_policy *policy; - - if (!this_dbs_info->enable) - return; - - policy = this_dbs_info->cur_policy; - - /* - * The default safe range is 20% to 80% - * Every sampling_rate, we check - * - If current idle time is less than 20%, then we try to - * increase frequency - * Every sampling_rate*sampling_down_factor, we check - * - If current idle time is more than 80%, then we try to - * decrease frequency - * - * Any frequency increase takes it to the maximum frequency. 
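A toy model of the safe-range rule spelled out in the comment above, using the 50/15 default thresholds defined earlier in this file but an invented idle figure; the real code works on idle ticks rather than a precomputed percentage.

#include <stdio.h>

/* Toy model only: compare the busy share of the last sample against an
 * up and a down threshold, as the comment above describes. */
int main(void)
{
        unsigned int idle_percent = 12;    /* idle share of last sample   */
        unsigned int up_threshold = 50;    /* busy % needed to ramp up    */
        unsigned int down_threshold = 15;  /* busy % below which to drop  */

        if (100 - idle_percent > up_threshold)
                printf("step the requested frequency up\n");      /* taken */
        else if (100 - idle_percent < down_threshold)
                printf("step the requested frequency down\n");
        else
                printf("hold the current frequency\n");
        return 0;
}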
- * Frequency reduction happens at minimum steps of - * 5% (default) of max_frequency - */ - - /* Check for frequency increase */ - idle_ticks = UINT_MAX; - - /* Check for frequency increase */ - total_idle_ticks = get_cpu_idle_time(cpu); - tmp_idle_ticks = total_idle_ticks - - this_dbs_info->prev_cpu_idle_up; - this_dbs_info->prev_cpu_idle_up = total_idle_ticks; - - if (tmp_idle_ticks < idle_ticks) - idle_ticks = tmp_idle_ticks; - - /* Scale idle ticks by 100 and compare with up and down ticks */ - idle_ticks *= 100; - up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) * - usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - if (idle_ticks < up_idle_ticks) { - this_dbs_info->down_skip = 0; - this_dbs_info->prev_cpu_idle_down = - this_dbs_info->prev_cpu_idle_up; - - /* if we are already at full speed then break out early */ - if (this_dbs_info->requested_freq == policy->max && !suspended) - return; - - //freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; - if (suspended) - freq_target = (FREQ_STEP_UP_SLEEP_PERCENT * policy->max) / 100; - else - freq_target = policy->max; - - /* max freq cannot be less than 100. But who knows.... */ - if (unlikely(freq_target == 0)) - freq_target = 5; - - this_dbs_info->requested_freq += freq_target; - if (this_dbs_info->requested_freq > policy->max) - this_dbs_info->requested_freq = policy->max; - - //Screen off mode - if (suspended && this_dbs_info->requested_freq > FREQ_SLEEP_MAX) - this_dbs_info->requested_freq = FREQ_SLEEP_MAX; - - //Screen off mode - if (!suspended && this_dbs_info->requested_freq < FREQ_AWAKE_MIN) - this_dbs_info->requested_freq = FREQ_AWAKE_MIN; - - __cpufreq_driver_target(policy, this_dbs_info->requested_freq, - CPUFREQ_RELATION_H); - return; - } - - /* Check for frequency decrease */ - this_dbs_info->down_skip++; - if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor) - return; - - /* Check for frequency decrease */ - total_idle_ticks = this_dbs_info->prev_cpu_idle_up; - tmp_idle_ticks = total_idle_ticks - - this_dbs_info->prev_cpu_idle_down; - this_dbs_info->prev_cpu_idle_down = total_idle_ticks; - - if (tmp_idle_ticks < idle_ticks) - idle_ticks = tmp_idle_ticks; - - /* Scale idle ticks by 100 and compare with up and down ticks */ - idle_ticks *= 100; - this_dbs_info->down_skip = 0; - - freq_down_sampling_rate = dbs_tuners_ins.sampling_rate * - dbs_tuners_ins.sampling_down_factor; - down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * - usecs_to_jiffies(freq_down_sampling_rate); - - if (idle_ticks > down_idle_ticks) { - /* - * if we are already at the lowest speed then break out early - * or if we 'cannot' reduce the speed as the user might want - * freq_target to be zero - */ - if (this_dbs_info->requested_freq == policy->min && suspended - /*|| dbs_tuners_ins.freq_step == 0*/) - return; - - //freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; - freq_target = FREQ_STEP_DOWN; //policy->max; - - /* max freq cannot be less than 100. But who knows.... 
*/ - if (unlikely(freq_target == 0)) - freq_target = 5; - - // prevent going under 0 - if(freq_target > this_dbs_info->requested_freq) - this_dbs_info->requested_freq = policy->min; - else - this_dbs_info->requested_freq -= freq_target; - - if (this_dbs_info->requested_freq < policy->min) - this_dbs_info->requested_freq = policy->min; - - //Screen on mode - if (!suspended && this_dbs_info->requested_freq < FREQ_AWAKE_MIN) - this_dbs_info->requested_freq = FREQ_AWAKE_MIN; - - //Screen off mode - if (suspended && this_dbs_info->requested_freq > FREQ_SLEEP_MAX) - this_dbs_info->requested_freq = FREQ_SLEEP_MAX; - - __cpufreq_driver_target(policy, this_dbs_info->requested_freq, - CPUFREQ_RELATION_H); - return; - } -} - -static void do_dbs_timer(struct work_struct *work) -{ - int i; - mutex_lock(&dbs_mutex); - for_each_online_cpu(i) - dbs_check_cpu(i); - schedule_delayed_work(&dbs_work, - usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); - mutex_unlock(&dbs_mutex); -} - -static inline void dbs_timer_init(void) -{ - init_timer_deferrable(&dbs_work.timer); - schedule_delayed_work(&dbs_work, - usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); - return; -} - -static inline void dbs_timer_exit(void) -{ - cancel_delayed_work(&dbs_work); - return; -} - -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int j; - int rc; - - this_dbs_info = &per_cpu(cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - if (this_dbs_info->enable) /* Already enabled */ - break; - - mutex_lock(&dbs_mutex); - - rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu); - j_dbs_info->prev_cpu_idle_down - = j_dbs_info->prev_cpu_idle_up; - } - this_dbs_info->enable = 1; - this_dbs_info->down_skip = 0; - this_dbs_info->requested_freq = policy->cur; - - dbs_enable++; - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 1) { - unsigned int latency; - /* policy latency is in nS. 
Convert it to uS first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - - def_sampling_rate = 10 * latency * - CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER; - - if (def_sampling_rate < MIN_STAT_SAMPLING_RATE) - def_sampling_rate = MIN_STAT_SAMPLING_RATE; - - dbs_tuners_ins.sampling_rate = def_sampling_rate; - - dbs_timer_init(); - cpufreq_register_notifier( - &dbs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - } - - mutex_unlock(&dbs_mutex); - break; - - case CPUFREQ_GOV_STOP: - mutex_lock(&dbs_mutex); - this_dbs_info->enable = 0; - sysfs_remove_group(&policy->kobj, &dbs_attr_group); - dbs_enable--; - /* - * Stop the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 0) { - dbs_timer_exit(); - cpufreq_unregister_notifier( - &dbs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - } - - mutex_unlock(&dbs_mutex); - - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&dbs_mutex); - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target( - this_dbs_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target( - this_dbs_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); - mutex_unlock(&dbs_mutex); - break; - } - return 0; -} - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_LAGFREE -static -#endif -struct cpufreq_governor cpufreq_gov_lagfree = { - .name = "lagfree", - .governor = cpufreq_governor_dbs, - .max_transition_latency = TRANSITION_LATENCY_LIMIT, - .owner = THIS_MODULE, -}; - -static void lagfree_early_suspend(struct early_suspend *handler) { - suspended = 1; -} - -static void lagfree_late_resume(struct early_suspend *handler) { - suspended = 0; -} - -static struct early_suspend lagfree_power_suspend = { - .suspend = lagfree_early_suspend, - .resume = lagfree_late_resume, - .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, -}; - -static int __init cpufreq_gov_dbs_init(void) -{ - register_early_suspend(&lagfree_power_suspend); - return cpufreq_register_governor(&cpufreq_gov_lagfree); -} - -static void __exit cpufreq_gov_dbs_exit(void) -{ - /* Make sure that the scheduled work is indeed not running */ - flush_scheduled_work(); - - unregister_early_suspend(&lagfree_power_suspend); - cpufreq_unregister_governor(&cpufreq_gov_lagfree); -} - - -MODULE_AUTHOR ("Emilio López "); -MODULE_DESCRIPTION ("'cpufreq_lagfree' - A dynamic cpufreq governor for " - "Low Latency Frequency Transition capable processors " - "optimised for use in a battery environment" - "Based on conservative by Alexander Clouter"); -MODULE_LICENSE ("GPL"); - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_LAGFREE -fs_initcall(cpufreq_gov_dbs_init); -#else -module_init(cpufreq_gov_dbs_init); -#endif -module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_lulzactive.c b/drivers/cpufreq/cpufreq_lulzactive.c deleted file mode 100644 index ab5506a6..00000000 --- a/drivers/cpufreq/cpufreq_lulzactive.c +++ /dev/null @@ -1,1143 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_lulzactive.c - * - * Copyright (C) 2010 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * Author: Mike Chan (mike@android.com) - * Edited: Tegrak (luciferanna@gmail.com) - * - * Driver values in /sys/devices/system/cpu/cpufreq/lulzactive - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define LULZACTIVE_VERSION (2) -#define LULZACTIVE_AUTHOR "tegrak" - -// if you changed some codes for optimization, just write your name here. -#define LULZACTIVE_TUNER "simone201" - -#define LOGI(fmt...) printk(KERN_INFO "[lulzactive] " fmt) -#define LOGW(fmt...) printk(KERN_WARNING "[lulzactive] " fmt) -#define LOGD(fmt...) printk(KERN_DEBUG "[lulzactive] " fmt) - -static void (*pm_idle_old)(void); -static atomic_t active_count = ATOMIC_INIT(0); - -struct cpufreq_lulzactive_cpuinfo { - struct timer_list cpu_timer; - int timer_idlecancel; - u64 time_in_idle; - u64 idle_exit_time; - u64 timer_run_time; - int idling; - u64 freq_change_time; - u64 freq_change_time_in_idle; - struct cpufreq_policy *policy; - struct cpufreq_frequency_table *freq_table; - unsigned int freq_table_size; - unsigned int target_freq; - int governor_enabled; -}; - -static DEFINE_PER_CPU(struct cpufreq_lulzactive_cpuinfo, cpuinfo); - -/* Workqueues handle frequency scaling */ -static struct task_struct *up_task; -static struct workqueue_struct *down_wq; -static struct work_struct freq_scale_down_work; -static cpumask_t up_cpumask; -static spinlock_t up_cpumask_lock; -static cpumask_t down_cpumask; -static spinlock_t down_cpumask_lock; - -/* - * The minimum amount of time to spend at a frequency before we can step up. - */ -#define DEFAULT_UP_SAMPLE_TIME 20000 -static unsigned long up_sample_time; - -/* - * The minimum amount of time to spend at a frequency before we can step down. - */ -#define DEFAULT_DOWN_SAMPLE_TIME 40000 -static unsigned long down_sample_time; - -/* - * DEBUG print flags - */ -static unsigned long debug_mode; -enum { - LULZACTIVE_DEBUG_EARLY_SUSPEND=1, - LULZACTIVE_DEBUG_START_STOP=2, - LULZACTIVE_DEBUG_LOAD=4, - LULZACTIVE_DEBUG_SUSPEND=8, -}; -//#define DEFAULT_DEBUG_MODE (LULZACTIVE_DEBUG_EARLY_SUSPEND | LULZACTIVE_DEBUG_START_STOP | LULZACTIVE_DEBUG_SUSPEND) -#define DEFAULT_DEBUG_MODE (0) - -/* - * CPU freq will be increased if measured load > inc_cpu_load; - */ -#define DEFAULT_INC_CPU_LOAD 75 -static unsigned long inc_cpu_load; - -/* - * CPU freq will be decreased if measured load < dec_cpu_load; - * not implemented yet. - */ -#define DEFAULT_DEC_CPU_LOAD 30 -static unsigned long dec_cpu_load; - -/* - * Increasing frequency table index - * zero disables and causes to always jump straight to max frequency. - */ -#define DEFAULT_PUMP_UP_STEP 1 -static unsigned long pump_up_step; - -/* - * Decreasing frequency table index - * zero disables and will calculate frequency according to load heuristic. - */ -#define DEFAULT_PUMP_DOWN_STEP 1 -static unsigned long pump_down_step; - -/* - * Use minimum frequency while suspended. 
- */ -static unsigned int suspending; -static unsigned int early_suspended; - -#define SCREEN_OFF_LOWEST_STEP (0xffffffff) -#define DEFAULT_SCREEN_OFF_MIN_STEP (SCREEN_OFF_LOWEST_STEP) -static unsigned long screen_off_min_step; - -#define DEBUG 0 -#define BUFSZ 128 - -#if DEBUG -#include - -struct dbgln { - int cpu; - unsigned long jiffy; - unsigned long run; - char buf[BUFSZ]; -}; - -#define NDBGLNS 256 - -static struct dbgln dbgbuf[NDBGLNS]; -static int dbgbufs; -static int dbgbufe; -static struct proc_dir_entry *dbg_proc; -static spinlock_t dbgpr_lock; - -static u64 up_request_time; -static unsigned int up_max_latency; - -static void dbgpr(char *fmt, ...) -{ - va_list args; - int n; - unsigned long flags; - - spin_lock_irqsave(&dbgpr_lock, flags); - n = dbgbufe; - va_start(args, fmt); - vsnprintf(dbgbuf[n].buf, BUFSZ, fmt, args); - va_end(args); - dbgbuf[n].cpu = smp_processor_id(); - dbgbuf[n].run = nr_running(); - dbgbuf[n].jiffy = jiffies; - - if (++dbgbufe >= NDBGLNS) - dbgbufe = 0; - - if (dbgbufe == dbgbufs) - if (++dbgbufs >= NDBGLNS) - dbgbufs = 0; - - spin_unlock_irqrestore(&dbgpr_lock, flags); -} - -static void dbgdump(void) -{ - int i, j; - unsigned long flags; - static struct dbgln prbuf[NDBGLNS]; - - spin_lock_irqsave(&dbgpr_lock, flags); - i = dbgbufs; - j = dbgbufe; - memcpy(prbuf, dbgbuf, sizeof(dbgbuf)); - dbgbufs = 0; - dbgbufe = 0; - spin_unlock_irqrestore(&dbgpr_lock, flags); - - while (i != j) - { - printk("%lu %d %lu %s", - prbuf[i].jiffy, prbuf[i].cpu, prbuf[i].run, - prbuf[i].buf); - if (++i == NDBGLNS) - i = 0; - } -} - -static int dbg_proc_read(char *buffer, char **start, off_t offset, - int count, int *peof, void *dat) -{ - printk("max up_task latency=%uus\n", up_max_latency); - dbgdump(); - *peof = 1; - return 0; -} - - -#else -#define dbgpr(...) 
do {} while (0) -#endif - -static int cpufreq_governor_lulzactive(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_LULZACTIVE -static -#endif -struct cpufreq_governor cpufreq_gov_lulzactive = { - .name = "lulzactive", - .governor = cpufreq_governor_lulzactive, - .max_transition_latency = 9000000, - .owner = THIS_MODULE, -}; - -static unsigned int get_freq_table_size(struct cpufreq_frequency_table *freq_table) { - unsigned int size = 0; - while (freq_table[++size].frequency != CPUFREQ_TABLE_END); - return size; -} - -static inline void fix_screen_off_min_step(struct cpufreq_lulzactive_cpuinfo *pcpu) { - if (pcpu->freq_table_size <= 0) { - screen_off_min_step = 0; - return; - } - - if (DEFAULT_SCREEN_OFF_MIN_STEP == screen_off_min_step) - screen_off_min_step = pcpu->freq_table_size - 3; - - if (screen_off_min_step >= pcpu->freq_table_size) - screen_off_min_step = pcpu->freq_table_size - 3; -} - -static inline unsigned int adjust_screen_off_freq( - struct cpufreq_lulzactive_cpuinfo *pcpu, unsigned int freq) { - - if (early_suspended && freq > pcpu->freq_table[screen_off_min_step].frequency) { - freq = pcpu->freq_table[screen_off_min_step].frequency; - pcpu->target_freq = pcpu->policy->cur; - - if (freq > pcpu->policy->max) - freq = pcpu->policy->max; - if (freq < pcpu->policy->min) - freq = pcpu->policy->min; - } - - return freq; -} - -static void cpufreq_lulzactive_timer(unsigned long data) -{ - unsigned int delta_idle; - unsigned int delta_time; - int cpu_load; - int load_since_change; - u64 time_in_idle; - u64 idle_exit_time; - struct cpufreq_lulzactive_cpuinfo *pcpu = - &per_cpu(cpuinfo, data); - u64 now_idle; - unsigned int new_freq; - int index; - int ret; - - /* - * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time, - * this lets idle exit know the current idle time sample has - * been processed, and idle exit can generate a new sample and - * re-arm the timer. This prevents a concurrent idle - * exit on that CPU from writing a new set of info at the same time - * the timer function runs (the timer function can't use that info - * until more time passes). - */ - time_in_idle = pcpu->time_in_idle; - idle_exit_time = pcpu->idle_exit_time; - now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time); - smp_wmb(); - - /* If we raced with cancelling a timer, skip. */ - if (!idle_exit_time) { - dbgpr("timer %d: no valid idle exit sample\n", (int) data); - goto exit; - } - - /* let it be when s5pv310 contorl the suspending by tegrak */ - //if (suspending) { - // goto rearm; - //} - -#if DEBUG - if ((int) jiffies - (int) pcpu->cpu_timer.expires >= 10) - dbgpr("timer %d: late by %d ticks\n", - (int) data, jiffies - pcpu->cpu_timer.expires); -#endif - - delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle); - delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time, - idle_exit_time); - - /* - * If timer ran less than 1ms after short-term sample started, retry. 
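The load figure computed immediately below is simply the busy share of the wall-clock delta since the sample started; a standalone sketch with invented microsecond deltas shows the arithmetic.

#include <stdio.h>

/* Busy-percentage arithmetic from idle and wall-clock deltas, with
 * invented microsecond values. */
int main(void)
{
        unsigned int delta_time = 20000;  /* us of wall clock since sample */
        unsigned int delta_idle = 3000;   /* us of that spent idle         */
        unsigned int cpu_load;

        if (delta_idle > delta_time)
                cpu_load = 0;             /* guard against clock skew      */
        else
                cpu_load = 100 * (delta_time - delta_idle) / delta_time;

        printf("cpu_load = %u%%\n", cpu_load);   /* prints 85% */
        return 0;
}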
- */ - if (delta_time < 1000) { - dbgpr("timer %d: time delta %u too short exit=%llu now=%llu\n", (int) data, - delta_time, idle_exit_time, pcpu->timer_run_time); - goto rearm; - } - - if (delta_idle > delta_time) - cpu_load = 0; - else - cpu_load = 100 * (delta_time - delta_idle) / delta_time; - - delta_idle = (unsigned int) cputime64_sub(now_idle, - pcpu->freq_change_time_in_idle); - delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time, - pcpu->freq_change_time); - - if (delta_idle > delta_time) - load_since_change = 0; - else - load_since_change = - 100 * (delta_time - delta_idle) / delta_time; - - /* - * Choose greater of short-term load (since last idle timer - * started or timer function re-armed itself) or long-term load - * (since last frequency change). - */ - if (load_since_change > cpu_load) - cpu_load = load_since_change; - - /* - * START lulzactive algorithm section - */ - if (cpu_load >= inc_cpu_load) { - if (pump_up_step && pcpu->policy->cur < pcpu->policy->max) { - ret = cpufreq_frequency_table_target( - pcpu->policy, pcpu->freq_table, - pcpu->policy->cur, CPUFREQ_RELATION_H, - &index); - if (ret < 0) { - goto rearm; - } - - // apply pump_up_step by tegrak - index -= pump_up_step; - if (index < 0) - index = 0; - - new_freq = pcpu->freq_table[index].frequency; - } - else { - new_freq = pcpu->policy->max; - } - } - else { - if (pump_down_step) { - ret = cpufreq_frequency_table_target( - pcpu->policy, pcpu->freq_table, - pcpu->policy->cur, CPUFREQ_RELATION_H, - &index); - if (ret < 0) { - goto rearm; - } - - // apply pump_down_step by tegrak - index += pump_down_step; - if (index >= pcpu->freq_table_size) { - index = pcpu->freq_table_size - 1; - } - - new_freq = (pcpu->policy->cur > pcpu->policy->min) ? - (pcpu->freq_table[index].frequency) : - (pcpu->policy->min); - } - else { - new_freq = pcpu->policy->max * cpu_load / 100; - ret = cpufreq_frequency_table_target( - pcpu->policy, pcpu->freq_table, - new_freq, CPUFREQ_RELATION_H, - &index); - if (ret < 0) { - goto rearm; - } - new_freq = pcpu->freq_table[index].frequency; - } - } - - // adjust freq when screen off - new_freq = adjust_screen_off_freq(pcpu, new_freq); - - if (pcpu->target_freq == new_freq) - { - dbgpr("timer %d: load=%d, already at %d\n", (int) data, cpu_load, new_freq); - goto rearm_if_notmax; - } - - /* - * Do not scale down unless we have been at this frequency for the - * minimum sample time. 
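The index arithmetic in the algorithm section above implies a frequency table ordered from highest to lowest frequency: subtracting pump_up_step moves toward index 0 (the fastest step) and adding pump_down_step moves toward the slowest. A standalone sketch of that table walk over an invented table (the real governor resolves the index with cpufreq_frequency_table_target() against the live policy and then applies the screen-off cap shown above):

/* Illustration only, not part of this patch: pump-step walk over a descending table. */
#include <stdio.h>

static const unsigned int freq_table[] = {	/* kHz, highest first; made-up values */
	1200000, 1000000, 800000, 500000, 200000
};
#define TABLE_SIZE ((int)(sizeof(freq_table) / sizeof(freq_table[0])))

static int pump(int index, int load, int inc_cpu_load,
		int pump_up_step, int pump_down_step)
{
	if (load >= inc_cpu_load)
		index -= pump_up_step;		/* busy: move toward index 0 (max freq) */
	else
		index += pump_down_step;	/* idle enough: move toward the tail    */

	if (index < 0)
		index = 0;
	if (index >= TABLE_SIZE)
		index = TABLE_SIZE - 1;
	return index;
}

int main(void)
{
	int idx = 3;					/* currently at 500 MHz          */
	idx = pump(idx, 95, 80, 2, 1);			/* heavy load, jump two rows up  */
	printf("-> %u kHz\n", freq_table[idx]);		/* 1000000                       */
	idx = pump(idx, 10, 80, 2, 1);			/* light load, one row down      */
	printf("-> %u kHz\n", freq_table[idx]);		/* 800000                        */
	return 0;
}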
- */ - if (new_freq < pcpu->target_freq) { - if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) < - down_sample_time) { - dbgpr("timer %d: load=%d cur=%d tgt=%d not yet\n", (int) data, cpu_load, pcpu->target_freq, new_freq); - goto rearm; - } - } - else { - if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) < - up_sample_time) { - dbgpr("timer %d: load=%d cur=%d tgt=%d not yet\n", (int) data, cpu_load, pcpu->target_freq, new_freq); - /* don't reset timer */ - goto rearm; - } - } - - if (suspending && debug_mode & LULZACTIVE_DEBUG_SUSPEND) { - LOGI("suspending: cpu_load=%d%% new_freq=%u ppcpu->policy->cur=%u\n", - cpu_load, new_freq, pcpu->policy->cur); - } - if (early_suspended && !suspending && debug_mode & LULZACTIVE_DEBUG_LOAD) { - LOGI("early_suspended: cpu_load=%d%% new_freq=%u ppcpu->policy->cur=%u\n", - cpu_load, new_freq, pcpu->policy->cur); - } - if (debug_mode & LULZACTIVE_DEBUG_LOAD && !early_suspended && !suspending) { - LOGI("cpu_load=%d%% new_freq=%u pcpu->target_freq=%u pcpu->policy->cur=%u\n", - cpu_load, new_freq, pcpu->target_freq, pcpu->policy->cur); - } - - dbgpr("timer %d: load=%d cur=%d tgt=%d queue\n", (int) data, cpu_load, pcpu->target_freq, new_freq); - - if (new_freq < pcpu->target_freq) { - pcpu->target_freq = new_freq; - spin_lock(&down_cpumask_lock); - cpumask_set_cpu(data, &down_cpumask); - spin_unlock(&down_cpumask_lock); - queue_work(down_wq, &freq_scale_down_work); - } else { - pcpu->target_freq = new_freq; -#if DEBUG - up_request_time = ktime_to_us(ktime_get()); -#endif - spin_lock(&up_cpumask_lock); - cpumask_set_cpu(data, &up_cpumask); - spin_unlock(&up_cpumask_lock); - wake_up_process(up_task); - } - -rearm_if_notmax: - /* - * Already set max speed and don't see a need to change that, - * wait until next idle to re-evaluate, don't need timer. - */ - if (pcpu->target_freq == pcpu->policy->max) - goto exit; - -rearm: - if (!timer_pending(&pcpu->cpu_timer)) { - /* - * If already at min: if that CPU is idle, don't set timer. - * Else cancel the timer if that CPU goes idle. We don't - * need to re-evaluate speed until the next idle exit. - */ - if (pcpu->target_freq == pcpu->policy->min) { - smp_rmb(); - - if (pcpu->idling) { - dbgpr("timer %d: cpu idle, don't re-arm\n", (int) data); - goto exit; - } - - pcpu->timer_idlecancel = 1; - } - - pcpu->time_in_idle = get_cpu_idle_time_us( - data, &pcpu->idle_exit_time); - mod_timer(&pcpu->cpu_timer, jiffies + 2); - dbgpr("timer %d: set timer for %lu exit=%llu\n", (int) data, pcpu->cpu_timer.expires, pcpu->idle_exit_time); - } - -exit: - return; -} - -static void cpufreq_lulzactive_idle(void) -{ - struct cpufreq_lulzactive_cpuinfo *pcpu = - &per_cpu(cpuinfo, smp_processor_id()); - int pending; - - if (!pcpu->governor_enabled) { - pm_idle_old(); - return; - } - - pcpu->idling = 1; - smp_wmb(); - pending = timer_pending(&pcpu->cpu_timer); - - if (pcpu->target_freq != pcpu->policy->min) { -#ifdef CONFIG_SMP - /* - * Entering idle while not at lowest speed. On some - * platforms this can hold the other CPU(s) at that speed - * even though the CPU is idle. Set a timer to re-evaluate - * speed so this idle CPU doesn't hold the other CPUs above - * min indefinitely. This should probably be a quirk of - * the CPUFreq driver. 
- */ - if (!pending) { - pcpu->time_in_idle = get_cpu_idle_time_us( - smp_processor_id(), &pcpu->idle_exit_time); - pcpu->timer_idlecancel = 0; - mod_timer(&pcpu->cpu_timer, jiffies + 2); - dbgpr("idle: enter at %d, set timer for %lu exit=%llu\n", - pcpu->target_freq, pcpu->cpu_timer.expires, - pcpu->idle_exit_time); - } -#endif - } else { - /* - * If at min speed and entering idle after load has - * already been evaluated, and a timer has been set just in - * case the CPU suddenly goes busy, cancel that timer. The - * CPU didn't go busy; we'll recheck things upon idle exit. - */ - if (pending && pcpu->timer_idlecancel) { - dbgpr("idle: cancel timer for %lu\n", pcpu->cpu_timer.expires); - del_timer(&pcpu->cpu_timer); - /* - * Ensure last timer run time is after current idle - * sample start time, so next idle exit will always - * start a new idle sampling period. - */ - pcpu->idle_exit_time = 0; - pcpu->timer_idlecancel = 0; - } - } - - pm_idle_old(); - pcpu->idling = 0; - smp_wmb(); - - /* - * Arm the timer for 1-2 ticks later if not already, and if the timer - * function has already processed the previous load sampling - * interval. (If the timer is not pending but has not processed - * the previous interval, it is probably racing with us on another - * CPU. Let it compute load based on the previous sample and then - * re-arm the timer for another interval when it's done, rather - * than updating the interval start time to be "now", which doesn't - * give the timer function enough time to make a decision on this - * run.) - */ - if (timer_pending(&pcpu->cpu_timer) == 0 && - pcpu->timer_run_time >= pcpu->idle_exit_time) { - pcpu->time_in_idle = - get_cpu_idle_time_us(smp_processor_id(), - &pcpu->idle_exit_time); - pcpu->timer_idlecancel = 0; - mod_timer(&pcpu->cpu_timer, jiffies + 2); - dbgpr("idle: exit, set timer for %lu exit=%llu\n", pcpu->cpu_timer.expires, pcpu->idle_exit_time); -#if DEBUG - } else if (timer_pending(&pcpu->cpu_timer) == 0 && - pcpu->timer_run_time < pcpu->idle_exit_time) { - dbgpr("idle: timer not run yet: exit=%llu tmrrun=%llu\n", - pcpu->idle_exit_time, pcpu->timer_run_time); -#endif - } - -} - -static int cpufreq_lulzactive_up_task(void *data) -{ - unsigned int cpu; - cpumask_t tmp_mask; - struct cpufreq_lulzactive_cpuinfo *pcpu; - -#if DEBUG - u64 now; - u64 then; - unsigned int lat; -#endif - - while (1) { - set_current_state(TASK_INTERRUPTIBLE); - spin_lock(&up_cpumask_lock); - - if (cpumask_empty(&up_cpumask)) { - spin_unlock(&up_cpumask_lock); - schedule(); - - if (kthread_should_stop()) - break; - - spin_lock(&up_cpumask_lock); - } - - set_current_state(TASK_RUNNING); - -#if DEBUG - then = up_request_time; - now = ktime_to_us(ktime_get()); - - if (now > then) { - lat = ktime_to_us(ktime_get()) - then; - - if (lat > up_max_latency) - up_max_latency = lat; - } -#endif - - tmp_mask = up_cpumask; - cpumask_clear(&up_cpumask); - spin_unlock(&up_cpumask_lock); - - for_each_cpu(cpu, &tmp_mask) { - pcpu = &per_cpu(cpuinfo, cpu); - - if (nr_running() == 1) { - dbgpr("up %d: tgt=%d nothing else running\n", cpu, - pcpu->target_freq); - } - - __cpufreq_driver_target(pcpu->policy, - pcpu->target_freq, - CPUFREQ_RELATION_H); - pcpu->freq_change_time_in_idle = - get_cpu_idle_time_us(cpu, - &pcpu->freq_change_time); - dbgpr("up %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur); - } - } - - return 0; -} - -static void cpufreq_lulzactive_freq_down(struct work_struct *work) -{ - unsigned int cpu; - cpumask_t tmp_mask; - struct cpufreq_lulzactive_cpuinfo 
*pcpu; - - spin_lock(&down_cpumask_lock); - tmp_mask = down_cpumask; - cpumask_clear(&down_cpumask); - spin_unlock(&down_cpumask_lock); - - for_each_cpu(cpu, &tmp_mask) { - pcpu = &per_cpu(cpuinfo, cpu); - __cpufreq_driver_target(pcpu->policy, - pcpu->target_freq, - CPUFREQ_RELATION_H); - pcpu->freq_change_time_in_idle = - get_cpu_idle_time_us(cpu, - &pcpu->freq_change_time); - dbgpr("down %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur); - } -} - -// inc_cpu_load -static ssize_t show_inc_cpu_load(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", inc_cpu_load); -} - -static ssize_t store_inc_cpu_load(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - ssize_t ret; - if(strict_strtoul(buf, 0, &inc_cpu_load)==-EINVAL) return -EINVAL; - - if (inc_cpu_load > 100) { - inc_cpu_load = 100; - } - else if (inc_cpu_load < 10) { - inc_cpu_load = 10; - } - return count; -} - -static struct global_attr inc_cpu_load_attr = __ATTR(inc_cpu_load, 0666, - show_inc_cpu_load, store_inc_cpu_load); - -// down_sample_time -static ssize_t show_down_sample_time(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", down_sample_time); -} - -static ssize_t store_down_sample_time(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - if(strict_strtoul(buf, 0, &down_sample_time)==-EINVAL) return -EINVAL; - return count; -} - -static struct global_attr down_sample_time_attr = __ATTR(down_sample_time, 0666, - show_down_sample_time, store_down_sample_time); - -// up_sample_time -static ssize_t show_up_sample_time(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", up_sample_time); -} - -static ssize_t store_up_sample_time(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - if(strict_strtoul(buf, 0, &up_sample_time)==-EINVAL) return -EINVAL; - return count; -} - -static struct global_attr up_sample_time_attr = __ATTR(up_sample_time, 0666, - show_up_sample_time, store_up_sample_time); - -// debug_mode -static ssize_t show_debug_mode(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", debug_mode); -} - -static ssize_t store_debug_mode(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - if(strict_strtoul(buf, 0, &debug_mode)==-EINVAL) return -EINVAL; - return count; -} - -static struct global_attr debug_mode_attr = __ATTR(debug_mode, 0666, - show_debug_mode, store_debug_mode); - -// pump_up_step -static ssize_t show_pump_up_step(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", pump_up_step); -} - -static ssize_t store_pump_up_step(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - if(strict_strtoul(buf, 0, &pump_up_step)==-EINVAL) return -EINVAL; - return count; -} - -static struct global_attr pump_up_step_attr = __ATTR(pump_up_step, 0666, - show_pump_up_step, store_pump_up_step); - -// pump_down_step -static ssize_t show_pump_down_step(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", pump_down_step); -} - -static ssize_t store_pump_down_step(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - ssize_t ret; - struct cpufreq_lulzactive_cpuinfo *pcpu; - - if(strict_strtoul(buf, 0, &pump_down_step)==-EINVAL) return -EINVAL; - - pcpu = &per_cpu(cpuinfo, 0); - // fix out of 
bound - if (pcpu->freq_table_size <= pump_down_step) { - pump_down_step = pcpu->freq_table_size - 1; - } - return count; -} - -static struct global_attr pump_down_step_attr = __ATTR(pump_down_step, 0666, - show_pump_down_step, store_pump_down_step); - -// screen_off_min_step -static ssize_t show_screen_off_min_step(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - struct cpufreq_lulzactive_cpuinfo *pcpu; - - pcpu = &per_cpu(cpuinfo, 0); - fix_screen_off_min_step(pcpu); - - return sprintf(buf, "%lu\n", screen_off_min_step); -} - -static ssize_t store_screen_off_min_step(struct kobject *kobj, - struct attribute *attr, const char *buf, size_t count) -{ - struct cpufreq_lulzactive_cpuinfo *pcpu; - ssize_t ret; - - if(strict_strtoul(buf, 0, &screen_off_min_step)==-EINVAL) return -EINVAL; - - pcpu = &per_cpu(cpuinfo, 0); - fix_screen_off_min_step(pcpu); - - return count; -} - -static struct global_attr screen_off_min_step_attr = __ATTR(screen_off_min_step, 0666, - show_screen_off_min_step, store_screen_off_min_step); - -// author -static ssize_t show_author(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%s\n", LULZACTIVE_AUTHOR); -} - -static struct global_attr author_attr = __ATTR(author, 0444, - show_author, NULL); - -// tuner -static ssize_t show_tuner(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%s\n", LULZACTIVE_TUNER); -} - -static struct global_attr tuner_attr = __ATTR(tuner, 0444, - show_tuner, NULL); - -// version -static ssize_t show_version(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", LULZACTIVE_VERSION); -} - -static struct global_attr version_attr = __ATTR(version, 0444, - show_version, NULL); - -// freq_table -static ssize_t show_freq_table(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - struct cpufreq_lulzactive_cpuinfo *pcpu; - char temp[64]; - int i; - - pcpu = &per_cpu(cpuinfo, 0); - - for (i = 0; i < pcpu->freq_table_size; i++) { - sprintf(temp, "%u\n", pcpu->freq_table[i].frequency); - strcat(buf, temp); - } - - return strlen(buf); -} - -static struct global_attr freq_table_attr = __ATTR(freq_table, 0444, - show_freq_table, NULL); - -static struct attribute *lulzactive_attributes[] = { - &inc_cpu_load_attr.attr, - &up_sample_time_attr.attr, - &down_sample_time_attr.attr, - &pump_up_step_attr.attr, - &pump_down_step_attr.attr, - &screen_off_min_step_attr.attr, - &debug_mode_attr.attr, - &author_attr.attr, - &tuner_attr.attr, - &version_attr.attr, - &freq_table_attr.attr, - NULL, -}; - -static struct attribute_group lulzactive_attr_group = { - .attrs = lulzactive_attributes, - .name = "lulzactive", -}; - -static int cpufreq_governor_lulzactive(struct cpufreq_policy *new_policy, - unsigned int event) -{ - int rc; - struct cpufreq_lulzactive_cpuinfo *pcpu = - &per_cpu(cpuinfo, new_policy->cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if (debug_mode & LULZACTIVE_DEBUG_START_STOP) { - LOGI("CPUFREQ_GOV_START\n"); - } - if (!cpu_online(new_policy->cpu)) - return -EINVAL; - - pcpu->policy = new_policy; - pcpu->freq_table = cpufreq_frequency_get_table(new_policy->cpu); - pcpu->target_freq = new_policy->cur; - pcpu->freq_change_time_in_idle = - get_cpu_idle_time_us(new_policy->cpu, - &pcpu->freq_change_time); - pcpu->governor_enabled = 1; - pcpu->freq_table_size = get_freq_table_size(pcpu->freq_table); - - // fix invalid screen_off_min_step - fix_screen_off_min_step(pcpu); - - /* - * Do not register the idle hook and create 
sysfs - * entries if we have already done so. - */ - if (atomic_inc_return(&active_count) > 1) - return 0; - - rc = sysfs_create_group(cpufreq_global_kobject, - &lulzactive_attr_group); - if (rc) - return rc; - - pm_idle_old = pm_idle; - pm_idle = cpufreq_lulzactive_idle; - break; - - case CPUFREQ_GOV_STOP: - if (debug_mode & LULZACTIVE_DEBUG_START_STOP) { - LOGI("CPUFREQ_GOV_STOP\n"); - } - pcpu->governor_enabled = 0; - - if (atomic_dec_return(&active_count) > 0) - return 0; - - sysfs_remove_group(cpufreq_global_kobject, - &lulzactive_attr_group); - - pm_idle = pm_idle_old; - del_timer(&pcpu->cpu_timer); - break; - - case CPUFREQ_GOV_LIMITS: - if (new_policy->max < new_policy->cur) - __cpufreq_driver_target(new_policy, - new_policy->max, CPUFREQ_RELATION_H); - else if (new_policy->min > new_policy->cur) - __cpufreq_driver_target(new_policy, - new_policy->min, CPUFREQ_RELATION_L); - break; - } - return 0; -} - -static void lulzactive_early_suspend(struct early_suspend *handler) { - struct cpufreq_lulzactive_cpuinfo *pcpu; - unsigned int min_freq, max_freq; - - early_suspended = 1; - - if (debug_mode & LULZACTIVE_DEBUG_EARLY_SUSPEND) { - LOGI("%s\n", __func__); - - pcpu = &per_cpu(cpuinfo, 0); - - min_freq = pcpu->policy->min; - - max_freq = min(pcpu->policy->max, pcpu->freq_table[screen_off_min_step].frequency); - max_freq = max(max_freq, min_freq); - - LOGI("lock @%u~@%uMHz\n", min_freq / 1000, max_freq / 1000); - } -} - -static void lulzactive_late_resume(struct early_suspend *handler) { - early_suspended = 0; - if (debug_mode & LULZACTIVE_DEBUG_EARLY_SUSPEND) { - LOGI("%s\n", __func__); - } -} - -static struct early_suspend lulzactive_power_suspend = { - .suspend = lulzactive_early_suspend, - .resume = lulzactive_late_resume, - .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, -}; - -static int lulzactive_pm_notifier_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct cpufreq_policy* policy; - - switch (event) { - case PM_SUSPEND_PREPARE: - suspending = 1; - if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { - LOGI("PM_SUSPEND_PREPARE"); - policy = cpufreq_cpu_get(0); - if (policy) { - LOGI("PM_SUSPEND_PREPARE using @%uMHz\n", policy->cur); - } - } - break; - case PM_POST_SUSPEND: - suspending = 0; - if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { - LOGI("PM_POST_SUSPEND"); - policy = cpufreq_cpu_get(0); - if (policy) { - LOGI("PM_POST_SUSPEND using @%uMHz\n", policy->cur); - } - } - break; - case PM_RESTORE_PREPARE: - if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { - LOGI("PM_RESTORE_PREPARE"); - } - break; - case PM_POST_RESTORE: - if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { - LOGI("PM_POST_RESTORE"); - } - break; - case PM_HIBERNATION_PREPARE: - if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { - LOGI("PM_HIBERNATION_PREPARE"); - } - break; - case PM_POST_HIBERNATION: - if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) { - LOGI("PM_POST_HIBERNATION"); - } - break; - } - return NOTIFY_DONE; -} - -static struct notifier_block lulzactive_pm_notifier = { - .notifier_call = lulzactive_pm_notifier_event, -}; - -static int __init cpufreq_lulzactive_init(void) -{ - unsigned int i; - struct cpufreq_lulzactive_cpuinfo *pcpu; - struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; - - up_sample_time = DEFAULT_UP_SAMPLE_TIME; - down_sample_time = DEFAULT_DOWN_SAMPLE_TIME; - debug_mode = DEFAULT_DEBUG_MODE; - inc_cpu_load = DEFAULT_INC_CPU_LOAD; - dec_cpu_load = DEFAULT_DEC_CPU_LOAD; - pump_up_step = DEFAULT_PUMP_UP_STEP; - pump_down_step = DEFAULT_PUMP_DOWN_STEP; - early_suspended = 
0; - suspending = 0; - screen_off_min_step = DEFAULT_SCREEN_OFF_MIN_STEP; - - /* Initalize per-cpu timers */ - for_each_possible_cpu(i) { - pcpu = &per_cpu(cpuinfo, i); - init_timer(&pcpu->cpu_timer); - pcpu->cpu_timer.function = cpufreq_lulzactive_timer; - pcpu->cpu_timer.data = i; - } - - up_task = kthread_create(cpufreq_lulzactive_up_task, NULL, - "klulzactiveup"); - if (IS_ERR(up_task)) - return PTR_ERR(up_task); - - sched_setscheduler_nocheck(up_task, SCHED_FIFO, ¶m); - get_task_struct(up_task); - - /* No rescuer thread, bind to CPU queuing the work for possibly - warm cache (probably doesn't matter much). */ - down_wq = create_workqueue("klulzactive_down"); - - if (! down_wq) - goto err_freeuptask; - - INIT_WORK(&freq_scale_down_work, - cpufreq_lulzactive_freq_down); - -#if DEBUG - spin_lock_init(&dbgpr_lock); - dbg_proc = create_proc_entry("igov", S_IWUSR | S_IRUGO, NULL); - dbg_proc->read_proc = dbg_proc_read; -#endif - spin_lock_init(&down_cpumask_lock); - spin_lock_init(&up_cpumask_lock); - - register_pm_notifier(&lulzactive_pm_notifier); - register_early_suspend(&lulzactive_power_suspend); - - return cpufreq_register_governor(&cpufreq_gov_lulzactive); - -err_freeuptask: - put_task_struct(up_task); - return -ENOMEM; -} - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_LULZACTIVE -fs_initcall(cpufreq_lulzactive_init); -#else -module_init(cpufreq_lulzactive_init); -#endif - -static void __exit cpufreq_lulzactive_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_lulzactive); - unregister_early_suspend(&lulzactive_power_suspend); - unregister_pm_notifier(&lulzactive_pm_notifier); - kthread_stop(up_task); - put_task_struct(up_task); - destroy_workqueue(down_wq); -} - -module_exit(cpufreq_lulzactive_exit); - -MODULE_AUTHOR("Tegrak "); -MODULE_DESCRIPTION("'lulzactive' - improved interactive governor inspired by smartass"); -MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cpufreq_minmax.c b/drivers/cpufreq/cpufreq_minmax.c deleted file mode 100644 index 09dba0d2..00000000 --- a/drivers/cpufreq/cpufreq_minmax.c +++ /dev/null @@ -1,575 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_minmax.c - * - * Copyright (C) 2001 Russell King - * (C) 2003 Venkatesh Pallipadi . - * Jun Nakajima - * (C) 2004 Alexander Clouter - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This governor is an adapatation of the conservative governor. - * See the Documentation/cpu-freq/governors.txt for more information. - * - * Adapatation from conservative by Erasmux. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ - -#define DEF_FREQUENCY_UP_THRESHOLD (92) -#define DEF_FREQUENCY_DOWN_THRESHOLD (27) - -/* - * The polling frequency of this governor depends on the capability of - * the processor. Default polling frequency is 1000 times the transition - * latency of the processor. The governor will work on any processor with - * transition latency <= 10mS, using appropriate sampling - * rate. - * For CPUs with transition latency > 10mS (mostly drivers - * with CPUFREQ_ETERNAL), this governor will not work. - * All times here are in uS. 
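Concretely, the GOV_START handler later in this file derives the default rate from the driver's transition latency: the latency is converted from nanoseconds to microseconds, multiplied by 10 and by the 500x sampling-latency multiplier defined below, then floored at MIN_STAT_SAMPLING_RATE. A rough standalone sketch of that calculation (HZ=100 and the 10 us transition latency are assumptions for the example only):

/* Illustration only, not part of this patch: deriving the default sampling rate. */
#include <stdio.h>

#define HZ_ASSUMED		100	/* assumption for the example only */
#define MIN_SAMPLING_RATE_RATIO	2
#define MIN_TICKS		2
#define LATENCY_MULTIPLIER	500

static unsigned int jiffies_to_usecs_sketch(unsigned int j)
{
	return j * (1000000 / HZ_ASSUMED);
}

int main(void)
{
	unsigned int transition_latency_ns = 10000;	/* hypothetical driver value */
	unsigned int latency_us = transition_latency_ns / 1000;
	unsigned int min_stat = MIN_SAMPLING_RATE_RATIO *
				jiffies_to_usecs_sketch(MIN_TICKS);	/* 40000 us */
	unsigned int def_rate;

	if (latency_us == 0)
		latency_us = 1;
	def_rate = 10 * latency_us * LATENCY_MULTIPLIER;	/* 50000 us */
	if (def_rate < min_stat)
		def_rate = min_stat;

	printf("def_sampling_rate = %u us\n", def_rate);
	return 0;
}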
- */ -static unsigned int def_sampling_rate; -#define MIN_SAMPLING_RATE_RATIO (2) -/* for correct statistics, we need at least 10 ticks between each measure */ -#define MIN_STAT_SAMPLING_RATE \ - (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(CONFIG_CPU_FREQ_MIN_TICKS)) -#define MIN_SAMPLING_RATE \ - (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) -#define MAX_SAMPLING_RATE (500 * def_sampling_rate) -#define DEF_SAMPLING_DOWN_FACTOR (10) -#define MAX_SAMPLING_DOWN_FACTOR (100) -#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) -#define CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER (500) -#define CONFIG_CPU_FREQ_MIN_TICKS (2) - -static void do_dbs_timer(struct work_struct *work); - -struct cpu_dbs_info_s { - struct cpufreq_policy *cur_policy; - unsigned int prev_cpu_idle_up; - unsigned int prev_cpu_idle_down; - unsigned int enable; - unsigned int down_skip; - unsigned int requested_freq; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); - -static unsigned int dbs_enable; /* number of CPUs using this policy */ - -/* - * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug - * lock and dbs_mutex. cpu_hotplug lock should always be held before - * dbs_mutex. If any function that can potentially take cpu_hotplug lock - * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then - * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock - * is recursive for the same process. -Venki - */ -static DEFINE_MUTEX (dbs_mutex); -static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer); - -struct dbs_tuners { - unsigned int sampling_rate; - unsigned int sampling_down_factor; - unsigned int up_threshold; - unsigned int down_threshold; - unsigned int ignore_nice; -}; - -static struct dbs_tuners dbs_tuners_ins = { - .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, - .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, - .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, - .ignore_nice = 0, -}; - -static inline unsigned int get_cpu_idle_time(unsigned int cpu) -{ - unsigned int add_nice = 0, ret; - - if (dbs_tuners_ins.ignore_nice) - add_nice = kstat_cpu(cpu).cpustat.nice; - - ret = kstat_cpu(cpu).cpustat.idle + - kstat_cpu(cpu).cpustat.iowait + - add_nice; - - return ret; -} - -/* keep track of frequency transitions */ -static int -dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, - void *data) -{ - struct cpufreq_freqs *freq = data; - struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, - freq->cpu); - - if (!this_dbs_info->enable) - return 0; - - this_dbs_info->requested_freq = freq->new; - - return 0; -} - -static struct notifier_block dbs_cpufreq_notifier_block = { - .notifier_call = dbs_cpufreq_notifier -}; - -/************************** sysfs interface ************************/ -static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) -{ - return sprintf (buf, "%u\n", MAX_SAMPLING_RATE); -} - -static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) -{ - return sprintf (buf, "%u\n", MIN_SAMPLING_RATE); -} - -#define define_one_ro(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0444, show_##_name, NULL) - -define_one_ro(sampling_rate_max); -define_one_ro(sampling_rate_min); - -/* cpufreq_minmax Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct cpufreq_policy *unused, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ -} -show_one(sampling_rate, sampling_rate); -show_one(sampling_down_factor, 
sampling_down_factor); -show_one(up_threshold, up_threshold); -show_one(down_threshold, down_threshold); -show_one(ignore_nice_load, ignore_nice); - -static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf (buf, "%u", &input); - if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.sampling_down_factor = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_sampling_rate(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf (buf, "%u", &input); - - mutex_lock(&dbs_mutex); - if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) { - mutex_unlock(&dbs_mutex); - return -EINVAL; - } - - dbs_tuners_ins.sampling_rate = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_up_threshold(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf (buf, "%u", &input); - - mutex_lock(&dbs_mutex); - if (ret != 1 || input > 100 || input <= dbs_tuners_ins.down_threshold) { - mutex_unlock(&dbs_mutex); - return -EINVAL; - } - - dbs_tuners_ins.up_threshold = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_down_threshold(struct cpufreq_policy *unused, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf (buf, "%u", &input); - - mutex_lock(&dbs_mutex); - if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) { - mutex_unlock(&dbs_mutex); - return -EINVAL; - } - - dbs_tuners_ins.down_threshold = input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - unsigned int j; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - if (input > 1) - input = 1; - - mutex_lock(&dbs_mutex); - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - mutex_unlock(&dbs_mutex); - return count; - } - dbs_tuners_ins.ignore_nice = input; - - /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */ - for_each_online_cpu(j) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cpu_dbs_info, j); - j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j); - j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up; - } - mutex_unlock(&dbs_mutex); - - return count; -} - -#define define_one_rw(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0644, show_##_name, store_##_name) - -define_one_rw(sampling_rate); -define_one_rw(sampling_down_factor); -define_one_rw(up_threshold); -define_one_rw(down_threshold); -define_one_rw(ignore_nice_load); - -static struct attribute * dbs_attributes[] = { - &sampling_rate_max.attr, - &sampling_rate_min.attr, - &sampling_rate.attr, - &sampling_down_factor.attr, - &up_threshold.attr, - &down_threshold.attr, - &ignore_nice_load.attr, - NULL -}; - -static struct attribute_group dbs_attr_group = { - .attrs = dbs_attributes, - .name = "minmax", -}; - -/************************** sysfs end ************************/ - -static void dbs_check_cpu(int cpu) -{ - unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; - unsigned int tmp_idle_ticks, total_idle_ticks; - //unsigned int freq_target; - unsigned int freq_down_sampling_rate; - struct cpu_dbs_info_s *this_dbs_info = 
&per_cpu(cpu_dbs_info, cpu); - struct cpufreq_policy *policy; - - if (!this_dbs_info->enable) - return; - - policy = this_dbs_info->cur_policy; - - /* - * The default safe range is 20% to 80% - * Every sampling_rate, we check - * - If current idle time is less than 20%, then we try to - * increase frequency - * Every sampling_rate*sampling_down_factor, we check - * - If current idle time is more than 80%, then we try to - * decrease frequency - * - */ - - this_dbs_info->down_skip++; - - /* Check for frequency increase */ - idle_ticks = UINT_MAX; - - /* Check for frequency increase */ - total_idle_ticks = get_cpu_idle_time(cpu); - tmp_idle_ticks = total_idle_ticks - - this_dbs_info->prev_cpu_idle_up; - this_dbs_info->prev_cpu_idle_up = total_idle_ticks; - - if (tmp_idle_ticks < idle_ticks) - idle_ticks = tmp_idle_ticks; - - /* Scale idle ticks by 100 and compare with up and down ticks */ - idle_ticks *= 100; - up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) * - usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - if (idle_ticks < up_idle_ticks) { - this_dbs_info->down_skip = 0; - this_dbs_info->prev_cpu_idle_down = - this_dbs_info->prev_cpu_idle_up; - - /* if we are already at full speed then break out early */ - if (this_dbs_info->requested_freq == policy->max) - return; - - this_dbs_info->requested_freq = policy->max; - - __cpufreq_driver_target(policy, this_dbs_info->requested_freq, - CPUFREQ_RELATION_H); - return; - } - - /* Check for frequency decrease */ - if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor) - return; - else this_dbs_info->down_skip--; /* just to prevent overflow */ - - - /* Check for frequency decrease */ - total_idle_ticks = this_dbs_info->prev_cpu_idle_up; - tmp_idle_ticks = total_idle_ticks - - this_dbs_info->prev_cpu_idle_down; - this_dbs_info->prev_cpu_idle_down = total_idle_ticks; - - if (tmp_idle_ticks < idle_ticks) - idle_ticks = tmp_idle_ticks; - - /* Scale idle ticks by 100 and compare with up and down ticks */ - idle_ticks *= 100; - - freq_down_sampling_rate = dbs_tuners_ins.sampling_rate * - dbs_tuners_ins.sampling_down_factor; - down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * - usecs_to_jiffies(freq_down_sampling_rate); - - if (idle_ticks > down_idle_ticks) { - /* - * if we are already at the lowest speed then break out early - * or if we 'cannot' reduce the speed as the user might want - * freq_target to be zero - */ - if (this_dbs_info->requested_freq == policy->min) - return; - - this_dbs_info->requested_freq = policy->min; - - __cpufreq_driver_target(policy, this_dbs_info->requested_freq, - CPUFREQ_RELATION_H); - return; - } -} - -static void do_dbs_timer(struct work_struct *work) -{ - int i; - - mutex_lock(&dbs_mutex); - for_each_online_cpu(i) - dbs_check_cpu(i); - schedule_delayed_work(&dbs_work, - usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); - mutex_unlock(&dbs_mutex); -} - -static inline void dbs_timer_init(void) -{ - init_timer_deferrable(&dbs_work.timer); - schedule_delayed_work(&dbs_work, - usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); - return; -} - -static inline void dbs_timer_exit(void) -{ - cancel_delayed_work(&dbs_work); - return; -} - -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int j; - int rc; - - this_dbs_info = &per_cpu(cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - if 
(this_dbs_info->enable) /* Already enabled */ - break; - - mutex_lock(&dbs_mutex); - - rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu); - j_dbs_info->prev_cpu_idle_down - = j_dbs_info->prev_cpu_idle_up; - } - this_dbs_info->enable = 1; - this_dbs_info->down_skip = 0; - this_dbs_info->requested_freq = policy->cur; - - dbs_enable++; - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 1) { - unsigned int latency; - /* policy latency is in nS. Convert it to uS first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - - def_sampling_rate = 10 * latency * - CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER; - - if (def_sampling_rate < MIN_STAT_SAMPLING_RATE) - def_sampling_rate = MIN_STAT_SAMPLING_RATE; - - dbs_tuners_ins.sampling_rate = def_sampling_rate; - - dbs_timer_init(); - cpufreq_register_notifier( - &dbs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - } - - mutex_unlock(&dbs_mutex); - break; - - case CPUFREQ_GOV_STOP: - mutex_lock(&dbs_mutex); - this_dbs_info->enable = 0; - sysfs_remove_group(&policy->kobj, &dbs_attr_group); - dbs_enable--; - /* - * Stop the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 0) { - dbs_timer_exit(); - cpufreq_unregister_notifier( - &dbs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - } - - mutex_unlock(&dbs_mutex); - - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&dbs_mutex); - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target( - this_dbs_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target( - this_dbs_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); - mutex_unlock(&dbs_mutex); - break; - } - return 0; -} - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_MINMAX -static -#endif -struct cpufreq_governor cpufreq_gov_minmax = { - .name = "minmax", - .governor = cpufreq_governor_dbs, - .max_transition_latency = TRANSITION_LATENCY_LIMIT, - .owner = THIS_MODULE, -}; - -static int __init cpufreq_gov_dbs_init(void) -{ - return cpufreq_register_governor(&cpufreq_gov_minmax); -} - -static void __exit cpufreq_gov_dbs_exit(void) -{ - /* Make sure that the scheduled work is indeed not running */ - flush_scheduled_work(); - - cpufreq_unregister_governor(&cpufreq_gov_minmax); -} - -MODULE_AUTHOR ("Erasmux"); -MODULE_DESCRIPTION ("'cpufreq_minmax' - A dynamic cpufreq governor which " - "minimizes the frequecy jumps by always selecting either " - "the minimal or maximal frequency"); -MODULE_LICENSE ("GPL"); - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_MINMAX -fs_initcall(cpufreq_gov_dbs_init); -#else -module_init(cpufreq_gov_dbs_init); -#endif -module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_smartass.c b/drivers/cpufreq/cpufreq_smartass.c deleted file mode 100644 index 0ba3ee61..00000000 --- a/drivers/cpufreq/cpufreq_smartass.c +++ /dev/null @@ -1,642 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_smartass.c - * - * Copyright (C) 2010 Google, Inc. 
- * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Author: Erasmux - * - * Based on the interactive governor By Mike Chan (mike@android.com) - * which was adaptated to 2.6.29 kernel by Nadlabak (pavel@doshaska.net) - * - * requires to add - * EXPORT_SYMBOL_GPL(nr_running); - * at the end of kernel/sched.c - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static void (*pm_idle_old)(void); -static atomic_t active_count = ATOMIC_INIT(0); - -struct smartass_info_s { - struct cpufreq_policy *cur_policy; - struct timer_list timer; - u64 time_in_idle; - u64 idle_exit_time; - unsigned int force_ramp_up; - unsigned int enable; -}; -static DEFINE_PER_CPU(struct smartass_info_s, smartass_info); - -/* Workqueues handle frequency scaling */ -static struct workqueue_struct *up_wq; -static struct workqueue_struct *down_wq; -static struct work_struct freq_scale_work; - -static u64 freq_change_time; -static u64 freq_change_time_in_idle; - -static cpumask_t work_cpumask; -static unsigned int suspended; - - -/* - * The minimum amount of time to spend at a frequency before we can ramp down, - * default is 45ms. - */ -#define DEFAULT_RAMP_DOWN_RATE_NS 45000; -static unsigned long ramp_down_rate_ns; - -/* - * When ramping up frequency jump to at least this frequency. - */ - -#define DEFAULT_UP_MIN_FREQ (800*1000) -static unsigned int up_min_freq; - -/* - * When sleep_max_freq>0 the frequency when suspended will be capped - * by this frequency. Also will wake up at max frequency of policy - * to minimize wakeup issues. - * Set sleep_max_freq=0 to disable this behavior. - */ -#define DEFAULT_SLEEP_MAX_FREQ (400*1000) -static unsigned int sleep_max_freq; - -/* - * Sampling rate, I highly recommend to leave it at 2. - */ -#define DEFAULT_SAMPLE_RATE_JIFFIES 2 -static unsigned int sample_rate_jiffies; - -/* - * Max freqeuncy delta when ramping up. - */ - -#define DEFAULT_MAX_RAMP_UP (300 * 1000) -static unsigned int max_ramp_up; - -/* - * CPU freq will be increased if measured load > max_cpu_load; - */ -#define DEFAULT_MAX_CPU_LOAD 60 -static unsigned long max_cpu_load; - -/* - * CPU freq will be decreased if measured load < min_cpu_load; - */ -#define DEFAULT_MIN_CPU_LOAD 30 -static unsigned long min_cpu_load; - -//Leave this zero by default, people can tweak it if they so wish. 
-#define DEFAULT_RAMP_UP_RATE_NS 0 -static unsigned long ramp_up_rate_ns; - - -static int cpufreq_governor_smartass(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS -static -#endif -struct cpufreq_governor cpufreq_gov_smartass = { - .name = "smartass", - .governor = cpufreq_governor_smartass, - .max_transition_latency = 9000000, - .owner = THIS_MODULE, -}; - -static void cpufreq_smartass_timer(unsigned long data) -{ - u64 delta_idle; - u64 update_time; - u64 now_idle; - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, data); - struct cpufreq_policy *policy = this_smartass->cur_policy; - - now_idle = get_cpu_idle_time_us(data, &update_time); - - if (update_time == this_smartass->idle_exit_time) - return; - - delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle); - //printk(KERN_INFO "smartass: t=%llu i=%llu\n",cputime64_sub(update_time,this_smartass->idle_exit_time),delta_idle); - - /* Scale up if there were no idle cycles since coming out of idle */ - if (delta_idle == 0 && cputime64_sub(update_time, freq_change_time) > ramp_up_rate_ns) { - if (policy->cur == policy->max) - return; - - if (nr_running() < 1) - return; - - this_smartass->force_ramp_up = 1; - cpumask_set_cpu(data, &work_cpumask); - queue_work(up_wq, &freq_scale_work); - return; - } - - /* - * There is a window where if the cpu utlization can go from low to high - * between the timer expiring, delta_idle will be > 0 and the cpu will - * be 100% busy, preventing idle from running, and this timer from - * firing. So setup another timer to fire to check cpu utlization. - * Do not setup the timer if there is no scheduled work. - */ - if (!timer_pending(&this_smartass->timer) && nr_running() > 0) { - this_smartass->time_in_idle = get_cpu_idle_time_us( - data, &this_smartass->idle_exit_time); - mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies); - } - - if (policy->cur == policy->min) - return; - - /* - * Do not scale down unless we have been at this frequency for the - * minimum sample time. - */ - if (cputime64_sub(update_time, freq_change_time) < ramp_down_rate_ns) - return; - - - cpumask_set_cpu(data, &work_cpumask); - queue_work(down_wq, &freq_scale_work); -} - -static void cpufreq_idle(void) -{ - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); - struct cpufreq_policy *policy = this_smartass->cur_policy; - - pm_idle_old(); - - if (!cpumask_test_cpu(smp_processor_id(), policy->cpus)) - return; - - /* Timer to fire in 1-2 ticks, jiffie aligned. */ - if (timer_pending(&this_smartass->timer) == 0) { - this_smartass->time_in_idle = get_cpu_idle_time_us( - smp_processor_id(), &this_smartass->idle_exit_time); - mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies); - } -} - -/* - * Choose the cpu frequency based off the load. For now choose the minimum - * frequency that will satisfy the load, which is no -t always the lower power. 
- */ -static unsigned int cpufreq_smartass_calc_freq(unsigned int cpu, struct cpufreq_policy *policy) -{ - unsigned int delta_time; - unsigned int idle_time; - unsigned int cpu_load; - unsigned int new_freq; - u64 current_wall_time; - u64 current_idle_time; - - - current_idle_time = get_cpu_idle_time_us(cpu, ¤t_wall_time); - - idle_time = (unsigned int)( current_idle_time - freq_change_time_in_idle ); - delta_time = (unsigned int)( current_wall_time - freq_change_time ); - - cpu_load = 100 * (delta_time - idle_time) / delta_time; - if (cpu_load < min_cpu_load) { - //if the current frequency is below 1.2ghz, everything is 200mhz steps - if(policy->cur <= 1200000 && policy->cur >= 400000) { -/* catch the extra 200mhz gap between 400 and 800 when scaling down -netarchy */ - if(policy->cur == 800000) { - new_freq = policy->cur - 400000; - return new_freq; - } - else { - new_freq = policy->cur - 200000; - return new_freq; - } - } - //above 1.2ghz though, everything is 100mhz steps - else { - new_freq = policy->cur - 100000; - return new_freq; - } - } - if (cpu_load > max_cpu_load) { - if(policy->cur < 1200000 && policy->cur > 100000) { -/* catch the gap between 400 and 800 when scaling up -netarchy */ - if(policy->cur == 400000) { - new_freq = policy->cur + 400000; - return new_freq; - } - else { - new_freq = policy->cur + 200000; - return new_freq; - } - } - else { - new_freq = policy->cur + 100000; - return new_freq; - } - } - return policy->cur; -} - -/* We use the same work function to sale up and down */ -static void cpufreq_smartass_freq_change_time_work(struct work_struct *work) -{ - unsigned int cpu; - unsigned int new_freq; - struct smartass_info_s *this_smartass; - struct cpufreq_policy *policy; - cpumask_t tmp_mask = work_cpumask; - for_each_cpu(cpu, tmp_mask) { - this_smartass = &per_cpu(smartass_info, cpu); - policy = this_smartass->cur_policy; - - if (this_smartass->force_ramp_up) { - this_smartass->force_ramp_up = 0; - - if (nr_running() == 1) { - cpumask_clear_cpu(cpu, &work_cpumask); - return; - } - - if (policy->cur == policy->max) - return; - - new_freq = policy->cur + max_ramp_up; - - if (suspended && sleep_max_freq) { - if (new_freq > sleep_max_freq) - new_freq = sleep_max_freq; - } else { - if (new_freq < up_min_freq) - new_freq = up_min_freq; - } - - } else { - new_freq = cpufreq_smartass_calc_freq(cpu,policy); - - // in suspend limit to sleep_max_freq and - // jump straight to sleep_max_freq to avoid wakeup problems - if (suspended && sleep_max_freq && - (new_freq > sleep_max_freq || new_freq > policy->cur)) - new_freq = sleep_max_freq; - } - - if (new_freq > policy->max) - new_freq = policy->max; - - if (new_freq < policy->min) - new_freq = policy->min; - - __cpufreq_driver_target(policy, new_freq, - CPUFREQ_RELATION_L); - - freq_change_time_in_idle = get_cpu_idle_time_us(cpu, - &freq_change_time); - - cpumask_clear_cpu(cpu, &work_cpumask); - - } - - -} - -static ssize_t show_ramp_up_rate_ns(struct cpufreq_policy *policy, char *buf) { - return sprintf(buf, "%lu\n", ramp_up_rate_ns); -} - -static ssize_t store_ramp_up_rate_ns(struct cpufreq_policy *policy, const char *buf, size_t count) { - ssize_t ret; - unsigned long input; - ret = strict_strtoul(buf, 0, &input); - if (ret >= 0 && input >= 0 && input <= 100000000) - ramp_up_rate_ns = input; - return ret; -} - -static struct freq_attr ramp_up_rate_ns_attr = __ATTR(ramp_up_rate_ns, 0644, - show_ramp_up_rate_ns, store_ramp_up_rate_ns); - -static ssize_t show_ramp_down_rate_ns(struct cpufreq_policy *policy, char *buf) -{ - 
return sprintf(buf, "%lu\n", ramp_down_rate_ns); -} - -static ssize_t store_ramp_down_rate_ns(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 1000 && input <= 100000000) - ramp_down_rate_ns = input; - return res; -} - -static struct freq_attr ramp_down_rate_ns_attr = __ATTR(ramp_down_rate_ns, 0644, - show_ramp_down_rate_ns, store_ramp_down_rate_ns); - -static ssize_t show_up_min_freq(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%u\n", up_min_freq); -} - -static ssize_t store_up_min_freq(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - up_min_freq = input; - return res; -} - -static struct freq_attr up_min_freq_attr = __ATTR(up_min_freq, 0644, - show_up_min_freq, store_up_min_freq); - -static ssize_t show_sleep_max_freq(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%u\n", sleep_max_freq); -} - -static ssize_t store_sleep_max_freq(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - sleep_max_freq = input; - return res; -} - -static struct freq_attr sleep_max_freq_attr = __ATTR(sleep_max_freq, 0644, - show_sleep_max_freq, store_sleep_max_freq); - -static ssize_t show_sample_rate_jiffies(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%u\n", sample_rate_jiffies); -} - -static ssize_t store_sample_rate_jiffies(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 0 && input <= 1000) - sample_rate_jiffies = input; - return res; -} - -static struct freq_attr sample_rate_jiffies_attr = __ATTR(sample_rate_jiffies, 0644, - show_sample_rate_jiffies, store_sample_rate_jiffies); - -static ssize_t show_max_ramp_up(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%u\n", max_ramp_up); -} - -static ssize_t store_max_ramp_up(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 10000) - max_ramp_up = input; - return res; -} - -static struct freq_attr max_ramp_up_attr = __ATTR(max_ramp_up, 0644, - show_max_ramp_up, store_max_ramp_up); - -static ssize_t show_max_cpu_load(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%lu\n", max_cpu_load); -} - -static ssize_t store_max_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 0 && input <= 100) - max_cpu_load = input; - return res; -} - -static struct freq_attr max_cpu_load_attr = __ATTR(max_cpu_load, 0644, - show_max_cpu_load, store_max_cpu_load); - -static ssize_t show_min_cpu_load(struct cpufreq_policy *policy, char *buf) -{ - return sprintf(buf, "%lu\n", min_cpu_load); -} - -static ssize_t store_min_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 0 && input < 100) - min_cpu_load = input; - return res; -} - -static struct freq_attr min_cpu_load_attr = __ATTR(min_cpu_load, 0644, - show_min_cpu_load, 
store_min_cpu_load); - -static struct attribute * smartass_attributes[] = { - &ramp_down_rate_ns_attr.attr, - &up_min_freq_attr.attr, - &sleep_max_freq_attr.attr, - &sample_rate_jiffies_attr.attr, - &max_ramp_up_attr.attr, - &max_cpu_load_attr.attr, - &min_cpu_load_attr.attr, - &ramp_up_rate_ns_attr.attr, - NULL, -}; - -static struct attribute_group smartass_attr_group = { - .attrs = smartass_attributes, - .name = "smartass", -}; - -static int cpufreq_governor_smartass(struct cpufreq_policy *new_policy, - unsigned int event) -{ - unsigned int cpu = new_policy->cpu; - int rc; - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!new_policy->cur)) - return -EINVAL; - - if (this_smartass->enable) /* Already enabled */ - break; - - /* - * Do not register the idle hook and create sysfs - * entries if we have already done so. - */ - if (atomic_inc_return(&active_count) > 1) - return 0; - - rc = sysfs_create_group(&new_policy->kobj, &smartass_attr_group); - if (rc) - return rc; - pm_idle_old = pm_idle; - pm_idle = cpufreq_idle; - - this_smartass->cur_policy = new_policy; - this_smartass->enable = 1; - - // notice no break here! - - case CPUFREQ_GOV_LIMITS: - if (this_smartass->cur_policy->cur != new_policy->max) - __cpufreq_driver_target(new_policy, new_policy->max, CPUFREQ_RELATION_H); - - break; - - case CPUFREQ_GOV_STOP: - this_smartass->enable = 0; - - if (atomic_dec_return(&active_count) > 1) - return 0; - sysfs_remove_group(&new_policy->kobj, - &smartass_attr_group); - - pm_idle = pm_idle_old; - del_timer(&this_smartass->timer); - break; - } - - return 0; -} - -static void smartass_suspend(int cpu, int suspend) -{ - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); - struct cpufreq_policy *policy = this_smartass->cur_policy; - unsigned int new_freq; - - if (!this_smartass->enable || sleep_max_freq==0) // disable behavior for sleep_max_freq==0 - return; - - if (suspend) { - if (policy->cur > sleep_max_freq) { - new_freq = sleep_max_freq; - if (new_freq > policy->max) - new_freq = policy->max; - if (new_freq < policy->min) - new_freq = policy->min; - __cpufreq_driver_target(policy, new_freq, - CPUFREQ_RELATION_H); - } - } else { // resume at max speed: - __cpufreq_driver_target(policy, policy->max, - CPUFREQ_RELATION_H); - } - -} - -static void smartass_early_suspend(struct early_suspend *handler) { - int i; - suspended = 1; - for_each_online_cpu(i) - smartass_suspend(i,1); -} - -static void smartass_late_resume(struct early_suspend *handler) { - int i; - suspended = 0; - for_each_online_cpu(i) - smartass_suspend(i,0); -} - -static struct early_suspend smartass_power_suspend = { - .suspend = smartass_early_suspend, - .resume = smartass_late_resume, -}; - -static int __init cpufreq_smartass_init(void) -{ - unsigned int i; - struct smartass_info_s *this_smartass; - ramp_down_rate_ns = DEFAULT_RAMP_DOWN_RATE_NS; - up_min_freq = DEFAULT_UP_MIN_FREQ; - sleep_max_freq = DEFAULT_SLEEP_MAX_FREQ; - sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES; - max_ramp_up = DEFAULT_MAX_RAMP_UP; - max_cpu_load = DEFAULT_MAX_CPU_LOAD; - min_cpu_load = DEFAULT_MIN_CPU_LOAD; - ramp_up_rate_ns = DEFAULT_RAMP_UP_RATE_NS; - - suspended = 0; - - /* Initalize per-cpu data: */ - for_each_possible_cpu(i) { - this_smartass = &per_cpu(smartass_info, i); - this_smartass->enable = 0; - this_smartass->force_ramp_up = 0; - this_smartass->time_in_idle = 0; - this_smartass->idle_exit_time = 0; - // 
intialize timer: - init_timer_deferrable(&this_smartass->timer); - this_smartass->timer.function = cpufreq_smartass_timer; - this_smartass->timer.data = i; - } - - /* Scale up is high priority */ - up_wq = create_workqueue("ksmartass_up"); - down_wq = create_workqueue("ksmartass_down"); - - INIT_WORK(&freq_scale_work, cpufreq_smartass_freq_change_time_work); - - register_early_suspend(&smartass_power_suspend); - - return cpufreq_register_governor(&cpufreq_gov_smartass); -} - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS -pure_initcall(cpufreq_smartass_init); -#else -module_init(cpufreq_smartass_init); -#endif - -static void __exit cpufreq_smartass_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_smartass); - destroy_workqueue(up_wq); - destroy_workqueue(down_wq); -} - -module_exit(cpufreq_smartass_exit); - -MODULE_AUTHOR ("Erasmux"); -MODULE_DESCRIPTION ("'cpufreq_minmax' - A smart cpufreq governor optimized for the hero!"); -MODULE_LICENSE ("GPL"); - diff --git a/drivers/cpufreq/cpufreq_smartass2.c b/drivers/cpufreq/cpufreq_smartass2.c deleted file mode 100644 index 05c39ded..00000000 --- a/drivers/cpufreq/cpufreq_smartass2.c +++ /dev/null @@ -1,868 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_smartass2.c - * - * Copyright (C) 2010 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Author: Erasmux - * - * Based on the interactive governor By Mike Chan (mike@android.com) - * which was adaptated to 2.6.29 kernel by Nadlabak (pavel@doshaska.net) - * - * SMP support based on mod by faux123 - * - * For a general overview of smartassV2 see the relavent part in - * Documentation/cpu-freq/governors.txt - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -/******************** Tunable parameters: ********************/ - -/* - * The "ideal" frequency to use when awake. The governor will ramp up faster - * towards the ideal frequency and slower after it has passed it. Similarly, - * lowering the frequency towards the ideal frequency is faster than below it. - */ -#define DEFAULT_AWAKE_IDEAL_FREQ 800000 -static unsigned int awake_ideal_freq; - -/* - * The "ideal" frequency to use when suspended. - * When set to 0, the governor will not track the suspended state (meaning - * that practically when sleep_ideal_freq==0 the awake_ideal_freq is used - * also when suspended). - */ -#define DEFAULT_SLEEP_IDEAL_FREQ 100000 -static unsigned int sleep_ideal_freq; - -/* - * Freqeuncy delta when ramping up above the ideal freqeuncy. - * Zero disables and causes to always jump straight to max frequency. - * When below the ideal freqeuncy we always ramp up to the ideal freq. - */ -#define DEFAULT_RAMP_UP_STEP 256000 -static unsigned int ramp_up_step; - -/* - * Freqeuncy delta when ramping down below the ideal freqeuncy. - * Zero disables and will calculate ramp down according to load heuristic. - * When above the ideal freqeuncy we always ramp down to the ideal freq. 
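Taken together, these tunables describe an asymmetric pull toward the ideal frequency: below it the governor ramps at least up to the ideal, when above it the governor always falls back to the ideal, and only beyond that do ramp_up_step and ramp_down_step (or a straight jump to max when the up step is zero) take over. A standalone sketch of those stated rules with hypothetical frequencies; the actual governor code further below also clamps the result against the policy limits and the frequency table, and the zero-down-step load heuristic is replaced here by a simple stand-in:

/* Illustration only, not part of this patch: ramp targets implied by the tunables above. */
#include <stdio.h>

static unsigned int ramp_up_target(unsigned int cur, unsigned int ideal,
				   unsigned int ramp_up_step, unsigned int max)
{
	if (cur < ideal)
		return ideal;			/* below ideal: go at least to ideal */
	if (ramp_up_step == 0)
		return max;			/* zero step: jump straight to max   */
	return cur + ramp_up_step;		/* above ideal: one step at a time   */
}

static unsigned int ramp_down_target(unsigned int cur, unsigned int ideal,
				     unsigned int ramp_down_step, unsigned int min)
{
	if (cur > ideal)
		return ideal;			/* above ideal: fall back to ideal          */
	if (ramp_down_step == 0)
		return min;			/* stand-in for the real load heuristic     */
	return cur > ramp_down_step ? cur - ramp_down_step : min;
}

int main(void)
{
	unsigned int ideal = 800000, min = 200000, max = 1200000;	/* kHz, hypothetical policy */

	printf("up   from 500 MHz -> %u kHz\n", ramp_up_target(500000, ideal, 256000, max));
	printf("up   from 900 MHz -> %u kHz\n", ramp_up_target(900000, ideal, 256000, max));
	printf("down from 1.1 GHz -> %u kHz\n", ramp_down_target(1100000, ideal, 256000, min));
	printf("down from 600 MHz -> %u kHz\n", ramp_down_target(600000, ideal, 256000, min));
	return 0;
}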
- */ -#define DEFAULT_RAMP_DOWN_STEP 256000 -static unsigned int ramp_down_step; - -/* - * CPU freq will be increased if measured load > max_cpu_load; - */ -#define DEFAULT_MAX_CPU_LOAD 50 -static unsigned long max_cpu_load; - -/* - * CPU freq will be decreased if measured load < min_cpu_load; - */ -#define DEFAULT_MIN_CPU_LOAD 25 -static unsigned long min_cpu_load; - -/* - * The minimum amount of time to spend at a frequency before we can ramp up. - * Notice we ignore this when we are below the ideal frequency. - */ -#define DEFAULT_UP_RATE_US 48000; -static unsigned long up_rate_us; - -/* - * The minimum amount of time to spend at a frequency before we can ramp down. - * Notice we ignore this when we are above the ideal frequency. - */ -#define DEFAULT_DOWN_RATE_US 99000; -static unsigned long down_rate_us; - -/* - * The frequency to set when waking up from sleep. - * When sleep_ideal_freq=0 this will have no effect. - */ -#define DEFAULT_SLEEP_WAKEUP_FREQ 99999999 -static unsigned int sleep_wakeup_freq; - -/* - * Sampling rate, I highly recommend to leave it at 2. - */ -#define DEFAULT_SAMPLE_RATE_JIFFIES 2 -static unsigned int sample_rate_jiffies; - - -/*************** End of tunables ***************/ - - -static void (*pm_idle_old)(void); -static atomic_t active_count = ATOMIC_INIT(0); - -struct smartass_info_s { - struct cpufreq_policy *cur_policy; - struct cpufreq_frequency_table *freq_table; - struct timer_list timer; - u64 time_in_idle; - u64 idle_exit_time; - u64 freq_change_time; - u64 freq_change_time_in_idle; - int cur_cpu_load; - int old_freq; - int ramp_dir; - unsigned int enable; - int ideal_speed; -}; -static DEFINE_PER_CPU(struct smartass_info_s, smartass_info); - -/* Workqueues handle frequency scaling */ -static struct workqueue_struct *up_wq; -static struct workqueue_struct *down_wq; -static struct work_struct freq_scale_work; - -static cpumask_t work_cpumask; -static spinlock_t cpumask_lock; - -static unsigned int suspended; - -#define dprintk(flag,msg...) do { \ - if (debug_mask & flag) printk(KERN_DEBUG msg); \ - } while (0) - -enum { - SMARTASS_DEBUG_JUMPS=1, - SMARTASS_DEBUG_LOAD=2, - SMARTASS_DEBUG_ALG=4 -}; - -/* - * Combination of the above debug flags. - */ -static unsigned long debug_mask; - -static int cpufreq_governor_smartass(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2 -static -#endif -struct cpufreq_governor cpufreq_gov_smartass2 = { - .name = "smartassV2", - .governor = cpufreq_governor_smartass, - .max_transition_latency = 9000000, - .owner = THIS_MODULE, -}; - -inline static void smartass_update_min_max(struct smartass_info_s *this_smartass, struct cpufreq_policy *policy, int suspend) { - if (suspend) { - this_smartass->ideal_speed = // sleep_ideal_freq; but make sure it obeys the policy min/max - policy->max > sleep_ideal_freq ? - (sleep_ideal_freq > policy->min ? sleep_ideal_freq : policy->min) : policy->max; - } else { - this_smartass->ideal_speed = // awake_ideal_freq; but make sure it obeys the policy min/max - policy->min < awake_ideal_freq ? - (awake_ideal_freq < policy->max ? 
awake_ideal_freq : policy->max) : policy->min; - } -} - -inline static void smartass_update_min_max_allcpus(void) { - unsigned int i; - for_each_online_cpu(i) { - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, i); - if (this_smartass->enable) - smartass_update_min_max(this_smartass,this_smartass->cur_policy,suspended); - } -} - -inline static unsigned int validate_freq(struct cpufreq_policy *policy, int freq) { - if (freq > (int)policy->max) - return policy->max; - if (freq < (int)policy->min) - return policy->min; - return freq; -} - -inline static void reset_timer(unsigned long cpu, struct smartass_info_s *this_smartass) { - this_smartass->time_in_idle = get_cpu_idle_time_us(cpu, &this_smartass->idle_exit_time); - mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies); -} - -inline static void work_cpumask_set(unsigned long cpu) { - unsigned long flags; - spin_lock_irqsave(&cpumask_lock, flags); - cpumask_set_cpu(cpu, &work_cpumask); - spin_unlock_irqrestore(&cpumask_lock, flags); -} - -inline static int work_cpumask_test_and_clear(unsigned long cpu) { - unsigned long flags; - int res = 0; - spin_lock_irqsave(&cpumask_lock, flags); - res = cpumask_test_and_clear_cpu(cpu, &work_cpumask); - spin_unlock_irqrestore(&cpumask_lock, flags); - return res; -} - -inline static int target_freq(struct cpufreq_policy *policy, struct smartass_info_s *this_smartass, - int new_freq, int old_freq, int prefered_relation) { - int index, target; - struct cpufreq_frequency_table *table = this_smartass->freq_table; - - if (new_freq == old_freq) - return 0; - new_freq = validate_freq(policy,new_freq); - if (new_freq == old_freq) - return 0; - - if (table && - !cpufreq_frequency_table_target(policy,table,new_freq,prefered_relation,&index)) - { - target = table[index].frequency; - if (target == old_freq) { - // if for example we are ramping up to *at most* current + ramp_up_step - // but there is no such frequency higher than the current, try also - // to ramp up to *at least* current + ramp_up_step. - if (new_freq > old_freq && prefered_relation==CPUFREQ_RELATION_H - && !cpufreq_frequency_table_target(policy,table,new_freq, - CPUFREQ_RELATION_L,&index)) - target = table[index].frequency; - // simlarly for ramping down: - else if (new_freq < old_freq && prefered_relation==CPUFREQ_RELATION_L - && !cpufreq_frequency_table_target(policy,table,new_freq, - CPUFREQ_RELATION_H,&index)) - target = table[index].frequency; - } - - if (target == old_freq) { - // We should not get here: - // If we got here we tried to change to a validated new_freq which is different - // from old_freq, so there is no reason for us to remain at same frequency. 
- printk(KERN_WARNING "Smartass: frequency change failed: %d to %d => %d\n", - old_freq,new_freq,target); - return 0; - } - } - else target = new_freq; - - __cpufreq_driver_target(policy, target, prefered_relation); - - dprintk(SMARTASS_DEBUG_JUMPS,"SmartassQ: jumping from %d to %d => %d (%d)\n", - old_freq,new_freq,target,policy->cur); - - return target; -} - -static void cpufreq_smartass_timer(unsigned long cpu) -{ - u64 delta_idle; - u64 delta_time; - int cpu_load; - int old_freq; - u64 update_time; - u64 now_idle; - int queued_work = 0; - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); - struct cpufreq_policy *policy = this_smartass->cur_policy; - - now_idle = get_cpu_idle_time_us(cpu, &update_time); - old_freq = policy->cur; - - if (this_smartass->idle_exit_time == 0 || update_time == this_smartass->idle_exit_time) - return; - - delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle); - delta_time = cputime64_sub(update_time, this_smartass->idle_exit_time); - - // If timer ran less than 1ms after short-term sample started, retry. - if (delta_time < 1000) { - if (!timer_pending(&this_smartass->timer)) - reset_timer(cpu,this_smartass); - return; - } - - if (delta_idle > delta_time) - cpu_load = 0; - else - cpu_load = 100 * (unsigned int)(delta_time - delta_idle) / (unsigned int)delta_time; - - dprintk(SMARTASS_DEBUG_LOAD,"smartassT @ %d: load %d (delta_time %llu)\n", - old_freq,cpu_load,delta_time); - - this_smartass->cur_cpu_load = cpu_load; - this_smartass->old_freq = old_freq; - - // Scale up if load is above max or if there where no idle cycles since coming out of idle, - // additionally, if we are at or above the ideal_speed, verify we have been at this frequency - // for at least up_rate_us: - if (cpu_load > max_cpu_load || delta_idle == 0) - { - if (old_freq < policy->max && - (old_freq < this_smartass->ideal_speed || delta_idle == 0 || - cputime64_sub(update_time, this_smartass->freq_change_time) >= up_rate_us)) - { - dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp up: load %d (delta_idle %llu)\n", - old_freq,cpu_load,delta_idle); - this_smartass->ramp_dir = 1; - work_cpumask_set(cpu); - queue_work(up_wq, &freq_scale_work); - queued_work = 1; - } - else this_smartass->ramp_dir = 0; - } - // Similarly for scale down: load should be below min and if we are at or below ideal - // frequency we require that we have been at this frequency for at least down_rate_us: - else if (cpu_load < min_cpu_load && old_freq > policy->min && - (old_freq > this_smartass->ideal_speed || - cputime64_sub(update_time, this_smartass->freq_change_time) >= down_rate_us)) - { - dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp down: load %d (delta_idle %llu)\n", - old_freq,cpu_load,delta_idle); - this_smartass->ramp_dir = -1; - work_cpumask_set(cpu); - queue_work(down_wq, &freq_scale_work); - queued_work = 1; - } - else this_smartass->ramp_dir = 0; - - // To avoid unnecessary load when the CPU is already at high load, we don't - // reset ourselves if we are at max speed. If and when there are idle cycles, - // the idle loop will activate the timer. - // Additionally, if we queued some work, the work task will reset the timer - // after it has done its adjustments. 
- if (!queued_work && old_freq < policy->max) - reset_timer(cpu,this_smartass); -} - -static void cpufreq_idle(void) -{ - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); - struct cpufreq_policy *policy = this_smartass->cur_policy; - - if (!this_smartass->enable) { - pm_idle_old(); - return; - } - - if (policy->cur == policy->min && timer_pending(&this_smartass->timer)) - del_timer(&this_smartass->timer); - - pm_idle_old(); - - if (!timer_pending(&this_smartass->timer)) - reset_timer(smp_processor_id(), this_smartass); -} - -/* We use the same work function to sale up and down */ -static void cpufreq_smartass_freq_change_time_work(struct work_struct *work) -{ - unsigned int cpu; - int new_freq; - int old_freq; - int ramp_dir; - struct smartass_info_s *this_smartass; - struct cpufreq_policy *policy; - unsigned int relation = CPUFREQ_RELATION_L; - for_each_possible_cpu(cpu) { - this_smartass = &per_cpu(smartass_info, cpu); - if (!work_cpumask_test_and_clear(cpu)) - continue; - - ramp_dir = this_smartass->ramp_dir; - this_smartass->ramp_dir = 0; - - old_freq = this_smartass->old_freq; - policy = this_smartass->cur_policy; - - if (old_freq != policy->cur) { - // frequency was changed by someone else? - printk(KERN_WARNING "Smartass: frequency changed by 3rd party: %d to %d\n", - old_freq,policy->cur); - new_freq = old_freq; - } - else if (ramp_dir > 0 && nr_running() > 1) { - // ramp up logic: - if (old_freq < this_smartass->ideal_speed) - new_freq = this_smartass->ideal_speed; - else if (ramp_up_step) { - new_freq = old_freq + ramp_up_step; - relation = CPUFREQ_RELATION_H; - } - else { - new_freq = policy->max; - relation = CPUFREQ_RELATION_H; - } - dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp up: ramp_dir=%d ideal=%d\n", - old_freq,ramp_dir,this_smartass->ideal_speed); - } - else if (ramp_dir < 0) { - // ramp down logic: - if (old_freq > this_smartass->ideal_speed) { - new_freq = this_smartass->ideal_speed; - relation = CPUFREQ_RELATION_H; - } - else if (ramp_down_step) - new_freq = old_freq - ramp_down_step; - else { - // Load heuristics: Adjust new_freq such that, assuming a linear - // scaling of load vs. frequency, the load in the new frequency - // will be max_cpu_load: - new_freq = old_freq * this_smartass->cur_cpu_load / max_cpu_load; - if (new_freq > old_freq) // min_cpu_load > max_cpu_load ?! - new_freq = old_freq -1; - } - dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp down: ramp_dir=%d ideal=%d\n", - old_freq,ramp_dir,this_smartass->ideal_speed); - } - else { // ramp_dir==0 ?! Could the timer change its mind about a queued ramp up/down - // before the work task gets to run? 
- // This may also happen if we refused to ramp up because the nr_running()==1 - new_freq = old_freq; - dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d nothing: ramp_dir=%d nr_running=%lu\n", - old_freq,ramp_dir,nr_running()); - } - - // do actual ramp up (returns 0, if frequency change failed): - new_freq = target_freq(policy,this_smartass,new_freq,old_freq,relation); - if (new_freq) - this_smartass->freq_change_time_in_idle = - get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time); - - // reset timer: - if (new_freq < policy->max) - reset_timer(cpu,this_smartass); - // if we are maxed out, it is pointless to use the timer - // (idle cycles wake up the timer when the timer comes) - else if (timer_pending(&this_smartass->timer)) - del_timer(&this_smartass->timer); - } -} - -static ssize_t show_debug_mask(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", debug_mask); -} - -static ssize_t store_debug_mask(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0) - debug_mask = input; - return res; -} - -static ssize_t show_up_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", up_rate_us); -} - -static ssize_t store_up_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0 && input <= 100000000) - up_rate_us = input; - return res; -} - -static ssize_t show_down_rate_us(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", down_rate_us); -} - -static ssize_t store_down_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0 && input <= 100000000) - down_rate_us = input; - return res; -} - -static ssize_t show_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", sleep_ideal_freq); -} - -static ssize_t store_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) { - sleep_ideal_freq = input; - if (suspended) - smartass_update_min_max_allcpus(); - } - return res; -} - -static ssize_t show_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", sleep_wakeup_freq); -} - -static ssize_t store_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - sleep_wakeup_freq = input; - return res; -} - -static ssize_t show_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", awake_ideal_freq); -} - -static ssize_t store_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) { - awake_ideal_freq = input; - if (!suspended) - smartass_update_min_max_allcpus(); - } - return res; -} - -static ssize_t show_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return 
sprintf(buf, "%u\n", sample_rate_jiffies); -} - -static ssize_t store_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 0 && input <= 1000) - sample_rate_jiffies = input; - return res; -} - -static ssize_t show_ramp_up_step(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", ramp_up_step); -} - -static ssize_t store_ramp_up_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - ramp_up_step = input; - return res; -} - -static ssize_t show_ramp_down_step(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", ramp_down_step); -} - -static ssize_t store_ramp_down_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input >= 0) - ramp_down_step = input; - return res; -} - -static ssize_t show_max_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", max_cpu_load); -} - -static ssize_t store_max_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 0 && input <= 100) - max_cpu_load = input; - return res; -} - -static ssize_t show_min_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return sprintf(buf, "%lu\n", min_cpu_load); -} - -static ssize_t store_min_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) -{ - ssize_t res; - unsigned long input; - res = strict_strtoul(buf, 0, &input); - if (res >= 0 && input > 0 && input < 100) - min_cpu_load = input; - return res; -} - -#define define_global_rw_attr(_name) \ -static struct global_attr _name##_attr = \ - __ATTR(_name, 0644, show_##_name, store_##_name) - -define_global_rw_attr(debug_mask); -define_global_rw_attr(up_rate_us); -define_global_rw_attr(down_rate_us); -define_global_rw_attr(sleep_ideal_freq); -define_global_rw_attr(sleep_wakeup_freq); -define_global_rw_attr(awake_ideal_freq); -define_global_rw_attr(sample_rate_jiffies); -define_global_rw_attr(ramp_up_step); -define_global_rw_attr(ramp_down_step); -define_global_rw_attr(max_cpu_load); -define_global_rw_attr(min_cpu_load); - -static struct attribute * smartass_attributes[] = { - &debug_mask_attr.attr, - &up_rate_us_attr.attr, - &down_rate_us_attr.attr, - &sleep_ideal_freq_attr.attr, - &sleep_wakeup_freq_attr.attr, - &awake_ideal_freq_attr.attr, - &sample_rate_jiffies_attr.attr, - &ramp_up_step_attr.attr, - &ramp_down_step_attr.attr, - &max_cpu_load_attr.attr, - &min_cpu_load_attr.attr, - NULL, -}; - -static struct attribute_group smartass_attr_group = { - .attrs = smartass_attributes, - .name = "smartass", -}; - -static int cpufreq_governor_smartass(struct cpufreq_policy *new_policy, - unsigned int event) -{ - unsigned int cpu = new_policy->cpu; - int rc; - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!new_policy->cur)) - return -EINVAL; - - this_smartass->cur_policy = new_policy; - - this_smartass->enable = 1; - - 
smartass_update_min_max(this_smartass,new_policy,suspended); - - this_smartass->freq_table = cpufreq_frequency_get_table(cpu); - if (!this_smartass->freq_table) - printk(KERN_WARNING "Smartass: no frequency table for cpu %d?!\n",cpu); - - smp_wmb(); - - // Do not register the idle hook and create sysfs - // entries if we have already done so. - if (atomic_inc_return(&active_count) <= 1) { - rc = sysfs_create_group(cpufreq_global_kobject, - &smartass_attr_group); - if (rc) - return rc; - - pm_idle_old = pm_idle; - pm_idle = cpufreq_idle; - } - - if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer)) - reset_timer(cpu,this_smartass); - - break; - - case CPUFREQ_GOV_LIMITS: - smartass_update_min_max(this_smartass,new_policy,suspended); - - if (this_smartass->cur_policy->cur > new_policy->max) { - dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new max freq: %d\n",new_policy->max); - __cpufreq_driver_target(this_smartass->cur_policy, - new_policy->max, CPUFREQ_RELATION_H); - } - else if (this_smartass->cur_policy->cur < new_policy->min) { - dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new min freq: %d\n",new_policy->min); - __cpufreq_driver_target(this_smartass->cur_policy, - new_policy->min, CPUFREQ_RELATION_L); - } - - if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer)) - reset_timer(cpu,this_smartass); - - break; - - case CPUFREQ_GOV_STOP: - this_smartass->enable = 0; - smp_wmb(); - del_timer(&this_smartass->timer); - flush_work(&freq_scale_work); - this_smartass->idle_exit_time = 0; - - if (atomic_dec_return(&active_count) <= 1) { - sysfs_remove_group(cpufreq_global_kobject, - &smartass_attr_group); - pm_idle = pm_idle_old; - } - break; - } - - return 0; -} - -static void smartass_suspend(int cpu, int suspend) -{ - struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id()); - struct cpufreq_policy *policy = this_smartass->cur_policy; - unsigned int new_freq; - - if (!this_smartass->enable) - return; - - smartass_update_min_max(this_smartass,policy,suspend); - if (!suspend) { // resume at max speed: - new_freq = validate_freq(policy,sleep_wakeup_freq); - - dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: awaking at %d\n",new_freq); - - __cpufreq_driver_target(policy, new_freq, - CPUFREQ_RELATION_L); - } else { - // to avoid wakeup issues with quick sleep/wakeup don't change actual frequency when entering sleep - // to allow some time to settle down. Instead we just reset our statistics (and reset the timer). - // Eventually, the timer will adjust the frequency if necessary. 
- - this_smartass->freq_change_time_in_idle = - get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time); - - dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: suspending at %d\n",policy->cur); - } - - reset_timer(smp_processor_id(),this_smartass); -} - -static void smartass_early_suspend(struct early_suspend *handler) { - int i; - if (suspended || sleep_ideal_freq==0) // disable behavior for sleep_ideal_freq==0 - return; - suspended = 1; - for_each_online_cpu(i) - smartass_suspend(i,1); -} - -static void smartass_late_resume(struct early_suspend *handler) { - int i; - if (!suspended) // already not suspended so nothing to do - return; - suspended = 0; - for_each_online_cpu(i) - smartass_suspend(i,0); -} - -static struct early_suspend smartass_power_suspend = { - .suspend = smartass_early_suspend, - .resume = smartass_late_resume, -#ifdef CONFIG_MACH_HERO - .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1, -#endif -}; - -static int __init cpufreq_smartass_init(void) -{ - unsigned int i; - struct smartass_info_s *this_smartass; - debug_mask = 0; - up_rate_us = DEFAULT_UP_RATE_US; - down_rate_us = DEFAULT_DOWN_RATE_US; - sleep_ideal_freq = DEFAULT_SLEEP_IDEAL_FREQ; - sleep_wakeup_freq = DEFAULT_SLEEP_WAKEUP_FREQ; - awake_ideal_freq = DEFAULT_AWAKE_IDEAL_FREQ; - sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES; - ramp_up_step = DEFAULT_RAMP_UP_STEP; - ramp_down_step = DEFAULT_RAMP_DOWN_STEP; - max_cpu_load = DEFAULT_MAX_CPU_LOAD; - min_cpu_load = DEFAULT_MIN_CPU_LOAD; - - spin_lock_init(&cpumask_lock); - - suspended = 0; - - /* Initalize per-cpu data: */ - for_each_possible_cpu(i) { - this_smartass = &per_cpu(smartass_info, i); - this_smartass->enable = 0; - this_smartass->cur_policy = 0; - this_smartass->ramp_dir = 0; - this_smartass->time_in_idle = 0; - this_smartass->idle_exit_time = 0; - this_smartass->freq_change_time = 0; - this_smartass->freq_change_time_in_idle = 0; - this_smartass->cur_cpu_load = 0; - // intialize timer: - init_timer_deferrable(&this_smartass->timer); - this_smartass->timer.function = cpufreq_smartass_timer; - this_smartass->timer.data = i; - work_cpumask_test_and_clear(i); - } - - // Scale up is high priority - up_wq = alloc_workqueue("ksmartass_up", WQ_HIGHPRI, 1); - down_wq = alloc_workqueue("ksmartass_down", 0, 1); - if (!up_wq || !down_wq) - return -ENOMEM; - - INIT_WORK(&freq_scale_work, cpufreq_smartass_freq_change_time_work); - - register_early_suspend(&smartass_power_suspend); - - return cpufreq_register_governor(&cpufreq_gov_smartass2); -} - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2 -fs_initcall(cpufreq_smartass_init); -#else -module_init(cpufreq_smartass_init); -#endif - -static void __exit cpufreq_smartass_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_smartass2); - destroy_workqueue(up_wq); - destroy_workqueue(down_wq); -} - -module_exit(cpufreq_smartass_exit); - -MODULE_AUTHOR ("Erasmux"); -MODULE_DESCRIPTION ("'cpufreq_smartass2' - A smart cpufreq governor"); -MODULE_LICENSE ("GPL"); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index a301beed..986b0aac 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -25,9 +24,6 @@ #define CPUFREQ_NAME_LEN 16 -/********************************************************************* - * CPUFREQ NOTIFIER INTERFACE * - *********************************************************************/ #define CPUFREQ_TRANSITION_NOTIFIER (0) #define CPUFREQ_POLICY_NOTIFIER (1) @@ -50,10 +46,6 @@ static inline int 
cpufreq_unregister_notifier(struct notifier_block *nb, static inline void disable_cpufreq(void) { } #endif -/* if (cpufreq_driver->target) exists, the ->governor decides what frequency - * within the limits is used. If (cpufreq_driver->setpolicy> exists, these - * two generic policies are available: - */ #define CPUFREQ_POLICY_POWERSAVE (1) #define CPUFREQ_POLICY_PERFORMANCE (2) @@ -63,7 +55,6 @@ static inline void disable_cpufreq(void) { } struct cpufreq_governor; -/* /sys/devices/system/cpu/cpufreq: entry point for global variables */ extern struct kobject *cpufreq_global_kobject; #define CPUFREQ_ETERNAL (-1) @@ -114,7 +105,6 @@ struct cpufreq_policy { #define CPUFREQ_SHARED_TYPE_ALL (2) #define CPUFREQ_SHARED_TYPE_ANY (3) -/******************** cpufreq transition notifiers *******************/ #define CPUFREQ_PRECHANGE (0) #define CPUFREQ_POSTCHANGE (1) @@ -129,15 +119,6 @@ struct cpufreq_freqs { }; -/** - * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch safe) - * @old: old value - * @div: divisor - * @mult: multiplier - * - * - * new = old * mult / div - */ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mult) { #if BITS_PER_LONG == 32 @@ -155,9 +136,6 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mu #endif }; -/********************************************************************* - * CPUFREQ GOVERNORS * - *********************************************************************/ #define CPUFREQ_GOV_START 1 #define CPUFREQ_GOV_STOP 2 @@ -176,9 +154,6 @@ struct cpufreq_governor { struct module *owner; }; -/* - * Pass a target to the cpufreq driver. - */ extern int cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation); @@ -236,7 +211,6 @@ struct cpufreq_driver { struct freq_attr **attr; }; -/* flags */ #define CPUFREQ_STICKY 0x01 #define CPUFREQ_CONST_LOOPS 0x02 @@ -300,14 +274,10 @@ static struct global_attr _name = \ __ATTR(_name, 0644, show_##_name, store_##_name) -/********************************************************************* - * CPUFREQ 2.6. INTERFACE * - *********************************************************************/ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); int cpufreq_update_policy(unsigned int cpu); #ifdef CONFIG_CPU_FREQ -/* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */ unsigned int cpufreq_get(unsigned int cpu); #else static inline unsigned int cpufreq_get(unsigned int cpu) @@ -316,7 +286,6 @@ static inline unsigned int cpufreq_get(unsigned int cpu) } #endif -/* query the last known CPU freq (in kHz). 
If zero, cpufreq couldn't detect it */ #ifdef CONFIG_CPU_FREQ unsigned int cpufreq_quick_get(unsigned int cpu); unsigned int cpufreq_quick_get_max(unsigned int cpu); @@ -332,15 +301,8 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu) #endif -/********************************************************************* - * CPUFREQ DEFAULT GOVERNOR * - *********************************************************************/ -/* - Performance governor is fallback governor if any other gov failed to - auto load due latency restrictions -*/ #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE extern struct cpufreq_governor cpufreq_gov_performance; #endif @@ -358,36 +320,15 @@ extern struct cpufreq_governor cpufreq_gov_ondemand; #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE) extern struct cpufreq_governor cpufreq_gov_conservative; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative) -#endif#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE) extern struct cpufreq_governor cpufreq_gov_interactive; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_interactive) #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND) extern struct cpufreq_governor cpufreq_gov_intellidemand; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_intellidemand) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2) -extern struct cpufreq_governor cpufreq_gov_smartass2; -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_smartass2) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_MINMAX) -extern struct cpufreq_governor cpufreq_gov_minmax; -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_minmax) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX) -extern struct cpufreq_governor cpufreq_gov_interactivex; -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_interactivex) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_LAGFREE) -extern struct cpufreq_governor cpufreq_gov_lagfree; -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_lagfree) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_LULZACTIVE) -extern struct cpufreq_governor cpufreq_gov_lulzactive; -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_lulzactive) -#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS) -extern struct cpufreq_governor cpufreq_gov_smartass; -#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_smartass) #endif -/********************************************************************* - * FREQUENCY TABLE HELPERS * - *********************************************************************/ #define CPUFREQ_ENTRY_INVALID ~0 #define CPUFREQ_TABLE_END ~1 @@ -409,12 +350,10 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, unsigned int relation, unsigned int *index); -/* the following 3 funtions are for cpufreq core use only */ struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); void cpufreq_cpu_put(struct cpufreq_policy *data); -/* the following are really really optional */ extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, From 3347e15054d0959b118ca47164b7824d87beee49 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 11:01:13 -0400 Subject: [PATCH 23/35] CPUFREQ: Fixed all errors and dependencies for governors and merged This reverts commit 7e0f70e6304a15249e64bd4d1fb5da7f5fdef83f. 
--- drivers/cpufreq/Kconfig | 164 +++++++++++++++++++++++++++++++++++++++ drivers/cpufreq/Makefile | 11 +++ 2 files changed, 175 insertions(+) diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 2e07ae54..65e05eeb 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -154,6 +154,91 @@ config CPU_FREQ_DEFAULT_GOV_SMARTASSH3 help Use the CPUFreq governor 'slp' as default. +config CPU_FREQ_DEFAULT_GOV_MINMAX + bool "minmax" + select CPU_FREQ_GOV_MINMAX + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'minmax' as default. This minimizes the + frequency jumps does by the governor. This is aimed at maximizing + both perfomance and battery life. + +config CPU_FREQ_DEFAULT_GOV_SMARTASS2 + bool "smartass2" + select CPU_FREQ_GOV_SMARTASS2 + help + Use the CPUFreq governor 'smartassV2' as default. + +config CPU_FREQ_DEFAULT_GOV_SAVAGEDZEN + bool "savagedzen" + select CPU_FREQ_GOV_SAVAGEDZEN + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lionheart' as default. + +config CPU_FREQ_DEFAULT_GOV_ONDEMANDX + bool "ondemandx" + select CPU_FREQ_GOV_ONDEMANDX + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lionheart' as default. + +config CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX + bool "brazilianwax" + select CPU_FREQ_GOV_BRAZILIANWAX + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'brazilianwax as default. + +config CPU_FREQ_DEFAULT_GOV_LAGFREE + bool "lagfree" + select CPU_FREQ_GOV_LAGFREE + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lagfree' as default. This allows + you to get a full dynamic frequency capable system by simply + loading your cpufreq low-level hardware driver. + Be aware that not all cpufreq drivers support the lagfree + governor. If unsure have a look at the help section of the + driver. Fallback governor will be the performance governor. + +config CPU_FREQ_DEFAULT_GOV_INTERACTIVEX + bool "interactiveX" + select CPU_FREQ_GOV_INTERACTIVEX + help + Use the CPUFreq governor 'interactivex' as default. This allows + you to get a full dynamic cpu frequency capable system by simply + loading your cpufreq low-level hardware driver, using the + 'interactivex' governor for latency-sensitive workloads. + +config CPU_FREQ_DEFAULT_GOV_LULZACTIVE + bool "lulzactive" + select CPU_FREQ_GOV_LULZACTIVE + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lulzactive' as default. + +config CPU_FREQ_DEFAULT_GOV_SMARTASS + bool "smartass" + select CPU_FREQ_GOV_SMARTASS + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'smartass' as default. + +config CPU_FREQ_DEFAULT_GOV_LAZY + bool "lazy" + select CPU_FREQ_GOV_LAZY + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lazy' as default. + +config CPU_FREQ_DEFAULT_GOV_SCARY + bool "scary" + select CPU_FREQ_GOV_SCARY + select CPU_FREQ_GOV_SCARY + help + Use the CPUFreq governor 'scary' as default. + config CPU_FREQ_DEFAULT_GOV_USERSPACE bool "userspace" select CPU_FREQ_GOV_USERSPACE @@ -310,6 +395,85 @@ config CPU_FREQ_GOV_SMARTASSH3 help 'smartassH3' - a "smart" governor +config CPU_FREQ_GOV_LULZACTIVE + tristate "'lulzactive' cpufreq governor" + depends on CPU_FREQ + help + 'lulzactive' - a new interactive governor by Tegrak! + + If in doubt, say N. + +config CPU_FREQ_GOV_SMARTASS + tristate "'smartass' cpufreq governor" + depends on CPU_FREQ + help + 'smartass' - a "smart" optimized governor for the hero! + + If in doubt, say N. 
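Each CPU_FREQ_GOV_* entry above only decides whether a governor object gets built; the governor itself hooks into the core through cpufreq_register_governor(), following the same boilerplate visible in the smartass sources earlier in this series. A minimal sketch of that shape (the "example" name and the trivial event handling are placeholders, not code from any governor in this patch set):

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/cpufreq.h>

	/* Minimal governor skeleton; "example" is a placeholder name. */
	static int cpufreq_governor_example(struct cpufreq_policy *policy,
					    unsigned int event)
	{
		switch (event) {
		case CPUFREQ_GOV_START:
			/* set up per-CPU state, timers and sysfs for policy->cpu */
			break;
		case CPUFREQ_GOV_LIMITS:
			/* honour the new policy->min/max immediately */
			if (policy->cur > policy->max)
				__cpufreq_driver_target(policy, policy->max,
							CPUFREQ_RELATION_H);
			else if (policy->cur < policy->min)
				__cpufreq_driver_target(policy, policy->min,
							CPUFREQ_RELATION_L);
			break;
		case CPUFREQ_GOV_STOP:
			/* tear down timers and sysfs entries */
			break;
		}
		return 0;
	}

	static struct cpufreq_governor cpufreq_gov_example = {
		.name		= "example",
		.governor	= cpufreq_governor_example,
		.owner		= THIS_MODULE,
	};

	static int __init cpufreq_example_init(void)
	{
		return cpufreq_register_governor(&cpufreq_gov_example);
	}

	static void __exit cpufreq_example_exit(void)
	{
		cpufreq_unregister_governor(&cpufreq_gov_example);
	}

	module_init(cpufreq_example_init);
	module_exit(cpufreq_example_exit);
	MODULE_LICENSE("GPL");

The GOV_LIMITS clamp mirrors what the smartassV2 sources in this series do when the policy limits change; real governors add their sampling timers and tunables on top of this skeleton.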
+ +config CPU_FREQ_GOV_MINMAX + tristate "'minmax' cpufreq governor" + depends on CPU_FREQ + help + 'minmax' - this driver tries to minimize the frequency jumps by limiting + the the selected frequencies to either the min or the max frequency of + the policy. The frequency is selected according to the load. + +config CPU_FREQ_GOV_SMARTASS2 + tristate "'smartassV2' cpufreq governor" + depends on CPU_FREQ + help + 'smartassV2' - a "smart" optimized governor for the hero! + +config CPU_FREQ_GOV_INTERACTIVEX +tristate "'interactiveX' cpufreq policy governor" + help + 'interactiveX' - Modified version of interactive with sleep+wake code. + +config CPU_FREQ_GOV_LAGFREE + tristate "'lagfree' cpufreq governor" + depends on CPU_FREQ + help + 'lagfree' - this driver is rather similar to the 'ondemand' + governor both in its source code and its purpose, the difference is + its optimisation for better suitability in a battery powered + environment. The frequency is gracefully increased and decreased + rather than jumping to 100% when speed is required. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_lagfree. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +config CPU_FREQ_GOV_SCARY + tristate "'scary' cpufreq governor" + depends on CPU_FREQ + help + scary - a governor for cabbages + + If in doubt, say N. + +config CPU_FREQ_GOV_LAZY + tristate "'lazy' cpufreq governor" + depends on CPU_FREQ + +config CPU_FREQ_GOV_SAVAGEDZEN + tristate "'savagedzen' cpufreq governor" + depends on CPU_FREQ + +config CPU_FREQ_GOV_ONDEMANDX + tristate "'ondemandx' cpufreq governor" + depends on CPU_FREQ + +config CPU_FREQ_GOV_BRAZILIANWAX + tristate "'brazilianwax' cpufreq governor" + depends on CPU_FREQ + help + 'brazilianwax' - a "slightly more agressive smart" optimized governor! + If in doubt, say Y. 
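The 'minmax' help text above describes its whole policy: only the policy minimum or maximum is ever requested, chosen from the measured load. A minimal sketch of that rule, assuming a single up_threshold tunable (the threshold and helper name are illustrative, not taken from cpufreq_minmax.c):

	#include <linux/cpufreq.h>

	/*
	 * 'minmax' selection rule as described in the Kconfig help:
	 * never pick an intermediate frequency, only policy->min or
	 * policy->max, depending on load.
	 */
	static unsigned int minmax_pick_freq(struct cpufreq_policy *policy,
					     unsigned int load,
					     unsigned int up_threshold)
	{
		return (load > up_threshold) ? policy->max : policy->min;
	}

Because only two operating points are used, the governor trades fine-grained scaling for fewer frequency transitions, which is exactly the "minimize the frequency jumps" goal stated in the help text.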
+ config CPU_FREQ_GOV_USERSPACE tristate "'userspace' governor for userspace frequency scaling" help diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index be135afd..206e55a5 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -20,6 +20,17 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o +obj-$(CONFIG_CPU_FREQ_GOV_MINMAX) += cpufreq_minmax.o +obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS2) += cpufreq_smartass2.o +obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS) += cpufreq_smartass.o +obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVEX) += cpufreq_interactivex.o +obj-$(CONFIG_CPU_FREQ_GOV_LAGFREE) += cpufreq_lagfree.o +obj-$(CONFIG_CPU_FREQ_GOV_LULZACTIVE) += cpufreq_lulzactive.o +obj-$(CONFIG_CPU_FREQ_GOV_LAZY) += cpufreq_lazy.o +obj-$(CONFIG_CPU_FREQ_GOV_SCARY) += cpufreq_scary.o +obj-$(CONFIG_CPU_FREQ_GOV_BRAZILIANWAX) += cpufreq_brazilianwax.o +obj-$(CONFIG_CPU_FREQ_GOV_ONDEMANDX) += cpufreq_ondemandx.o +obj-$(CONFIG_CPU_FREQ_GOV_SAVAGEDZEN) += cpufreq_savagedzen.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o From 3e87a2c3eedd4f8af550215a8752997de6a52902 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 11:21:08 -0400 Subject: [PATCH 24/35] blahh --- drivers/cpufreq/Kconfig | 164 --------------------------------------- drivers/cpufreq/Makefile | 11 --- 2 files changed, 175 deletions(-) diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 65e05eeb..2e07ae54 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -154,91 +154,6 @@ config CPU_FREQ_DEFAULT_GOV_SMARTASSH3 help Use the CPUFreq governor 'slp' as default. -config CPU_FREQ_DEFAULT_GOV_MINMAX - bool "minmax" - select CPU_FREQ_GOV_MINMAX - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'minmax' as default. This minimizes the - frequency jumps does by the governor. This is aimed at maximizing - both perfomance and battery life. - -config CPU_FREQ_DEFAULT_GOV_SMARTASS2 - bool "smartass2" - select CPU_FREQ_GOV_SMARTASS2 - help - Use the CPUFreq governor 'smartassV2' as default. - -config CPU_FREQ_DEFAULT_GOV_SAVAGEDZEN - bool "savagedzen" - select CPU_FREQ_GOV_SAVAGEDZEN - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lionheart' as default. - -config CPU_FREQ_DEFAULT_GOV_ONDEMANDX - bool "ondemandx" - select CPU_FREQ_GOV_ONDEMANDX - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lionheart' as default. - -config CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX - bool "brazilianwax" - select CPU_FREQ_GOV_BRAZILIANWAX - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'brazilianwax as default. - -config CPU_FREQ_DEFAULT_GOV_LAGFREE - bool "lagfree" - select CPU_FREQ_GOV_LAGFREE - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lagfree' as default. This allows - you to get a full dynamic frequency capable system by simply - loading your cpufreq low-level hardware driver. - Be aware that not all cpufreq drivers support the lagfree - governor. If unsure have a look at the help section of the - driver. Fallback governor will be the performance governor. - -config CPU_FREQ_DEFAULT_GOV_INTERACTIVEX - bool "interactiveX" - select CPU_FREQ_GOV_INTERACTIVEX - help - Use the CPUFreq governor 'interactivex' as default. 
This allows - you to get a full dynamic cpu frequency capable system by simply - loading your cpufreq low-level hardware driver, using the - 'interactivex' governor for latency-sensitive workloads. - -config CPU_FREQ_DEFAULT_GOV_LULZACTIVE - bool "lulzactive" - select CPU_FREQ_GOV_LULZACTIVE - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lulzactive' as default. - -config CPU_FREQ_DEFAULT_GOV_SMARTASS - bool "smartass" - select CPU_FREQ_GOV_SMARTASS - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'smartass' as default. - -config CPU_FREQ_DEFAULT_GOV_LAZY - bool "lazy" - select CPU_FREQ_GOV_LAZY - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lazy' as default. - -config CPU_FREQ_DEFAULT_GOV_SCARY - bool "scary" - select CPU_FREQ_GOV_SCARY - select CPU_FREQ_GOV_SCARY - help - Use the CPUFreq governor 'scary' as default. - config CPU_FREQ_DEFAULT_GOV_USERSPACE bool "userspace" select CPU_FREQ_GOV_USERSPACE @@ -395,85 +310,6 @@ config CPU_FREQ_GOV_SMARTASSH3 help 'smartassH3' - a "smart" governor -config CPU_FREQ_GOV_LULZACTIVE - tristate "'lulzactive' cpufreq governor" - depends on CPU_FREQ - help - 'lulzactive' - a new interactive governor by Tegrak! - - If in doubt, say N. - -config CPU_FREQ_GOV_SMARTASS - tristate "'smartass' cpufreq governor" - depends on CPU_FREQ - help - 'smartass' - a "smart" optimized governor for the hero! - - If in doubt, say N. - -config CPU_FREQ_GOV_MINMAX - tristate "'minmax' cpufreq governor" - depends on CPU_FREQ - help - 'minmax' - this driver tries to minimize the frequency jumps by limiting - the the selected frequencies to either the min or the max frequency of - the policy. The frequency is selected according to the load. - -config CPU_FREQ_GOV_SMARTASS2 - tristate "'smartassV2' cpufreq governor" - depends on CPU_FREQ - help - 'smartassV2' - a "smart" optimized governor for the hero! - -config CPU_FREQ_GOV_INTERACTIVEX -tristate "'interactiveX' cpufreq policy governor" - help - 'interactiveX' - Modified version of interactive with sleep+wake code. - -config CPU_FREQ_GOV_LAGFREE - tristate "'lagfree' cpufreq governor" - depends on CPU_FREQ - help - 'lagfree' - this driver is rather similar to the 'ondemand' - governor both in its source code and its purpose, the difference is - its optimisation for better suitability in a battery powered - environment. The frequency is gracefully increased and decreased - rather than jumping to 100% when speed is required. - - To compile this driver as a module, choose M here: the - module will be called cpufreq_lagfree. - - For details, take a look at linux/Documentation/cpu-freq. - - If in doubt, say N. - -config CPU_FREQ_GOV_SCARY - tristate "'scary' cpufreq governor" - depends on CPU_FREQ - help - scary - a governor for cabbages - - If in doubt, say N. - -config CPU_FREQ_GOV_LAZY - tristate "'lazy' cpufreq governor" - depends on CPU_FREQ - -config CPU_FREQ_GOV_SAVAGEDZEN - tristate "'savagedzen' cpufreq governor" - depends on CPU_FREQ - -config CPU_FREQ_GOV_ONDEMANDX - tristate "'ondemandx' cpufreq governor" - depends on CPU_FREQ - -config CPU_FREQ_GOV_BRAZILIANWAX - tristate "'brazilianwax' cpufreq governor" - depends on CPU_FREQ - help - 'brazilianwax' - a "slightly more agressive smart" optimized governor! - If in doubt, say Y. 
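Several of the governors named in this hunk ('interactiveX', 'smartass', 'brazilianwax') carry screen-off behaviour via the Android early-suspend hooks, the same pattern the smartass sources register earlier in this series. A sketch of that hook wiring, assuming the <linux/earlysuspend.h> header used by Android kernels of this era; the handler bodies are placeholders:

	#include <linux/earlysuspend.h>

	static void example_early_suspend(struct early_suspend *h)
	{
		/* screen off: cap or lower the frequency for sleep */
	}

	static void example_late_resume(struct early_suspend *h)
	{
		/* screen on: restore normal scaling immediately */
	}

	static struct early_suspend example_power_suspend = {
		.suspend = example_early_suspend,
		.resume  = example_late_resume,
	};

	/* registered from the governor's init path, e.g.
	 * register_early_suspend(&example_power_suspend); */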
- config CPU_FREQ_GOV_USERSPACE tristate "'userspace' governor for userspace frequency scaling" help diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 206e55a5..be135afd 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -20,17 +20,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o -obj-$(CONFIG_CPU_FREQ_GOV_MINMAX) += cpufreq_minmax.o -obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS2) += cpufreq_smartass2.o -obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS) += cpufreq_smartass.o -obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVEX) += cpufreq_interactivex.o -obj-$(CONFIG_CPU_FREQ_GOV_LAGFREE) += cpufreq_lagfree.o -obj-$(CONFIG_CPU_FREQ_GOV_LULZACTIVE) += cpufreq_lulzactive.o -obj-$(CONFIG_CPU_FREQ_GOV_LAZY) += cpufreq_lazy.o -obj-$(CONFIG_CPU_FREQ_GOV_SCARY) += cpufreq_scary.o -obj-$(CONFIG_CPU_FREQ_GOV_BRAZILIANWAX) += cpufreq_brazilianwax.o -obj-$(CONFIG_CPU_FREQ_GOV_ONDEMANDX) += cpufreq_ondemandx.o -obj-$(CONFIG_CPU_FREQ_GOV_SAVAGEDZEN) += cpufreq_savagedzen.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o From 173f8bcece92bb1e886d25d43495048e038dba8b Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 11:22:05 -0400 Subject: [PATCH 25/35] Revert "blahh" This reverts commit 3e87a2c3eedd4f8af550215a8752997de6a52902. --- drivers/cpufreq/Kconfig | 164 +++++++++++++++++++++++++++++++++++++++ drivers/cpufreq/Makefile | 11 +++ 2 files changed, 175 insertions(+) diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 2e07ae54..65e05eeb 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -154,6 +154,91 @@ config CPU_FREQ_DEFAULT_GOV_SMARTASSH3 help Use the CPUFreq governor 'slp' as default. +config CPU_FREQ_DEFAULT_GOV_MINMAX + bool "minmax" + select CPU_FREQ_GOV_MINMAX + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'minmax' as default. This minimizes the + frequency jumps does by the governor. This is aimed at maximizing + both perfomance and battery life. + +config CPU_FREQ_DEFAULT_GOV_SMARTASS2 + bool "smartass2" + select CPU_FREQ_GOV_SMARTASS2 + help + Use the CPUFreq governor 'smartassV2' as default. + +config CPU_FREQ_DEFAULT_GOV_SAVAGEDZEN + bool "savagedzen" + select CPU_FREQ_GOV_SAVAGEDZEN + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lionheart' as default. + +config CPU_FREQ_DEFAULT_GOV_ONDEMANDX + bool "ondemandx" + select CPU_FREQ_GOV_ONDEMANDX + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lionheart' as default. + +config CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX + bool "brazilianwax" + select CPU_FREQ_GOV_BRAZILIANWAX + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'brazilianwax as default. + +config CPU_FREQ_DEFAULT_GOV_LAGFREE + bool "lagfree" + select CPU_FREQ_GOV_LAGFREE + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lagfree' as default. This allows + you to get a full dynamic frequency capable system by simply + loading your cpufreq low-level hardware driver. + Be aware that not all cpufreq drivers support the lagfree + governor. If unsure have a look at the help section of the + driver. Fallback governor will be the performance governor. + +config CPU_FREQ_DEFAULT_GOV_INTERACTIVEX + bool "interactiveX" + select CPU_FREQ_GOV_INTERACTIVEX + help + Use the CPUFreq governor 'interactivex' as default. 
This allows + you to get a full dynamic cpu frequency capable system by simply + loading your cpufreq low-level hardware driver, using the + 'interactivex' governor for latency-sensitive workloads. + +config CPU_FREQ_DEFAULT_GOV_LULZACTIVE + bool "lulzactive" + select CPU_FREQ_GOV_LULZACTIVE + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lulzactive' as default. + +config CPU_FREQ_DEFAULT_GOV_SMARTASS + bool "smartass" + select CPU_FREQ_GOV_SMARTASS + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'smartass' as default. + +config CPU_FREQ_DEFAULT_GOV_LAZY + bool "lazy" + select CPU_FREQ_GOV_LAZY + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lazy' as default. + +config CPU_FREQ_DEFAULT_GOV_SCARY + bool "scary" + select CPU_FREQ_GOV_SCARY + select CPU_FREQ_GOV_SCARY + help + Use the CPUFreq governor 'scary' as default. + config CPU_FREQ_DEFAULT_GOV_USERSPACE bool "userspace" select CPU_FREQ_GOV_USERSPACE @@ -310,6 +395,85 @@ config CPU_FREQ_GOV_SMARTASSH3 help 'smartassH3' - a "smart" governor +config CPU_FREQ_GOV_LULZACTIVE + tristate "'lulzactive' cpufreq governor" + depends on CPU_FREQ + help + 'lulzactive' - a new interactive governor by Tegrak! + + If in doubt, say N. + +config CPU_FREQ_GOV_SMARTASS + tristate "'smartass' cpufreq governor" + depends on CPU_FREQ + help + 'smartass' - a "smart" optimized governor for the hero! + + If in doubt, say N. + +config CPU_FREQ_GOV_MINMAX + tristate "'minmax' cpufreq governor" + depends on CPU_FREQ + help + 'minmax' - this driver tries to minimize the frequency jumps by limiting + the the selected frequencies to either the min or the max frequency of + the policy. The frequency is selected according to the load. + +config CPU_FREQ_GOV_SMARTASS2 + tristate "'smartassV2' cpufreq governor" + depends on CPU_FREQ + help + 'smartassV2' - a "smart" optimized governor for the hero! + +config CPU_FREQ_GOV_INTERACTIVEX +tristate "'interactiveX' cpufreq policy governor" + help + 'interactiveX' - Modified version of interactive with sleep+wake code. + +config CPU_FREQ_GOV_LAGFREE + tristate "'lagfree' cpufreq governor" + depends on CPU_FREQ + help + 'lagfree' - this driver is rather similar to the 'ondemand' + governor both in its source code and its purpose, the difference is + its optimisation for better suitability in a battery powered + environment. The frequency is gracefully increased and decreased + rather than jumping to 100% when speed is required. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_lagfree. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +config CPU_FREQ_GOV_SCARY + tristate "'scary' cpufreq governor" + depends on CPU_FREQ + help + scary - a governor for cabbages + + If in doubt, say N. + +config CPU_FREQ_GOV_LAZY + tristate "'lazy' cpufreq governor" + depends on CPU_FREQ + +config CPU_FREQ_GOV_SAVAGEDZEN + tristate "'savagedzen' cpufreq governor" + depends on CPU_FREQ + +config CPU_FREQ_GOV_ONDEMANDX + tristate "'ondemandx' cpufreq governor" + depends on CPU_FREQ + +config CPU_FREQ_GOV_BRAZILIANWAX + tristate "'brazilianwax' cpufreq governor" + depends on CPU_FREQ + help + 'brazilianwax' - a "slightly more agressive smart" optimized governor! + If in doubt, say Y. 
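The 'lagfree' help above stresses gradual ramping rather than jumping straight to 100% when speed is required. A sketch of such single-step ramping, with the step size and helper name assumed purely for illustration:

	#include <linux/cpufreq.h>

	/*
	 * Move one step per sample instead of jumping to policy->max,
	 * clamping the result to the policy limits.
	 */
	static unsigned int lagfree_step(struct cpufreq_policy *policy,
					 unsigned int cur, int direction,
					 unsigned int step)
	{
		if (direction > 0)
			return (cur + step < policy->max) ? cur + step : policy->max;
		if (direction < 0)
			return (cur > policy->min + step) ? cur - step : policy->min;
		return cur;
	}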
+ config CPU_FREQ_GOV_USERSPACE tristate "'userspace' governor for userspace frequency scaling" help diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index be135afd..206e55a5 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -20,6 +20,17 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o +obj-$(CONFIG_CPU_FREQ_GOV_MINMAX) += cpufreq_minmax.o +obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS2) += cpufreq_smartass2.o +obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS) += cpufreq_smartass.o +obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVEX) += cpufreq_interactivex.o +obj-$(CONFIG_CPU_FREQ_GOV_LAGFREE) += cpufreq_lagfree.o +obj-$(CONFIG_CPU_FREQ_GOV_LULZACTIVE) += cpufreq_lulzactive.o +obj-$(CONFIG_CPU_FREQ_GOV_LAZY) += cpufreq_lazy.o +obj-$(CONFIG_CPU_FREQ_GOV_SCARY) += cpufreq_scary.o +obj-$(CONFIG_CPU_FREQ_GOV_BRAZILIANWAX) += cpufreq_brazilianwax.o +obj-$(CONFIG_CPU_FREQ_GOV_ONDEMANDX) += cpufreq_ondemandx.o +obj-$(CONFIG_CPU_FREQ_GOV_SAVAGEDZEN) += cpufreq_savagedzen.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o From 19fd433d9aaa7c30c4b682b769f60859baf33e5a Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 11:22:18 -0400 Subject: [PATCH 26/35] Revert "CPUFREQ: Fixed all errors and dependencies for governors and merged" This reverts commit 3347e15054d0959b118ca47164b7824d87beee49. --- drivers/cpufreq/Kconfig | 164 --------------------------------------- drivers/cpufreq/Makefile | 11 --- 2 files changed, 175 deletions(-) diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 65e05eeb..2e07ae54 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -154,91 +154,6 @@ config CPU_FREQ_DEFAULT_GOV_SMARTASSH3 help Use the CPUFreq governor 'slp' as default. -config CPU_FREQ_DEFAULT_GOV_MINMAX - bool "minmax" - select CPU_FREQ_GOV_MINMAX - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'minmax' as default. This minimizes the - frequency jumps does by the governor. This is aimed at maximizing - both perfomance and battery life. - -config CPU_FREQ_DEFAULT_GOV_SMARTASS2 - bool "smartass2" - select CPU_FREQ_GOV_SMARTASS2 - help - Use the CPUFreq governor 'smartassV2' as default. - -config CPU_FREQ_DEFAULT_GOV_SAVAGEDZEN - bool "savagedzen" - select CPU_FREQ_GOV_SAVAGEDZEN - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lionheart' as default. - -config CPU_FREQ_DEFAULT_GOV_ONDEMANDX - bool "ondemandx" - select CPU_FREQ_GOV_ONDEMANDX - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lionheart' as default. - -config CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX - bool "brazilianwax" - select CPU_FREQ_GOV_BRAZILIANWAX - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'brazilianwax as default. - -config CPU_FREQ_DEFAULT_GOV_LAGFREE - bool "lagfree" - select CPU_FREQ_GOV_LAGFREE - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lagfree' as default. This allows - you to get a full dynamic frequency capable system by simply - loading your cpufreq low-level hardware driver. - Be aware that not all cpufreq drivers support the lagfree - governor. If unsure have a look at the help section of the - driver. Fallback governor will be the performance governor. 
- -config CPU_FREQ_DEFAULT_GOV_INTERACTIVEX - bool "interactiveX" - select CPU_FREQ_GOV_INTERACTIVEX - help - Use the CPUFreq governor 'interactivex' as default. This allows - you to get a full dynamic cpu frequency capable system by simply - loading your cpufreq low-level hardware driver, using the - 'interactivex' governor for latency-sensitive workloads. - -config CPU_FREQ_DEFAULT_GOV_LULZACTIVE - bool "lulzactive" - select CPU_FREQ_GOV_LULZACTIVE - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lulzactive' as default. - -config CPU_FREQ_DEFAULT_GOV_SMARTASS - bool "smartass" - select CPU_FREQ_GOV_SMARTASS - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'smartass' as default. - -config CPU_FREQ_DEFAULT_GOV_LAZY - bool "lazy" - select CPU_FREQ_GOV_LAZY - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lazy' as default. - -config CPU_FREQ_DEFAULT_GOV_SCARY - bool "scary" - select CPU_FREQ_GOV_SCARY - select CPU_FREQ_GOV_SCARY - help - Use the CPUFreq governor 'scary' as default. - config CPU_FREQ_DEFAULT_GOV_USERSPACE bool "userspace" select CPU_FREQ_GOV_USERSPACE @@ -395,85 +310,6 @@ config CPU_FREQ_GOV_SMARTASSH3 help 'smartassH3' - a "smart" governor -config CPU_FREQ_GOV_LULZACTIVE - tristate "'lulzactive' cpufreq governor" - depends on CPU_FREQ - help - 'lulzactive' - a new interactive governor by Tegrak! - - If in doubt, say N. - -config CPU_FREQ_GOV_SMARTASS - tristate "'smartass' cpufreq governor" - depends on CPU_FREQ - help - 'smartass' - a "smart" optimized governor for the hero! - - If in doubt, say N. - -config CPU_FREQ_GOV_MINMAX - tristate "'minmax' cpufreq governor" - depends on CPU_FREQ - help - 'minmax' - this driver tries to minimize the frequency jumps by limiting - the the selected frequencies to either the min or the max frequency of - the policy. The frequency is selected according to the load. - -config CPU_FREQ_GOV_SMARTASS2 - tristate "'smartassV2' cpufreq governor" - depends on CPU_FREQ - help - 'smartassV2' - a "smart" optimized governor for the hero! - -config CPU_FREQ_GOV_INTERACTIVEX -tristate "'interactiveX' cpufreq policy governor" - help - 'interactiveX' - Modified version of interactive with sleep+wake code. - -config CPU_FREQ_GOV_LAGFREE - tristate "'lagfree' cpufreq governor" - depends on CPU_FREQ - help - 'lagfree' - this driver is rather similar to the 'ondemand' - governor both in its source code and its purpose, the difference is - its optimisation for better suitability in a battery powered - environment. The frequency is gracefully increased and decreased - rather than jumping to 100% when speed is required. - - To compile this driver as a module, choose M here: the - module will be called cpufreq_lagfree. - - For details, take a look at linux/Documentation/cpu-freq. - - If in doubt, say N. - -config CPU_FREQ_GOV_SCARY - tristate "'scary' cpufreq governor" - depends on CPU_FREQ - help - scary - a governor for cabbages - - If in doubt, say N. - -config CPU_FREQ_GOV_LAZY - tristate "'lazy' cpufreq governor" - depends on CPU_FREQ - -config CPU_FREQ_GOV_SAVAGEDZEN - tristate "'savagedzen' cpufreq governor" - depends on CPU_FREQ - -config CPU_FREQ_GOV_ONDEMANDX - tristate "'ondemandx' cpufreq governor" - depends on CPU_FREQ - -config CPU_FREQ_GOV_BRAZILIANWAX - tristate "'brazilianwax' cpufreq governor" - depends on CPU_FREQ - help - 'brazilianwax' - a "slightly more agressive smart" optimized governor! - If in doubt, say Y. 
- config CPU_FREQ_GOV_USERSPACE tristate "'userspace' governor for userspace frequency scaling" help diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 206e55a5..be135afd 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -20,17 +20,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o -obj-$(CONFIG_CPU_FREQ_GOV_MINMAX) += cpufreq_minmax.o -obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS2) += cpufreq_smartass2.o -obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS) += cpufreq_smartass.o -obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVEX) += cpufreq_interactivex.o -obj-$(CONFIG_CPU_FREQ_GOV_LAGFREE) += cpufreq_lagfree.o -obj-$(CONFIG_CPU_FREQ_GOV_LULZACTIVE) += cpufreq_lulzactive.o -obj-$(CONFIG_CPU_FREQ_GOV_LAZY) += cpufreq_lazy.o -obj-$(CONFIG_CPU_FREQ_GOV_SCARY) += cpufreq_scary.o -obj-$(CONFIG_CPU_FREQ_GOV_BRAZILIANWAX) += cpufreq_brazilianwax.o -obj-$(CONFIG_CPU_FREQ_GOV_ONDEMANDX) += cpufreq_ondemandx.o -obj-$(CONFIG_CPU_FREQ_GOV_SAVAGEDZEN) += cpufreq_savagedzen.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o From 5a55cd645f34006c58f7fe81c5698c8512f9fe07 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 12:26:49 -0400 Subject: [PATCH 27/35] Govs --- drivers/cpufreq/Kconfig | 314 ++++++++++++++++++++++++++++++++++++++- drivers/cpufreq/Makefile | 10 ++ 2 files changed, 323 insertions(+), 1 deletion(-) diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 2e07ae54..9d37962e 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -170,7 +170,319 @@ config CPU_FREQ_DEFAULT_GOV_WHEATLEY ---help--- Use the CPUFreq governor 'wheatley' as default. -endchoice +config CPU_FREQ_DEFAULT_GOV_LAGFREE + bool "lagfree" + select CPU_FREQ_GOV_LAGFREE + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lagfree' as default. This allows + you to get a full dynamic frequency capable system by simply + loading your cpufreq low-level hardware driver. + Be aware that not all cpufreq drivers support the lagfree + governor. If unsure have a look at the help section of the + driver. Fallback governor will be the performance governor. + +config CPU_FREQ_DEFAULT_GOV_INTERACTIVEX + bool "interactiveX" + select CPU_FREQ_GOV_INTERACTIVEX + help + Use the CPUFreq governor 'interactiveX' as default. This allows + you to get a full dynamic cpu frequency capable system by simply + loading your cpufreq low-level hardware driver, using the + 'interactiveX' governor for latency-sensitive workloads. + +config CPU_FREQ_DEFAULT_GOV_LAZY + + bool "lazy" + select CPU_FREQ_GOV_LAZY + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lazy' as default. + Fallback governor will be the performance governor. + +config CPU_FREQ_DEFAULT_GOV_LULZACTIVE + + bool "lulzactive" + select CPU_FREQ_GOV_LULZACTIVE + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lulzactive' as default. + Fallback governor will be the performance governor. + +config CPU_FREQ_DEFAULT_GOV_ONDEMANDX + + bool "ondemandx" + select CPU_FREQ_GOV_ONDEMANDX + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'ondemandX' as default. + Fallback governor will be the performance governor. 
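Each CPU_FREQ_DEFAULT_GOV_* entry above also selects CPU_FREQ_GOV_PERFORMANCE so the performance governor is always built in for the cpufreq core to fall back on. In mainline kernels of this era the chosen default itself is resolved at compile time in include/linux/cpufreq.h, roughly as in the fragment below; the lulzactive branch is an assumed extension showing how the pattern would be carried over to the out-of-tree governors added here.

/* Sketch of how include/linux/cpufreq.h resolves the default governor.
 * The first two branches follow the mainline pattern; the lulzactive
 * branch is an assumed extension for the out-of-tree governor. */
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
extern struct cpufreq_governor cpufreq_gov_performance;
#define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_performance)
#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND)
extern struct cpufreq_governor cpufreq_gov_ondemand;
#define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_ondemand)
#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_LULZACTIVE)
extern struct cpufreq_governor cpufreq_gov_lulzactive;
#define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_lulzactive)
#endif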
+ +config CPU_FREQ_DEFAULT_GOV_MINMAX + + bool "minmax" + select CPU_FREQ_GOV_MINMAX + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'minmax' as default. This minimizes the + frequency jumps does by the governor. This is aimed at maximizing + both perfomance and battery life. + +config CPU_FREQ_DEFAULT_GOV_SavagedZen + + bool "smartass" + select CPU_FREQ_GOV_SavagedZen + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'SavagedZen' as default. + +config CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX + bool "brazilianwax" + select CPU_FREQ_GOV_BRAZILIANWAX + help + Use the CPUFreq governor 'brazilianwax' as default. + +config CPU_FREQ_DEFAULT_GOV_SCARY + bool "scary" + select CPU_FREQ_GOV_SCARY + help + Use as default governor + +config CPU_FREQ_DEFAULT_GOV_SMOOTHASS + bool "smoothass" + select CPU_FREQ_GOV_SMOOTHASS + help + Use as default governor + +config CPU_FREQ_GOV_BRAZILIANWAX + tristate "'brazilianwax' cpufreq governor" + depends on CPU_FREQ + help + 'brazilianwax' - a "smart" governor + + If in doubt, say N. + +config CPU_FREQ_GOV_LULZACTIVE + tristate "'lulzactive' cpufreq governor" + depends on CPU_FREQ + help + 'lulzactive' - a new interactive governor by Tegrak! + + If in doubt, say N. + +config CPU_FREQ_GOV_LAZY + tristate "'lazy' cpufreq governor" + depends on CPU_FREQ + +config CPU_FREQ_GOV_LAGFREE + tristate "'lagfree' cpufreq governor" + depends on CPU_FREQ + help + 'lagfree' - this driver is rather similar to the 'ondemand' + governor both in its source code and its purpose, the difference is + its optimisation for better suitability in a battery powered + environment. The frequency is gracefully increased and decreased + rather than jumping to 100% when speed is required. + +config LAGFREE_MAX_LOAD + int "Max CPU Load" + default 50 + depends on CPU_FREQ_GOV_LAGFREE + help + CPU freq will be increased if measured load > max_cpu_load; + +config LAGFREE_MIN_LOAD + int "Min CPU Load" + default 15 + depends on CPU_FREQ_GOV_LAGFREE + help + CPU freq will be decrease if measured load < min_cpu_load; + +config LAGFREE_FREQ_STEP_DOWN + int "Frequency Step Down" + default 108000 + depends on CPU_FREQ_GOV_LAGFREE + help + Max freqeuncy delta when ramping down. + +config LAGFREE_FREQ_SLEEP_MAX + int "Max Sleep frequeny" + default 384000 + depends on CPU_FREQ_GOV_LAGFREE + help + Max freqeuncy for screen off. + +config LAGFREE_FREQ_AWAKE_MIN + int "Min Awake frequeny" + default 384000 + depends on CPU_FREQ_GOV_LAGFREE + help + Min freqeuncy for screen on. + +config LAGFREE_FREQ_STEP_UP_SLEEP_PERCENT + int "Freq step up percent sleep" + default 20 + depends on CPU_FREQ_GOV_LAGFREE + help + Frequency percent to step up while screen off. + +config CPU_FREQ_DEFAULT_GOV_HOTPLUG + bool "hotplug" + select CPU_FREQ_GOV_HOTPLUG + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'hotplug' as default. This allows you + to get a full dynamic frequency capable system with CPU + hotplug support by simply loading your cpufreq low-level + hardware driver. Be aware that not all cpufreq drivers + support the hotplug governor. If unsure have a look at + the help section of the driver. Fallback governor will be the + performance governor. 
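The LAGFREE_* options above expose the lagfree governor's ramping policy as Kconfig defaults: raise the frequency when load is above 50%, lower it when load drops under 15%, never step down by more than 108 MHz at a time, and stay at or below 384 MHz while the screen is off. A small user-space model of that decision flow follows; only the threshold numbers come from the defaults above, while the step-up size and all identifiers are assumptions for illustration.

/*
 * Toy model of the lagfree ramping rules described by the LAGFREE_*
 * options above. Thresholds mirror the Kconfig defaults; the fixed
 * step-up size and all names are assumptions for illustration only.
 */
#include <stdio.h>

#define MAX_LOAD        50      /* ramp up above this load                      */
#define MIN_LOAD        15      /* ramp down below this load                    */
#define FREQ_STEP_DOWN  108000  /* largest single step down, in kHz             */
#define FREQ_SLEEP_MAX  384000  /* ceiling while the screen is off              */
#define FREQ_STEP_UP    108000  /* assumed step-up size (not a Kconfig default) */

static unsigned int lagfree_next_freq(unsigned int load, unsigned int cur,
				      unsigned int min, unsigned int max,
				      int screen_off)
{
	unsigned int next = cur;

	if (load > MAX_LOAD && next + FREQ_STEP_UP <= max)
		next += FREQ_STEP_UP;           /* gentle ramp up, no jump to max */
	else if (load < MIN_LOAD)
		next = (cur - min > FREQ_STEP_DOWN) ? cur - FREQ_STEP_DOWN : min;

	if (screen_off && next > FREQ_SLEEP_MAX)
		next = FREQ_SLEEP_MAX;          /* honour the sleep ceiling */

	return next;
}

int main(void)
{
	printf("busy, screen on : %u kHz\n",
	       lagfree_next_freq(80, 810000, 384000, 1512000, 0));
	printf("idle, screen off: %u kHz\n",
	       lagfree_next_freq(5, 810000, 384000, 1512000, 1));
	return 0;
}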
+ +config CPU_FREQ_GOV_SMOOTHASS + tristate "'smoothass' cpufreq governor" + depends on CPU_FREQ + help + Yet another governor by erasmux + +config CPU_FREQ_GOV_SCARY + tristate "'scary' cpufreq governor" + depends on CPU_FREQ + help + Use as default governor + +config CPU_FREQ_GOV_ASSWAX + tristate "'asswax' cpufreq governor" + depends on CPU_FREQ + help + Use as default governors + +config CPU_FREQ_GOV_LAGFREE + tristate "'lagfree' cpufreq governor" + depends on CPU_FREQ + help + 'lagfree' - this driver is rather similar to the 'ondemand' + governor both in its source code and its purpose, the difference is + its optimisation for better suitability in a battery powered + environment. The frequency is gracefully increased and decreased + rather than jumping to 100% when speed is required. + +config LAGFREE_MAX_LOAD + int "Max CPU Load" + default 50 + depends on CPU_FREQ_GOV_LAGFREE + help + CPU freq will be increased if measured load > max_cpu_load; + +config LAGFREE_MIN_LOAD + int "Min CPU Load" + default 15 + depends on CPU_FREQ_GOV_LAGFREE + help + CPU freq will be decrease if measured load < min_cpu_load; + +config LAGFREE_FREQ_STEP_DOWN + int "Frequency Step Down" + default 108000 + depends on CPU_FREQ_GOV_LAGFREE + help + Max freqeuncy delta when ramping down. + +config LAGFREE_FREQ_SLEEP_MAX + int "Max Sleep frequeny" + default 384000 + depends on CPU_FREQ_GOV_LAGFREE + help + Max freqeuncy for screen off. + +config LAGFREE_FREQ_AWAKE_MIN + int "Min Awake frequeny" + default 384000 + depends on CPU_FREQ_GOV_LAGFREE + help + Min freqeuncy for screen on. + +config LAGFREE_FREQ_STEP_UP_SLEEP_PERCENT + int "Freq step up percent sleep" + default 20 + depends on CPU_FREQ_GOV_LAGFREE + help + Frequency percent to step up while screen off. + +config CPU_FREQ_GOV_LAZY + + tristate "'lazy' cpufreq governor" + depends on CPU_FREQ + help + 'lazy' - a "lazy" governor + + If in doubt, say N. + +config CPU_FREQ_GOV_LULZACTIVE + + tristate "'lulzactive' cpufreq governor" + depends on CPU_FREQ + help + 'lulzactive' - a new interactive governor by Tegrak! + + If in doubt, say N. + +config CPU_FREQ_GOV_ONDEMANDX + + tristate "'ondemandx' cpufreq governor" + depends on CPU_FREQ + help + 'ondemandx' - a new iteration of the ondemand governor + + If in doubt, say N. + +config CPU_FREQ_GOV_MINMAX + + tristate "'minmax' cpufreq governor" + depends on CPU_FREQ + help + 'minmax' - this driver tries to minimize the frequency jumps by limiting + the the selected frequencies to either the min or the max frequency of + the policy. The frequency is selected according to the load. + + If in doubt, say N. + +config CPU_FREQ_GOV_SavagedZen + + tristate "'savagedzen' cpufreq governor" + depends on CPU_FREQ + help + 'Savaged-Zen' - a smartass based governor + + If in doubt, say N. + +config CPU_FREQ_GOV_BRAZILIANWAX + tristate "'brazilianwax' cpufreq governor" + depends on CPU_FREQ + help + brazilianwax' - a "slightly more agressive smart" optimized governor! + + If in doubt, say Y. + +config CPU_FREQ_GOV_HOTPLUG + tristate "'hotplug' cpufreq governor" + depends on CPU_FREQ && NO_HZ && HOTPLUG_CPU + help + 'hotplug' - this driver mimics the frequency scaling behavior + in 'ondemand', but with several key differences. First is + that frequency transitions use the CPUFreq table directly, + instead of incrementing in a percentage of the maximum + available frequency. Second 'hotplug' will offline auxillary + CPUs when the system is idle, and online those CPUs once the + system becomes busy again. 
This last feature is needed for + architectures which transition to low power states when only + the "master" CPU is online, or for thermally constrained + devices. + + If you don't have one of these architectures or devices, use + 'ondemand' instead. + + If in doubt, say N. config CPU_FREQ_GOV_ABYSSPLUG tristate "'abyssplug' cpufreq governor" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index be135afd..75c24cbe 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -20,6 +20,16 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o +obj-$(CONFIG_CPU_FREQ_GOV_LAGFREE) += cpufreq_lagfree.o +obj-$(CONFIG_CPU_FREQ_GOV_LAZY) += cpufreq_lazy.o +obj-$(CONFIG_CPU_FREQ_GOV_ONDEMANDX) += cpufreq_ondemandx.o +obj-$(CONFIG_CPU_FREQ_GOV_lulzactive) += cpufreq_lulzactive.o +obj-$(CONFIG_CPU_FREQ_GOV_SavagedZen) += cpufreq_savagedzen.o +obj-$(CONFIG_CPU_FREQ_GOV_BRAZILIANWAX) += cpufreq_brazilianwax.o +obj-$(CONFIG_CPU_FREQ_GOV_SCARY) += cpufreq_scary.o +obj-$(CONFIG_CPU_FREQ_GOV_SMOOTHASS) += cpufreq_smoothass.o +obj-$(CONFIG_CPU_FREQ_GOV_SCARY) += cpufreq_scary.o +obj-$(CONFIG_CPU_FREQ_GOV_HOTPLUG) += cpufreq_hotplug.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o From cfd189f951876bff0a03c6482bca03fe5b85e23e Mon Sep 17 00:00:00 2001 From: Lens-F Date: Wed, 7 Aug 2013 12:30:53 -0400 Subject: [PATCH 28/35] Revert "Govs" This reverts commit 5a55cd645f34006c58f7fe81c5698c8512f9fe07. --- drivers/cpufreq/Kconfig | 314 +-------------------------------------- drivers/cpufreq/Makefile | 10 -- 2 files changed, 1 insertion(+), 323 deletions(-) diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 9d37962e..2e07ae54 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -170,319 +170,7 @@ config CPU_FREQ_DEFAULT_GOV_WHEATLEY ---help--- Use the CPUFreq governor 'wheatley' as default. -config CPU_FREQ_DEFAULT_GOV_LAGFREE - bool "lagfree" - select CPU_FREQ_GOV_LAGFREE - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lagfree' as default. This allows - you to get a full dynamic frequency capable system by simply - loading your cpufreq low-level hardware driver. - Be aware that not all cpufreq drivers support the lagfree - governor. If unsure have a look at the help section of the - driver. Fallback governor will be the performance governor. - -config CPU_FREQ_DEFAULT_GOV_INTERACTIVEX - bool "interactiveX" - select CPU_FREQ_GOV_INTERACTIVEX - help - Use the CPUFreq governor 'interactiveX' as default. This allows - you to get a full dynamic cpu frequency capable system by simply - loading your cpufreq low-level hardware driver, using the - 'interactiveX' governor for latency-sensitive workloads. - -config CPU_FREQ_DEFAULT_GOV_LAZY - - bool "lazy" - select CPU_FREQ_GOV_LAZY - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lazy' as default. - Fallback governor will be the performance governor. - -config CPU_FREQ_DEFAULT_GOV_LULZACTIVE - - bool "lulzactive" - select CPU_FREQ_GOV_LULZACTIVE - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lulzactive' as default. - Fallback governor will be the performance governor. 
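The 'hotplug' help text above combines two decisions: walk the CPUFreq table one entry at a time instead of jumping by a percentage of the maximum, and take auxiliary CPUs offline when the system goes idle, bringing them back when it is busy again. A compact user-space model of both decisions is sketched below; the hysteresis thresholds and every identifier are assumptions for illustration, not values from cpufreq_hotplug.c.

/*
 * Illustrative model of the two behaviours the 'hotplug' governor help
 * text describes: stepping a frequency-table index up or down one entry
 * per sample, and offlining the auxiliary CPU when load stays low.
 * Thresholds and identifiers are assumptions, not taken from
 * cpufreq_hotplug.c.
 */
#include <stdio.h>

static const unsigned int freq_table[] = { 384000, 594000, 810000, 1026000, 1512000 };
#define NR_FREQS (sizeof(freq_table) / sizeof(freq_table[0]))

#define UP_LOAD      80   /* step the table index up above this load   */
#define DOWN_LOAD    30   /* step the table index down below this load */
#define OFFLINE_LOAD 10   /* offline the auxiliary CPU below this load */

struct hotplug_state {
	unsigned int idx;        /* current index into freq_table  */
	int aux_cpu_online;      /* 1 if the second CPU is online  */
};

static void hotplug_sample(struct hotplug_state *s, unsigned int load)
{
	if (load > UP_LOAD && s->idx < NR_FREQS - 1)
		s->idx++;                       /* one table entry at a time    */
	else if (load < DOWN_LOAD && s->idx > 0)
		s->idx--;

	if (load < OFFLINE_LOAD)
		s->aux_cpu_online = 0;          /* idle: drop the auxiliary CPU */
	else if (load > UP_LOAD)
		s->aux_cpu_online = 1;          /* busy again: bring it back    */
}

int main(void)
{
	struct hotplug_state s = { .idx = 2, .aux_cpu_online = 1 };
	unsigned int trace[] = { 95, 95, 5, 5, 90 };

	for (unsigned int i = 0; i < sizeof(trace) / sizeof(trace[0]); i++) {
		hotplug_sample(&s, trace[i]);
		printf("load %3u%% -> %7u kHz, aux cpu %s\n", trace[i],
		       freq_table[s.idx], s.aux_cpu_online ? "online" : "offline");
	}
	return 0;
}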
- -config CPU_FREQ_DEFAULT_GOV_ONDEMANDX - - bool "ondemandx" - select CPU_FREQ_GOV_ONDEMANDX - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'ondemandX' as default. - Fallback governor will be the performance governor. - -config CPU_FREQ_DEFAULT_GOV_MINMAX - - bool "minmax" - select CPU_FREQ_GOV_MINMAX - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'minmax' as default. This minimizes the - frequency jumps does by the governor. This is aimed at maximizing - both perfomance and battery life. - -config CPU_FREQ_DEFAULT_GOV_SavagedZen - - bool "smartass" - select CPU_FREQ_GOV_SavagedZen - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'SavagedZen' as default. - -config CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX - bool "brazilianwax" - select CPU_FREQ_GOV_BRAZILIANWAX - help - Use the CPUFreq governor 'brazilianwax' as default. - -config CPU_FREQ_DEFAULT_GOV_SCARY - bool "scary" - select CPU_FREQ_GOV_SCARY - help - Use as default governor - -config CPU_FREQ_DEFAULT_GOV_SMOOTHASS - bool "smoothass" - select CPU_FREQ_GOV_SMOOTHASS - help - Use as default governor - -config CPU_FREQ_GOV_BRAZILIANWAX - tristate "'brazilianwax' cpufreq governor" - depends on CPU_FREQ - help - 'brazilianwax' - a "smart" governor - - If in doubt, say N. - -config CPU_FREQ_GOV_LULZACTIVE - tristate "'lulzactive' cpufreq governor" - depends on CPU_FREQ - help - 'lulzactive' - a new interactive governor by Tegrak! - - If in doubt, say N. - -config CPU_FREQ_GOV_LAZY - tristate "'lazy' cpufreq governor" - depends on CPU_FREQ - -config CPU_FREQ_GOV_LAGFREE - tristate "'lagfree' cpufreq governor" - depends on CPU_FREQ - help - 'lagfree' - this driver is rather similar to the 'ondemand' - governor both in its source code and its purpose, the difference is - its optimisation for better suitability in a battery powered - environment. The frequency is gracefully increased and decreased - rather than jumping to 100% when speed is required. - -config LAGFREE_MAX_LOAD - int "Max CPU Load" - default 50 - depends on CPU_FREQ_GOV_LAGFREE - help - CPU freq will be increased if measured load > max_cpu_load; - -config LAGFREE_MIN_LOAD - int "Min CPU Load" - default 15 - depends on CPU_FREQ_GOV_LAGFREE - help - CPU freq will be decrease if measured load < min_cpu_load; - -config LAGFREE_FREQ_STEP_DOWN - int "Frequency Step Down" - default 108000 - depends on CPU_FREQ_GOV_LAGFREE - help - Max freqeuncy delta when ramping down. - -config LAGFREE_FREQ_SLEEP_MAX - int "Max Sleep frequeny" - default 384000 - depends on CPU_FREQ_GOV_LAGFREE - help - Max freqeuncy for screen off. - -config LAGFREE_FREQ_AWAKE_MIN - int "Min Awake frequeny" - default 384000 - depends on CPU_FREQ_GOV_LAGFREE - help - Min freqeuncy for screen on. - -config LAGFREE_FREQ_STEP_UP_SLEEP_PERCENT - int "Freq step up percent sleep" - default 20 - depends on CPU_FREQ_GOV_LAGFREE - help - Frequency percent to step up while screen off. - -config CPU_FREQ_DEFAULT_GOV_HOTPLUG - bool "hotplug" - select CPU_FREQ_GOV_HOTPLUG - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'hotplug' as default. This allows you - to get a full dynamic frequency capable system with CPU - hotplug support by simply loading your cpufreq low-level - hardware driver. Be aware that not all cpufreq drivers - support the hotplug governor. If unsure have a look at - the help section of the driver. Fallback governor will be the - performance governor. 
- -config CPU_FREQ_GOV_SMOOTHASS - tristate "'smoothass' cpufreq governor" - depends on CPU_FREQ - help - Yet another governor by erasmux - -config CPU_FREQ_GOV_SCARY - tristate "'scary' cpufreq governor" - depends on CPU_FREQ - help - Use as default governor - -config CPU_FREQ_GOV_ASSWAX - tristate "'asswax' cpufreq governor" - depends on CPU_FREQ - help - Use as default governors - -config CPU_FREQ_GOV_LAGFREE - tristate "'lagfree' cpufreq governor" - depends on CPU_FREQ - help - 'lagfree' - this driver is rather similar to the 'ondemand' - governor both in its source code and its purpose, the difference is - its optimisation for better suitability in a battery powered - environment. The frequency is gracefully increased and decreased - rather than jumping to 100% when speed is required. - -config LAGFREE_MAX_LOAD - int "Max CPU Load" - default 50 - depends on CPU_FREQ_GOV_LAGFREE - help - CPU freq will be increased if measured load > max_cpu_load; - -config LAGFREE_MIN_LOAD - int "Min CPU Load" - default 15 - depends on CPU_FREQ_GOV_LAGFREE - help - CPU freq will be decrease if measured load < min_cpu_load; - -config LAGFREE_FREQ_STEP_DOWN - int "Frequency Step Down" - default 108000 - depends on CPU_FREQ_GOV_LAGFREE - help - Max freqeuncy delta when ramping down. - -config LAGFREE_FREQ_SLEEP_MAX - int "Max Sleep frequeny" - default 384000 - depends on CPU_FREQ_GOV_LAGFREE - help - Max freqeuncy for screen off. - -config LAGFREE_FREQ_AWAKE_MIN - int "Min Awake frequeny" - default 384000 - depends on CPU_FREQ_GOV_LAGFREE - help - Min freqeuncy for screen on. - -config LAGFREE_FREQ_STEP_UP_SLEEP_PERCENT - int "Freq step up percent sleep" - default 20 - depends on CPU_FREQ_GOV_LAGFREE - help - Frequency percent to step up while screen off. - -config CPU_FREQ_GOV_LAZY - - tristate "'lazy' cpufreq governor" - depends on CPU_FREQ - help - 'lazy' - a "lazy" governor - - If in doubt, say N. - -config CPU_FREQ_GOV_LULZACTIVE - - tristate "'lulzactive' cpufreq governor" - depends on CPU_FREQ - help - 'lulzactive' - a new interactive governor by Tegrak! - - If in doubt, say N. - -config CPU_FREQ_GOV_ONDEMANDX - - tristate "'ondemandx' cpufreq governor" - depends on CPU_FREQ - help - 'ondemandx' - a new iteration of the ondemand governor - - If in doubt, say N. - -config CPU_FREQ_GOV_MINMAX - - tristate "'minmax' cpufreq governor" - depends on CPU_FREQ - help - 'minmax' - this driver tries to minimize the frequency jumps by limiting - the the selected frequencies to either the min or the max frequency of - the policy. The frequency is selected according to the load. - - If in doubt, say N. - -config CPU_FREQ_GOV_SavagedZen - - tristate "'savagedzen' cpufreq governor" - depends on CPU_FREQ - help - 'Savaged-Zen' - a smartass based governor - - If in doubt, say N. - -config CPU_FREQ_GOV_BRAZILIANWAX - tristate "'brazilianwax' cpufreq governor" - depends on CPU_FREQ - help - brazilianwax' - a "slightly more agressive smart" optimized governor! - - If in doubt, say Y. - -config CPU_FREQ_GOV_HOTPLUG - tristate "'hotplug' cpufreq governor" - depends on CPU_FREQ && NO_HZ && HOTPLUG_CPU - help - 'hotplug' - this driver mimics the frequency scaling behavior - in 'ondemand', but with several key differences. First is - that frequency transitions use the CPUFreq table directly, - instead of incrementing in a percentage of the maximum - available frequency. Second 'hotplug' will offline auxillary - CPUs when the system is idle, and online those CPUs once the - system becomes busy again. 
This last feature is needed for - architectures which transition to low power states when only - the "master" CPU is online, or for thermally constrained - devices. - - If you don't have one of these architectures or devices, use - 'ondemand' instead. - - If in doubt, say N. +endchoice config CPU_FREQ_GOV_ABYSSPLUG tristate "'abyssplug' cpufreq governor" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 75c24cbe..be135afd 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -20,16 +20,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o -obj-$(CONFIG_CPU_FREQ_GOV_LAGFREE) += cpufreq_lagfree.o -obj-$(CONFIG_CPU_FREQ_GOV_LAZY) += cpufreq_lazy.o -obj-$(CONFIG_CPU_FREQ_GOV_ONDEMANDX) += cpufreq_ondemandx.o -obj-$(CONFIG_CPU_FREQ_GOV_lulzactive) += cpufreq_lulzactive.o -obj-$(CONFIG_CPU_FREQ_GOV_SavagedZen) += cpufreq_savagedzen.o -obj-$(CONFIG_CPU_FREQ_GOV_BRAZILIANWAX) += cpufreq_brazilianwax.o -obj-$(CONFIG_CPU_FREQ_GOV_SCARY) += cpufreq_scary.o -obj-$(CONFIG_CPU_FREQ_GOV_SMOOTHASS) += cpufreq_smoothass.o -obj-$(CONFIG_CPU_FREQ_GOV_SCARY) += cpufreq_scary.o -obj-$(CONFIG_CPU_FREQ_GOV_HOTPLUG) += cpufreq_hotplug.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o From 52bfd49456c92b3ffbbe51d21fc0422d6b99adb8 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Thu, 8 Aug 2013 01:04:36 -0400 Subject: [PATCH 29/35] gdflgndsnfdkn CPU SHIZZ --- drivers/cpufreq/Kconfig | 549 +++++++++++--- drivers/cpufreq/Makefile | 11 +- drivers/cpufreq/cpufreq_adaptive.c | 952 ------------------------ drivers/cpufreq/cpufreq_intellidemand.c | 890 ---------------------- 4 files changed, 473 insertions(+), 1929 deletions(-) delete mode 100644 drivers/cpufreq/cpufreq_adaptive.c delete mode 100644 drivers/cpufreq/cpufreq_intellidemand.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 2e07ae54..d59b441c 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -103,25 +103,151 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE governor. If unsure have a look at the help section of the driver. Fallback governor will be the performance governor. +config CPU_FREQ_DEFAULT_GOV_LAGFREE + bool "lagfree" + select CPU_FREQ_GOV_LAGFREE + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lagfree' as default. This allows + you to get a full dynamic frequency capable system by simply + loading your cpufreq low-level hardware driver. + Be aware that not all cpufreq drivers support the lagfree + governor. If unsure have a look at the help section of the + driver. Fallback governor will be the performance governor. config CPU_FREQ_DEFAULT_GOV_DANCEDANCE bool "dancedance" select CPU_FREQ_GOV_DANCEDANCE help +config CPU_FREQ_DEFAULT_GOV_SMARTASS2 + bool "smartass2" + select CPU_FREQ_GOV_SMARTASS2 + help + Use the CPUFreq governor 'smartassV2' as default. + config CPU_FREQ_DEFAULT_GOV_NIGHTMARE - bool "nightmare" - select CPU_FREQ_GOV_NIGHTMARE - help + bool "nightmare" + select CPU_FREQ_GOV_NIGHTMARE + help + +config CPU_FREQ_DEFAULT_GOV_INTERACTIVEX + bool "interactiveX" + select CPU_FREQ_GOV_INTERACTIVEX + help + Use the CPUFreq governor 'interactiveX' as default. 
This allows + you to get a full dynamic cpu frequency capable system by simply + loading your cpufreq low-level hardware driver, using the + 'interactiveX' governor for latency-sensitive workloads. + +config CPU_FREQ_DEFAULT_GOV_SMARTASS + bool "smartass" + select CPU_FREQ_GOV_SMARTASS + help + Use the CPUFreq governor 'smartass' as default. + +config CPU_FREQ_DEFAULT_GOV_LAZY + + bool "lazy" + select CPU_FREQ_GOV_LAZY + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lazy' as default. + Fallback governor will be the performance governor. + +config CPU_FREQ_DEFAULT_GOV_LULZACTIVE + + bool "lulzactive" + select CPU_FREQ_GOV_LULZACTIVE + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'lulzactive' as default. + Fallback governor will be the performance governor. + +config CPU_FREQ_DEFAULT_GOV_ONDEMANDX + + bool "ondemandx" + select CPU_FREQ_GOV_ONDEMANDX + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'ondemandX' as default. + Fallback governor will be the performance governor. + +config CPU_FREQ_DEFAULT_GOV_INTERACTIVEX + bool "interactiveX" + select CPU_FREQ_GOV_INTERACTIVEX config CPU_FREQ_DEFAULT_GOV_ONDEMAND bool "ondemand" - select CPU_FREQ_GOV_ONDEMAND select CPU_FREQ_GOV_PERFORMANCE help - Use the CPUFreq governor 'ondemand' as default. This allows + Use the CPUFreq governor 'interactivex' as default. This allows + you to get a full dynamic cpu frequency capable system by simply + loading your cpufreq low-level hardware driver, using the + 'interactivex' governor for latency-sensitive workloads. +config CPU_FREQ_DEFAULT_GOV_MINMAX + + bool "minmax" + select CPU_FREQ_GOV_MINMAX + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'minmax' as default. This minimizes the + frequency jumps does by the governor. This is aimed at maximizing + both perfomance and battery life. + +config CPU_FREQ_DEFAULT_GOV_SavagedZen + + bool "smartass" + select CPU_FREQ_GOV_SavagedZen + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'SavagedZen' as default. + +config CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND + + bool "intellidemand" + select CPU_FREQ_GOV_INTELLIDEMAND + help + Use the CPUFreq governor 'intellidemand' as default. + +config CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX + bool "brazilianwax" + select CPU_FREQ_GOV_BRAZILIANWAX + help + Use the CPUFreq governor 'brazilianwax' as default. + +config CPU_FREQ_DEFAULT_GOV_WHEATLEY + bool "wheatley" + select CPU_FREQ_GOV_WHEATLEY + help + Use as default governor + +config CPU_FREQ_DEFAULT_GOV_SCARY + bool "scary" + select CPU_FREQ_GOV_SCARY + help + Use as default governor + +config CPU_FREQ_DEFAULT_GOV_SMOOTHASS + bool "smoothass" + select CPU_FREQ_GOV_SMOOTHASS + help + Use as default governor + +config CPU_FREQ_DEFAULT_GOV_ASSWAX + bool "asswax" + select CPU_FREQ_GOV_ASSWAX + help + Use as default governor + + +config CPU_FREQ_DEFAULT_GOV_BADASS + bool "badass" + select CPU_FREQ_GOV_BADASS + help + Use the CPUFreq governor 'badass' as default. This allows you to get a full dynamic frequency capable system by simply loading your cpufreq low-level hardware driver. - Be aware that not all cpufreq drivers support the ondemand + Be aware that not all cpufreq drivers support the badass governor. If unsure have a look at the help section of the driver. Fallback governor will be the performance governor. @@ -148,20 +274,43 @@ config CPU_FREQ_DEFAULT_GOV_POWERSAVE the frequency statically to the lowest frequency supported by the CPU. + For details, take a look at . 
+ + If in doubt, say Y. + +config CPU_FREQ_GOV_SMARTASS2 + tristate "'smartassV2' cpufreq governor" + depends on CPU_FREQ + help + 'smartassV2' - a "smart" governor + + If in doubt, say N. + config CPU_FREQ_DEFAULT_GOV_SMARTASSH3 bool "smartassH3" select CPU_FREQ_GOV_SMARTASSH3 help - Use the CPUFreq governor 'slp' as default. + +config CPU_FREQ_GOV_BRAZILIANWAX + tristate "'brazilianwax' cpufreq governor" + depends on CPU_FREQ + help + 'brazilianwax' - a "smart" governor + + If in doubt, say N. + +config CPU_FREQ_GOV_INTERACTIVEX + tristate "'interactiveX' cpufreq policy governor" + help + 'interactiveX' - Modified version of interactive with sleep+wake code. -config CPU_FREQ_DEFAULT_GOV_USERSPACE - bool "userspace" - select CPU_FREQ_GOV_USERSPACE +config CPU_FREQ_GOV_LULZACTIVE + tristate "'lulzactive' cpufreq governor" + depends on CPU_FREQ help - Use the CPUFreq governor 'userspace' as default. This allows - you to set the CPU frequency manually or when a userspace - program shall be able to set the CPU dynamically without having - to enable the userspace governor manually. + 'lulzactive' - a new interactive governor by Tegrak! + + If in doubt, say N. config CPU_FREQ_DEFAULT_GOV_WHEATLEY bool "wheatley" @@ -190,32 +339,89 @@ config CPU_FREQ_GOV_ABYSSPLUG 'ondemand' instead. If in doubt, say N. -config CPU_FREQ_GOV_ADAPTIVE - tristate "'adaptive' cpufreq policy governor" +config CPU_FREQ_GOV_ONDEMAND + tristate "'ondemand' cpufreq policy governor" + select CPU_FREQ_TABLE help - 'adaptive' - This driver adds a dynamic cpufreq policy governor - designed for latency-sensitive workloads and also for demanding - performance. - - This governor attempts to reduce the latency of clock - increases so that the system is more responsive to - interactive workloads in loweset steady-state but to - to reduce power consumption in middle operation level level up - will be done in step by step to prohibit system from going to - max operation level. + 'ondemand' - This driver adds a dynamic cpufreq policy governor. + The governor does a periodic polling and + changes frequency based on the CPU utilization. + The support for this governor depends on CPU capability to + do fast frequency switching (i.e, very low latency frequency + transitions). To compile this driver as a module, choose M here: the - module will be called cpufreq_adaptive. + module will be called cpufreq_ondemand. For details, take a look at linux/Documentation/cpu-freq. If in doubt, say N. +config CPU_FREQ_GOV_ONDEMAND_2_PHASE + tristate "'2-phase' power-efficiency ondemand algorithm" + depends on CPU_FREQ_GOV_ONDEMAND + help + '2-phase' - This driver adds a new algo to save power + +config CPU_FREQ_GOV_LAZY + tristate "'lazy' cpufreq governor" + depends on CPU_FREQ + config CPU_FREQ_GOV_ASSWAX tristate "'asswax' cpufreq governor" depends on CPU_FREQ + +config CPU_FREQ_GOV_LAGFREE + tristate "'lagfree' cpufreq governor" + depends on CPU_FREQ + help + 'lagfree' - this driver is rather similar to the 'ondemand' + governor both in its source code and its purpose, the difference is + its optimisation for better suitability in a battery powered + environment. The frequency is gracefully increased and decreased + rather than jumping to 100% when speed is required. 
+ +config LAGFREE_MAX_LOAD + int "Max CPU Load" + default 50 + depends on CPU_FREQ_GOV_LAGFREE help - Use as default governors + CPU freq will be increased if measured load > max_cpu_load; + +config LAGFREE_MIN_LOAD + int "Min CPU Load" + default 15 + depends on CPU_FREQ_GOV_LAGFREE + help + CPU freq will be decrease if measured load < min_cpu_load; + +config LAGFREE_FREQ_STEP_DOWN + int "Frequency Step Down" + default 108000 + depends on CPU_FREQ_GOV_LAGFREE + help + Max freqeuncy delta when ramping down. + +config LAGFREE_FREQ_SLEEP_MAX + int "Max Sleep frequeny" + default 384000 + depends on CPU_FREQ_GOV_LAGFREE + help + Max freqeuncy for screen off. + +config LAGFREE_FREQ_AWAKE_MIN + int "Min Awake frequeny" + default 384000 + depends on CPU_FREQ_GOV_LAGFREE + help + Min freqeuncy for screen on. + +config LAGFREE_FREQ_STEP_UP_SLEEP_PERCENT + int "Freq step up percent sleep" + default 20 + depends on CPU_FREQ_GOV_LAGFREE + help + Frequency percent to step up while screen off. config CPU_FREQ_GOV_BADASS tristate "'badass' cpufreq policy governor" @@ -225,11 +431,78 @@ config CPU_FREQ_GOV_BADASS The governor does a periodic polling and changes frequency based on the CPU utilization. The support for this governor depends on CPU capability to - do fast frequency switching (i.e, very low latency frequency transitions). + do fast frequency switching (i.e, very low latency frequency + transitions). + To compile this driver as a module, choose M here: the module will be called cpufreq_badass. + If in doubt, say N. +config CPU_FREQ_GOV_BADASS_2_PHASE + tristate "'2-phase' power-efficiency badass algorithm" + depends on CPU_FREQ_GOV_BADASS + help + '2-phase' - This driver adds a new algo to save power + +config CPU_FREQ_GOV_BADASS_2_PHASE_FREQ + int "'2-phase' badass frequency" + default 918000 + depends on CPU_FREQ_GOV_BADASS + depends on CPU_FREQ_GOV_BADASS_2_PHASE + +config CPU_FREQ_GOV_BADASS_3_PHASE + tristate "'3-phase' power-efficiency badass algorithm" + depends on CPU_FREQ_GOV_BADASS + depends on CPU_FREQ_GOV_BADASS_2_PHASE + help + '3-phase' - This driver adds a new algo to save power + +config CPU_FREQ_GOV_BADASS_3_PHASE_FREQ + int "'3-phase' badass frequency" + default 1188000 + depends on CPU_FREQ_GOV_BADASS + depends on CPU_FREQ_GOV_BADASS_2_PHASE + depends on CPU_FREQ_GOV_BADASS_3_PHASE + + +config CPU_FREQ_GOV_INTERACTIVE + tristate "'interactive' cpufreq policy governor" + help + 'interactive' - This driver adds a dynamic cpufreq policy governor + designed for latency-sensitive workloads. + + This governor attempts to reduce the latency of clock + increases so that the system is more responsive to + interactive workloads. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_interactive. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +config CPU_FREQ_GOV_SMARTASS2 + tristate "'smartassV2' cpufreq governor" + depends on CPU_FREQ + help + 'smartassV2' - a "smart" governor + If in doubt, say N. + +config CPU_FREQ_DEFAULT_GOV_HOTPLUG + bool "hotplug" + select CPU_FREQ_GOV_HOTPLUG + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'hotplug' as default. This allows you + to get a full dynamic frequency capable system with CPU + hotplug support by simply loading your cpufreq low-level + hardware driver. Be aware that not all cpufreq drivers + support the hotplug governor. If unsure have a look at + the help section of the driver. Fallback governor will be the + performance governor. 
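CPU_FREQ_GOV_BADASS_2_PHASE_FREQ and CPU_FREQ_GOV_BADASS_3_PHASE_FREQ above give the badass governor two intermediate ceilings (918 MHz and 1188 MHz by default), but the help text only says the phases save power. One plausible reading, shown purely as an illustration below, is that the governor caps its target at the next phase frequency until load has stayed high across consecutive samples, so short bursts never reach the top frequency. The persistence rule and every identifier in this sketch are assumptions, not the actual cpufreq_badass.c algorithm.

/*
 * Purely illustrative reading of the badass '2-phase'/'3-phase' options:
 * treat 918000 and 1188000 kHz as intermediate ceilings and only allow
 * the next ceiling after load has been high for consecutive samples.
 * The persistence rule and all identifiers are assumptions.
 */
#include <stdio.h>

#define PHASE2_FREQ 918000   /* CPU_FREQ_GOV_BADASS_2_PHASE_FREQ default */
#define PHASE3_FREQ 1188000  /* CPU_FREQ_GOV_BADASS_3_PHASE_FREQ default */
#define HIGH_LOAD   90

static unsigned int badass_cap(unsigned int load, unsigned int policy_max,
			       unsigned int *high_samples)
{
	if (load >= HIGH_LOAD)
		(*high_samples)++;
	else
		*high_samples = 0;

	if (*high_samples >= 3)
		return policy_max;   /* sustained load: no cap          */
	if (*high_samples == 2)
		return PHASE3_FREQ;  /* second high sample: third phase */
	return PHASE2_FREQ;          /* default ceiling: second phase   */
}

int main(void)
{
	unsigned int high = 0, trace[] = { 95, 95, 95, 40 };

	for (unsigned int i = 0; i < 4; i++)
		printf("load %2u%% -> cap %u kHz\n", trace[i],
		       badass_cap(trace[i], 1512000, &high));
	return 0;
}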
+ config CPU_FREQ_GOV_CONSERVATIVE tristate "'conservative' cpufreq governor" depends on CPU_FREQ @@ -261,98 +534,206 @@ config CPU_FREQ_GOV_NIGHTMARE tristate "'nightmare' cpufreq governor" depends on CPU_FREQ -config CPU_FREQ_GOV_ONDEMAND - tristate "'ondemand' cpufreq policy governor" - select CPU_FREQ_TABLE +config CPU_FREQ_GOV_SMOOTHASS + tristate "'smoothass' cpufreq governor" + depends on CPU_FREQ help - 'ondemand' - This driver adds a dynamic cpufreq policy governor. - The governor does a periodic polling and - changes frequency based on the CPU utilization. - The support for this governor depends on CPU capability to - do fast frequency switching (i.e, very low latency frequency - transitions). + Yet another governor by erasmux + +config CPU_FREQ_GOV_SCARY + tristate "'scary' cpufreq governor" + depends on CPU_FREQ + help + Use as default governor + +config CPU_FREQ_GOV_ASSWAX + tristate "'asswax' cpufreq governor" + depends on CPU_FREQ + help + Use as default governors + +config CPU_FREQ_GOV_LAGFREE + tristate "'lagfree' cpufreq governor" + depends on CPU_FREQ + help + 'lagfree' - this driver is rather similar to the 'ondemand' + governor both in its source code and its purpose, the difference is + its optimisation for better suitability in a battery powered + environment. The frequency is gracefully increased and decreased + rather than jumping to 100% when speed is required. + +config LAGFREE_MAX_LOAD + int "Max CPU Load" + default 50 + depends on CPU_FREQ_GOV_LAGFREE + help + CPU freq will be increased if measured load > max_cpu_load; - To compile this driver as a module, choose M here: the - module will be called cpufreq_ondemand. +config LAGFREE_MIN_LOAD + int "Min CPU Load" + default 15 + depends on CPU_FREQ_GOV_LAGFREE + help + CPU freq will be decrease if measured load < min_cpu_load; - For details, take a look at linux/Documentation/cpu-freq. +config LAGFREE_FREQ_STEP_DOWN + int "Frequency Step Down" + default 108000 + depends on CPU_FREQ_GOV_LAGFREE + help + Max freqeuncy delta when ramping down. - If in doubt, say N. +config LAGFREE_FREQ_SLEEP_MAX + int "Max Sleep frequeny" + default 384000 + depends on CPU_FREQ_GOV_LAGFREE + help + Max freqeuncy for screen off. -config CPU_FREQ_GOV_PERFORMANCE - tristate "'performance' governor" +config LAGFREE_FREQ_AWAKE_MIN + int "Min Awake frequeny" + default 384000 + depends on CPU_FREQ_GOV_LAGFREE help - This cpufreq governor sets the frequency statically to the - highest available CPU frequency. + Min freqeuncy for screen on. - To compile this driver as a module, choose M here: the - module will be called cpufreq_performance. +config LAGFREE_FREQ_STEP_UP_SLEEP_PERCENT + int "Freq step up percent sleep" + default 20 + depends on CPU_FREQ_GOV_LAGFREE + help + Frequency percent to step up while screen off. - If in doubt, say Y. +config CPU_FREQ_MIN_TICKS + int "Ticks between governor polling interval." + default 10 + help + Minimum number of ticks between polling interval for governors. -config CPU_FREQ_GOV_POWERSAVE - tristate "'powersave' governor" +config CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER + int "Sampling rate multiplier for governors." + default 1000 help - This cpufreq governor sets the frequency statically to the - lowest available CPU frequency. + Sampling latency rate multiplied by the cpu switch latency. + Affects governor polling. - To compile this driver as a module, choose M here: the - module will be called cpufreq_powersave. 
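CPU_FREQ_MIN_TICKS and CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER above bound how often the sampling governors poll. The short program below works through the usual ondemand-style arithmetic with the defaults (10 ticks, multiplier 1000) and an example transition latency; exactly where each symbol is consumed differs between the governors in this tree, so treat the mapping, the HZ=100 tick length and the example latency as assumptions.

/*
 * Worked example of the polling-interval arithmetic the two options
 * above influence, in the ondemand style: the sampling rate is the
 * driver's transition latency (in us) times the multiplier, but never
 * less than a floor derived from the minimum number of ticks. The
 * mapping of the CONFIG_ symbols, the tick length and the latency are
 * assumptions for this example.
 */
#include <stdio.h>

#define CPU_FREQ_MIN_TICKS                   10    /* Kconfig default           */
#define CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER 1000  /* Kconfig default           */
#define USECS_PER_TICK                       10000 /* assuming HZ=100: 10 ms    */

int main(void)
{
	unsigned int transition_latency_ns = 50000;            /* example driver value */
	unsigned int latency_us  = transition_latency_ns / 1000;
	unsigned int min_rate_us = CPU_FREQ_MIN_TICKS * USECS_PER_TICK;
	unsigned int rate_us     = latency_us * CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER;

	if (rate_us < min_rate_us)
		rate_us = min_rate_us;      /* the tick floor wins here */

	printf("latency %u ns -> poll every %u us (floor %u us)\n",
	       transition_latency_ns, rate_us, min_rate_us);
	return 0;
}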
+config CPU_FREQ_GOV_SMARTASS + tristate "'smartass' cpufreq governor" + depends on CPU_FREQ + help + 'smartass' - a "smart" governor + If in doubt, say N. + +config CPU_FREQ_GOV_LAZY - If in doubt, say Y. + tristate "'lazy' cpufreq governor" + depends on CPU_FREQ + help + 'lazy' - a "lazy" governor + + If in doubt, say N. -config CPU_FREQ_GOV_SLP - tristate "'slp' cpufreq policy governor" +config CPU_FREQ_GOV_LULZACTIVE -config CPU_FREQ_GOV_SMARTASSH3 - tristate "'smartassH3' cpufreq governor" + tristate "'lulzactive' cpufreq governor" depends on CPU_FREQ help - 'smartassH3' - a "smart" governor + 'lulzactive' - a new interactive governor by Tegrak! -config CPU_FREQ_GOV_USERSPACE - tristate "'userspace' governor for userspace frequency scaling" + If in doubt, say N. + +config CPU_FREQ_GOV_ONDEMANDX + + tristate "'ondemandx' cpufreq governor" + depends on CPU_FREQ help - Enable this cpufreq governor when you either want to set the - CPU frequency manually or when a userspace program shall - be able to set the CPU dynamically, like on LART - . + 'ondemandx' - a new iteration of the ondemand governor - To compile this driver as a module, choose M here: the - module will be called cpufreq_userspace. + If in doubt, say N. - For details, take a look at . +config CPU_FREQ_GOV_INTERACTIVEX - If in doubt, say Y. + tristate "'interactiveX' cpufreq governor" + depends on CPU_FREQ + help + 'interactiveX' - Modified version of interactive with sleep+wake code. + + If in doubt, say N. + +config CPU_FREQ_GOV_MINMAX + + tristate "'minmax' cpufreq governor" + depends on CPU_FREQ + help + 'minmax' - this driver tries to minimize the frequency jumps by limiting + the the selected frequencies to either the min or the max frequency of + the policy. The frequency is selected according to the load. + + If in doubt, say N. + +config CPU_FREQ_GOV_SavagedZen + + tristate "'savagedzen' cpufreq governor" + depends on CPU_FREQ + help + 'Savaged-Zen' - a smartass based governor + + If in doubt, say N. + +config CPU_FREQ_GOV_INTELLIDEMAND + tristate "'intellidemand' cpufreq governor" + depends on CPU_FREQ config CPU_FREQ_GOV_WHEATLEY tristate "'wheatley' cpufreq governor" depends on CPU_FREQ + help + 'wheatley' - a performance based governor config SEC_DVFS bool "DVFS job" default n - depends on CPU_FREQ + depends on CPU_FREQ_STAT + help + 'intellidemand' - A dynamic cpufreq governor for Low Latency Frequency + Transition capable processors -config SEC_DVFS_BOOSTER - bool "DVFS input booster" - default y - depends on SEC_DVFS + If in doubt, say N. + +config CPU_FREQ_GOV_BRAZILIANWAX + tristate "'brazilianwax' cpufreq governor" + depends on CPU_FREQ + help + brazilianwax' - a "slightly more agressive smart" optimized governor! + + If in doubt, say Y. + menu "x86 CPU frequency scaling drivers" depends on X86 source "drivers/cpufreq/Kconfig.x86" endmenu -menu "ARM CPU frequency scaling drivers" -depends on ARM -source "drivers/cpufreq/Kconfig.arm" -endmenu +config CPU_FREQ_GOV_HOTPLUG + tristate "'hotplug' cpufreq governor" + depends on CPU_FREQ && NO_HZ && HOTPLUG_CPU + help + 'hotplug' - this driver mimics the frequency scaling behavior + in 'ondemand', but with several key differences. First is + that frequency transitions use the CPUFreq table directly, + instead of incrementing in a percentage of the maximum + available frequency. Second 'hotplug' will offline auxillary + CPUs when the system is idle, and online those CPUs once the + system becomes busy again. 
This last feature is needed for + architectures which transition to low power states when only + the "master" CPU is online, or for thermally constrained + devices. + + If you don't have one of these architectures or devices, use + 'ondemand' instead. + + If in doubt, say N. -menu "PowerPC CPU frequency scaling drivers" -depends on PPC32 || PPC64 -source "drivers/cpufreq/Kconfig.powerpc" -endmenu endif endmenu diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index be135afd..bc6f220f 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -16,15 +16,20 @@ obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o obj-$(CONFIG_CPU_FREQ_GOV_SLP) += cpufreq_slp.o obj-$(CONFIG_CPU_FREQ_GOV_SMARTASSH3) += cpufreq_smartassH3.o +obj-$(CONFIG_CPU_FREQ_GOV_lulzactive) += cpufreq_lulzactive.o +obj-$(CONFIG_CPU_FREQ_GOV_SavagedZen) += cpufreq_savagedzen.o +obj-$(CONFIG_CPU_FREQ_GOV_BRAZILIANWAX) += cpufreq_brazilianwax.o +obj-$(CONFIG_CPU_FREQ_GOV_SCARY) += cpufreq_scary.o +obj-$(CONFIG_CPU_FREQ_GOV_SMOOTHASS) += cpufreq_smoothass.o +obj-$(CONFIG_CPU_FREQ_GOV_SCARY) += cpufreq_scary.o +obj-$(CONFIG_CPU_FREQ_GOV_HOTPLUG) += cpufreq_hotplug.o obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o -obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o -obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o -################################################################################## +##################################################################################d # x86 drivers. # Link order matters. K8 is preferred to ACPI because of firmware bugs in early # K8 systems. ACPI is preferred to all other hardware-specific drivers. diff --git a/drivers/cpufreq/cpufreq_adaptive.c b/drivers/cpufreq/cpufreq_adaptive.c deleted file mode 100644 index 2eff3e28..00000000 --- a/drivers/cpufreq/cpufreq_adaptive.c +++ /dev/null @@ -1,952 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_adaptive.c - * - * Copyright (C) 2001 Russell King - * (C) 2003 Venkatesh Pallipadi . - * Jun Nakajima - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ - -#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) -#define DEF_FREQUENCY_UP_THRESHOLD (80) -#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) -#define MICRO_FREQUENCY_UP_THRESHOLD (95) -#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) -#define MIN_FREQUENCY_UP_THRESHOLD (11) -#define MAX_FREQUENCY_UP_THRESHOLD (100) -#define MIN_ONDEMAND_THRESHOLD (4) -/* - * The polling frequency of this governor depends on the capability of - * the processor. Default polling frequency is 1000 times the transition - * latency of the processor. The governor will work on any processor with - * transition latency <= 10mS, using appropriate sampling - * rate. - * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) - * this governor will not work. - * All times here are in uS. 
- */ -#define MIN_SAMPLING_RATE_RATIO (2) - -static unsigned int min_sampling_rate; - -#define LATENCY_MULTIPLIER (1000) -#define MIN_LATENCY_MULTIPLIER (100) -#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) - -static void (*pm_idle_old)(void); -static void do_dbs_timer(struct work_struct *work); -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ADAPTIVE -static -#endif -struct cpufreq_governor cpufreq_gov_adaptive = { - .name = "adaptive", - .governor = cpufreq_governor_dbs, - .max_transition_latency = TRANSITION_LATENCY_LIMIT, - .owner = THIS_MODULE, -}; - -/* Sampling types */ -enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; - -struct cpu_dbs_info_s { - cputime64_t prev_cpu_idle; - cputime64_t prev_cpu_iowait; - cputime64_t prev_cpu_wall; - cputime64_t prev_cpu_nice; - struct cpufreq_policy *cur_policy; - struct delayed_work work; - struct cpufreq_frequency_table *freq_table; - unsigned int freq_hi_jiffies; - int cpu; - unsigned int sample_type:1; - bool ondemand; - /* - * percpu mutex that serializes governor limit change with - * do_dbs_timer invocation. We do not want do_dbs_timer to run - * when user is changing the governor or limits. - */ - struct mutex timer_mutex; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); - -static unsigned int dbs_enable; /* number of CPUs using this policy */ - -/* - * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on - * different CPUs. It protects dbs_enable in governor start/stop. - */ -static DEFINE_MUTEX(dbs_mutex); -static struct task_struct *up_task; -static struct workqueue_struct *down_wq; -static struct work_struct freq_scale_down_work; -static cpumask_t up_cpumask; -static spinlock_t up_cpumask_lock; -static cpumask_t down_cpumask; -static spinlock_t down_cpumask_lock; - -static DEFINE_PER_CPU(cputime64_t, idle_in_idle); -static DEFINE_PER_CPU(cputime64_t, idle_exit_wall); - -static struct timer_list cpu_timer; -static unsigned int target_freq; -static DEFINE_MUTEX(short_timer_mutex); - -/* Go to max speed when CPU load at or above this value. 
*/ -#define DEFAULT_GO_MAXSPEED_LOAD 60 -static unsigned long go_maxspeed_load; - -#define DEFAULT_KEEP_MINSPEED_LOAD 30 -static unsigned long keep_minspeed_load; - -#define DEFAULT_STEPUP_LOAD 10 -static unsigned long step_up_load; - -static struct dbs_tuners { - unsigned int sampling_rate; - unsigned int up_threshold; - unsigned int down_differential; - unsigned int ignore_nice; - unsigned int io_is_busy; -} dbs_tuners_ins = { - .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, - .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, - .ignore_nice = 0, -}; - -static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) -{ - u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); - - if (iowait_time == -1ULL) - return 0; - - return iowait_time; -} - -static void adaptive_init_cpu(int cpu) -{ - struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - dbs_info->freq_table = cpufreq_frequency_get_table(cpu); -} - -/************************** sysfs interface ************************/ - -static ssize_t show_sampling_rate_max(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - printk_once(KERN_INFO "CPUFREQ: adaptive sampling_rate_max " - "sysfs file is deprecated - used by: %s\n", current->comm); - return sprintf(buf, "%u\n", -1U); -} - -static ssize_t show_sampling_rate_min(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", min_sampling_rate); -} - -define_one_global_ro(sampling_rate_max); -define_one_global_ro(sampling_rate_min); - -/* cpufreq_adaptive Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct kobject *kobj, struct attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ -} -show_one(sampling_rate, sampling_rate); -show_one(io_is_busy, io_is_busy); -show_one(up_threshold, up_threshold); -show_one(ignore_nice_load, ignore_nice); - -/*** delete after deprecation time ***/ - -#define DEPRECATION_MSG(file_name) \ - printk_once(KERN_INFO "CPUFREQ: Per core adaptive sysfs " \ - "interface is deprecated - " #file_name "\n"); - -#define show_one_old(file_name) \ -static ssize_t show_##file_name##_old \ -(struct cpufreq_policy *unused, char *buf) \ -{ \ - printk_once(KERN_INFO "CPUFREQ: Per core adaptive sysfs " \ - "interface is deprecated - " #file_name "\n"); \ - return show_##file_name(NULL, NULL, buf); \ -} - -/*** delete after deprecation time ***/ - -static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.io_is_busy = !!input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || - input < MIN_FREQUENCY_UP_THRESHOLD) { - return -EINVAL; - } - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.up_threshold = input; - mutex_unlock(&dbs_mutex); - - return 
count; -} - -static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - unsigned int j; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - if (input > 1) - input = 1; - - mutex_lock(&dbs_mutex); - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - mutex_unlock(&dbs_mutex); - return count; - } - dbs_tuners_ins.ignore_nice = input; - - /* we need to re-evaluate prev_cpu_idle */ - for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; - dbs_info = &per_cpu(od_cpu_dbs_info, j); - dbs_info->prev_cpu_idle = get_cpu_idle_time_us(j, - &dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - - } - mutex_unlock(&dbs_mutex); - - return count; -} - -define_one_global_rw(sampling_rate); -define_one_global_rw(io_is_busy); -define_one_global_rw(up_threshold); -define_one_global_rw(ignore_nice_load); - -static struct attribute *dbs_attributes[] = { - &sampling_rate_max.attr, - &sampling_rate_min.attr, - &sampling_rate.attr, - &up_threshold.attr, - &ignore_nice_load.attr, - &io_is_busy.attr, - NULL -}; - -static struct attribute_group dbs_attr_group = { - .attrs = dbs_attributes, - .name = "adaptive", -}; - -/*** delete after deprecation time ***/ - -#define write_one_old(file_name) \ -static ssize_t store_##file_name##_old \ -(struct cpufreq_policy *unused, const char *buf, size_t count) \ -{ \ - printk_once(KERN_INFO "CPUFREQ: Per core adaptive sysfs " \ - "interface is deprecated - " #file_name "\n"); \ - return store_##file_name(NULL, NULL, buf, count); \ -} - -static void cpufreq_adaptive_timer(unsigned long data) -{ - cputime64_t cur_idle; - cputime64_t cur_wall; - unsigned int delta_idle; - unsigned int delta_time; - int short_load; - unsigned int new_freq; - unsigned long flags; - struct cpu_dbs_info_s *this_dbs_info; - struct cpufreq_policy *policy; - unsigned int j; - unsigned int index; - unsigned int max_load = 0; - - this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); - - policy = this_dbs_info->cur_policy; - - for_each_online_cpu(j) { - cur_idle = get_cpu_idle_time_us(j, &cur_wall); - - delta_idle = (unsigned int) cputime64_sub(cur_idle, - per_cpu(idle_in_idle, j)); - delta_time = (unsigned int) cputime64_sub(cur_wall, - per_cpu(idle_exit_wall, j)); - - /* - * If timer ran less than 1ms after short-term sample started, retry. 
- */ - if (delta_time < 1000) - goto do_nothing; - - if (delta_idle > delta_time) - short_load = 0; - else - short_load = 100 * (delta_time - delta_idle) / delta_time; - - if (short_load > max_load) - max_load = short_load; - } - - if (this_dbs_info->ondemand) - goto do_nothing; - - if (max_load >= go_maxspeed_load) - new_freq = policy->max; - else - new_freq = policy->max * max_load / 100; - - if ((max_load <= keep_minspeed_load) && - (policy->cur == policy->min)) - new_freq = policy->cur; - - if (cpufreq_frequency_table_target(policy, this_dbs_info->freq_table, - new_freq, CPUFREQ_RELATION_L, - &index)) { - goto do_nothing; - } - - new_freq = this_dbs_info->freq_table[index].frequency; - - target_freq = new_freq; - - if (new_freq < this_dbs_info->cur_policy->cur) { - spin_lock_irqsave(&down_cpumask_lock, flags); - cpumask_set_cpu(0, &down_cpumask); - spin_unlock_irqrestore(&down_cpumask_lock, flags); - queue_work(down_wq, &freq_scale_down_work); - } else { - spin_lock_irqsave(&up_cpumask_lock, flags); - cpumask_set_cpu(0, &up_cpumask); - spin_unlock_irqrestore(&up_cpumask_lock, flags); - wake_up_process(up_task); - } - - return; - -do_nothing: - for_each_online_cpu(j) { - per_cpu(idle_in_idle, j) = - get_cpu_idle_time_us(j, - &per_cpu(idle_exit_wall, j)); - } - mod_timer(&cpu_timer, jiffies + 2); - schedule_delayed_work_on(0, &this_dbs_info->work, 10); - - if (mutex_is_locked(&short_timer_mutex)) - mutex_unlock(&short_timer_mutex); - return; -} - -/*** delete after deprecation time ***/ - -/************************** sysfs end ************************/ - -static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) -{ -#ifndef CONFIG_ARCH_EXYNOS4 - if (p->cur == p->max) - return; -#endif - __cpufreq_driver_target(p, freq, CPUFREQ_RELATION_H); -} - -static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) -{ - unsigned int max_load_freq; - - struct cpufreq_policy *policy; - unsigned int j; - - unsigned int index, new_freq; - unsigned int longterm_load = 0; - - policy = this_dbs_info->cur_policy; - - /* - * Every sampling_rate, we check, if current idle time is less - * than 20% (default), then we try to increase frequency - * Every sampling_rate, we look for a the lowest - * frequency which can sustain the load while keeping idle time over - * 30%. If such a frequency exist, we try to decrease to this frequency. - * - * Any frequency increase takes it to the maximum frequency. 
- * Frequency reduction happens at minimum steps of - * 5% (default) of current frequency - */ - - /* Get Absolute Load - in terms of freq */ - max_load_freq = 0; - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; - unsigned int idle_time, wall_time, iowait_time; - unsigned int load, load_freq; - int freq_avg; - - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - - cur_idle_time = get_cpu_idle_time_us(j, &cur_wall_time); - cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); - - wall_time = (unsigned int) cputime64_sub(cur_wall_time, - j_dbs_info->prev_cpu_wall); - j_dbs_info->prev_cpu_wall = cur_wall_time; - - idle_time = (unsigned int) cputime64_sub(cur_idle_time, - j_dbs_info->prev_cpu_idle); - j_dbs_info->prev_cpu_idle = cur_idle_time; - - iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, - j_dbs_info->prev_cpu_iowait); - j_dbs_info->prev_cpu_iowait = cur_iowait_time; - - if (dbs_tuners_ins.ignore_nice) { - cputime64_t cur_nice; - unsigned long cur_nice_jiffies; - - cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - - j_dbs_info->prev_cpu_nice; - /* - * Assumption: nice time between sampling periods will - * be less than 2^32 jiffies for 32 bit sys - */ - cur_nice_jiffies = (unsigned long) - cputime64_to_jiffies64(cur_nice); - - j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - idle_time += jiffies_to_usecs(cur_nice_jiffies); - } - - /* - * For the purpose of adaptive, waiting for disk IO is an - * indication that you're performance critical, and not that - * the system is actually idle. So subtract the iowait time - * from the cpu idle time. - */ - - if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) - idle_time -= iowait_time; - - if (unlikely(!wall_time || wall_time < idle_time)) - continue; - - load = 100 * (wall_time - idle_time) / wall_time; - - if (load > longterm_load) - longterm_load = load; - - freq_avg = __cpufreq_driver_getavg(policy, j); - if (freq_avg <= 0) - freq_avg = policy->cur; - - load_freq = load * freq_avg; - - if (load_freq > max_load_freq) - max_load_freq = load_freq; - } - - if (longterm_load >= MIN_ONDEMAND_THRESHOLD) - this_dbs_info->ondemand = true; - else - this_dbs_info->ondemand = false; - - /* Check for frequency increase */ - if (max_load_freq > (dbs_tuners_ins.up_threshold * policy->cur)) { - cpufreq_frequency_table_target(policy, - this_dbs_info->freq_table, - (policy->cur + step_up_load), - CPUFREQ_RELATION_L, &index); - - new_freq = this_dbs_info->freq_table[index].frequency; - dbs_freq_increase(policy, new_freq); - return; - } - - /* Check for frequency decrease */ - /* if we cannot reduce the frequency anymore, break out early */ -#ifndef CONFIG_ARCH_EXYNOS4 - if (policy->cur == policy->min) - return; -#endif - /* - * The optimal frequency is the frequency that is the lowest that - * can support the current CPU usage without triggering the up - * policy. To be safe, we focus 10 points under the threshold. 
- */ - if (max_load_freq < - (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * - policy->cur) { - unsigned int freq_next; - freq_next = max_load_freq / - (dbs_tuners_ins.up_threshold - - dbs_tuners_ins.down_differential); - - if (freq_next < policy->min) - freq_next = policy->min; - - __cpufreq_driver_target(policy, freq_next, - CPUFREQ_RELATION_L); - } -} - -static void do_dbs_timer(struct work_struct *work) -{ - struct cpu_dbs_info_s *dbs_info = - container_of(work, struct cpu_dbs_info_s, work.work); - unsigned int cpu = dbs_info->cpu; - - int delay; - - mutex_lock(&dbs_info->timer_mutex); - - /* Common NORMAL_SAMPLE setup */ - dbs_info->sample_type = DBS_NORMAL_SAMPLE; - dbs_check_cpu(dbs_info); - - /* We want all CPUs to do sampling nearly on - * same jiffy - */ - delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - schedule_delayed_work_on(cpu, &dbs_info->work, delay); - - mutex_unlock(&dbs_info->timer_mutex); -} - -static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) -{ - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - dbs_info->sample_type = DBS_NORMAL_SAMPLE; - INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); - schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); -} - -static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) -{ - cancel_delayed_work_sync(&dbs_info->work); -} - -/* - * Not all CPUs want IO time to be accounted as busy; this dependson how - * efficient idling at a higher frequency/voltage is. - * Pavel Machek says this is not so for various generations of AMD and old - * Intel systems. - * Mike Chan (androidlcom) calis this is also not true for ARM. - * Because of this, whitelist specific known (series) of CPUs by default, and - * leave all others up to the user. - */ -static int should_io_be_busy(void) -{ -#if defined(CONFIG_X86) - /* - * For Intel, Core 2 (model 15) andl later have an efficient idle. 
- */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - boot_cpu_data.x86 == 6 && - boot_cpu_data.x86_model >= 15) - return 1; -#endif - return 0; -} - -static void cpufreq_adaptive_idle(void) -{ - int i; - struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, 0); - struct cpufreq_policy *policy; - - policy = dbs_info->cur_policy; - - pm_idle_old(); - - if ((policy->cur == policy->min) || - (policy->cur == policy->max)) { - - if (timer_pending(&cpu_timer)) - return; - - if (mutex_trylock(&short_timer_mutex)) { - for_each_online_cpu(i) { - per_cpu(idle_in_idle, i) = - get_cpu_idle_time_us(i, - &per_cpu(idle_exit_wall, i)); - } - - mod_timer(&cpu_timer, jiffies + 2); - cancel_delayed_work(&dbs_info->work); - } - } else { - if (timer_pending(&cpu_timer)) - del_timer(&cpu_timer); - - } -} - -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int j; - int rc; - - this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - mutex_lock(&dbs_mutex); - - rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - dbs_enable++; - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle = get_cpu_idle_time_us(j, - &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) { - j_dbs_info->prev_cpu_nice = - kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - } - } - this_dbs_info->cpu = cpu; - adaptive_init_cpu(cpu); - - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 1) { - unsigned int latency; - - rc = sysfs_create_group(cpufreq_global_kobject, - &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - /* policy latency is in nS. 
Convert it to uS first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - /* Bring kernel and HW constraints together */ - min_sampling_rate = max(min_sampling_rate, - MIN_LATENCY_MULTIPLIER * latency); - dbs_tuners_ins.sampling_rate = - max(min_sampling_rate, - latency * LATENCY_MULTIPLIER); - dbs_tuners_ins.io_is_busy = should_io_be_busy(); - } - mutex_unlock(&dbs_mutex); - - mutex_init(&this_dbs_info->timer_mutex); - dbs_timer_init(this_dbs_info); - - pm_idle_old = pm_idle; - pm_idle = cpufreq_adaptive_idle; - break; - - case CPUFREQ_GOV_STOP: - dbs_timer_exit(this_dbs_info); - - mutex_lock(&dbs_mutex); - sysfs_remove_group(&policy->kobj, &dbs_attr_group); - mutex_destroy(&this_dbs_info->timer_mutex); - dbs_enable--; - mutex_unlock(&dbs_mutex); - if (!dbs_enable) - sysfs_remove_group(cpufreq_global_kobject, - &dbs_attr_group); - - pm_idle = pm_idle_old; - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&this_dbs_info->timer_mutex); - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); - mutex_unlock(&this_dbs_info->timer_mutex); - break; - } - return 0; -} - -static inline void cpufreq_adaptive_update_time(void) -{ - struct cpu_dbs_info_s *this_dbs_info; - struct cpufreq_policy *policy; - int j; - - this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); - policy = this_dbs_info->cur_policy; - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; - - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - - cur_idle_time = get_cpu_idle_time_us(j, &cur_wall_time); - cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); - - j_dbs_info->prev_cpu_wall = cur_wall_time; - - j_dbs_info->prev_cpu_idle = cur_idle_time; - - j_dbs_info->prev_cpu_iowait = cur_iowait_time; - - if (dbs_tuners_ins.ignore_nice) - j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - - } - -} - -static int cpufreq_adaptive_up_task(void *data) -{ - unsigned long flags; - struct cpu_dbs_info_s *this_dbs_info; - struct cpufreq_policy *policy; - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); - policy = this_dbs_info->cur_policy; - - while (1) { - set_current_state(TASK_INTERRUPTIBLE); - spin_lock_irqsave(&up_cpumask_lock, flags); - - if (cpumask_empty(&up_cpumask)) { - spin_unlock_irqrestore(&up_cpumask_lock, flags); - schedule(); - - if (kthread_should_stop()) - break; - - spin_lock_irqsave(&up_cpumask_lock, flags); - } - - set_current_state(TASK_RUNNING); - - cpumask_clear(&up_cpumask); - spin_unlock_irqrestore(&up_cpumask_lock, flags); - - __cpufreq_driver_target(this_dbs_info->cur_policy, - target_freq, - CPUFREQ_RELATION_H); - if (policy->cur != policy->max) { - mutex_lock(&this_dbs_info->timer_mutex); - - schedule_delayed_work_on(0, &this_dbs_info->work, delay); - mutex_unlock(&this_dbs_info->timer_mutex); - cpufreq_adaptive_update_time(); - } - if (mutex_is_locked(&short_timer_mutex)) - mutex_unlock(&short_timer_mutex); - } - - return 0; -} - -static void cpufreq_adaptive_freq_down(struct work_struct *work) -{ - unsigned long flags; - struct cpu_dbs_info_s *this_dbs_info; - struct cpufreq_policy *policy; - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - spin_lock_irqsave(&down_cpumask_lock, 
flags); - cpumask_clear(&down_cpumask); - spin_unlock_irqrestore(&down_cpumask_lock, flags); - - this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); - policy = this_dbs_info->cur_policy; - - __cpufreq_driver_target(this_dbs_info->cur_policy, - target_freq, - CPUFREQ_RELATION_H); - - if (policy->cur != policy->min) { - mutex_lock(&this_dbs_info->timer_mutex); - - schedule_delayed_work_on(0, &this_dbs_info->work, delay); - mutex_unlock(&this_dbs_info->timer_mutex); - cpufreq_adaptive_update_time(); - } - - if (mutex_is_locked(&short_timer_mutex)) - mutex_unlock(&short_timer_mutex); -} - -static int __init cpufreq_gov_dbs_init(void) -{ - cputime64_t wall; - u64 idle_time; - int cpu = get_cpu(); - - struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; - go_maxspeed_load = DEFAULT_GO_MAXSPEED_LOAD; - keep_minspeed_load = DEFAULT_KEEP_MINSPEED_LOAD; - step_up_load = DEFAULT_STEPUP_LOAD; - - idle_time = get_cpu_idle_time_us(cpu, &wall); - put_cpu(); - if (idle_time != -1ULL) { - /* Idle micro accounting is supported. Use finer thresholds */ - dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; - dbs_tuners_ins.down_differential = - MICRO_FREQUENCY_DOWN_DIFFERENTIAL; - /* - * In no_hz/micro accounting case we set the minimum frequency - * not depending on HZ, but fixed (very low). The deferred - * timer might skip some samples if idle/sleeping as needed. - */ - min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; - } else { - /* For correct statistics, we need 10 ticks for each measure */ - min_sampling_rate = - MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); - } - - init_timer(&cpu_timer); - cpu_timer.function = cpufreq_adaptive_timer; - - up_task = kthread_create(cpufreq_adaptive_up_task, NULL, - "kadaptiveup"); - - if (IS_ERR(up_task)) - return PTR_ERR(up_task); - - sched_setscheduler_nocheck(up_task, SCHED_FIFO, ¶m); - get_task_struct(up_task); - - /* No rescuer thread, bind to CPU queuing the work for possibly - warm cache (probably doesn't matter much). */ - down_wq = alloc_workqueue("kadaptive_down", 0, 1); - - if (!down_wq) - goto err_freeuptask; - - INIT_WORK(&freq_scale_down_work, cpufreq_adaptive_freq_down); - - - return cpufreq_register_governor(&cpufreq_gov_adaptive); -err_freeuptask: - put_task_struct(up_task); - return -ENOMEM; -} - -static void __exit cpufreq_gov_dbs_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_adaptive); -} - - -MODULE_AUTHOR("Venkatesh Pallipadi "); -MODULE_AUTHOR("Alexey Starikovskiy "); -MODULE_DESCRIPTION("'cpufreq_adaptive' - A dynamic cpufreq governor for " - "Low Latency Frequency Transition capable processors"); -MODULE_LICENSE("GPL"); - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ADAPTIVE -fs_initcall(cpufreq_gov_dbs_init); -#else -module_init(cpufreq_gov_dbs_init); -#endif -module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_intellidemand.c b/drivers/cpufreq/cpufreq_intellidemand.c deleted file mode 100644 index f0a5630e..00000000 --- a/drivers/cpufreq/cpufreq_intellidemand.c +++ /dev/null @@ -1,890 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_intellidemand.c - * - * Copyright (C) 2001 Russell King - * (C) 2003 Venkatesh Pallipadi . - * Jun Nakajima - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define _LIMIT_LCD_OFF_CPU_MAX_FREQ_ - -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ - -#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) -#define DEF_FREQUENCY_UP_THRESHOLD (90) -#define DEF_SAMPLING_DOWN_FACTOR (15) -#define MAX_SAMPLING_DOWN_FACTOR (100000) -#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) -#define MICRO_FREQUENCY_UP_THRESHOLD (85) -#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) -#define MIN_FREQUENCY_UP_THRESHOLD (11) -#define MAX_FREQUENCY_UP_THRESHOLD (100) -#define MIN_FREQUENCY_DOWN_DIFFERENTIAL (1) - -/* - * The polling frequency of this governor depends on the capability of - * the processor. Default polling frequency is 1000 times the transition - * latency of the processor. The governor will work on any processor with - * transition latency <= 10mS, using appropriate sampling - * rate. - * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) - * this governor will not work. - * All times here are in uS. - */ -#define MIN_SAMPLING_RATE_RATIO (2) - -static unsigned int min_sampling_rate; - -#define LATENCY_MULTIPLIER (1000) -#define MIN_LATENCY_MULTIPLIER (100) -#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) - -static void do_dbs_timer(struct work_struct *work); -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event); - -#ifdef _LIMIT_LCD_OFF_CPU_MAX_FREQ_ -#ifdef CONFIG_HAS_EARLYSUSPEND -static struct early_suspend cpufreq_gov_early_suspend; -static unsigned int cpufreq_gov_lcd_status; -#endif -#endif - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND -static -#endif -struct cpufreq_governor cpufreq_gov_intellidemand = { - .name = "intellidemand", - .governor = cpufreq_governor_dbs, - .max_transition_latency = TRANSITION_LATENCY_LIMIT, - .owner = THIS_MODULE, -}; - -/* Sampling types */ -enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; - -struct cpu_dbs_info_s { - cputime64_t prev_cpu_idle; - cputime64_t prev_cpu_iowait; - cputime64_t prev_cpu_wall; - cputime64_t prev_cpu_nice; - struct cpufreq_policy *cur_policy; - struct delayed_work work; - struct cpufreq_frequency_table *freq_table; - unsigned int freq_lo; - unsigned int freq_lo_jiffies; - unsigned int freq_hi_jiffies; - unsigned int rate_mult; - int cpu; - unsigned int sample_type:1; - /* - * percpu mutex that serializes governor limit change with - * do_dbs_timer invocation. We do not want do_dbs_timer to run - * when user is changing the governor or limits. - */ - struct mutex timer_mutex; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); - -static unsigned int dbs_enable; /* number of CPUs using this policy */ - -/* - * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on - * different CPUs. It protects dbs_enable in governor start/stop. 
- */ -static DEFINE_MUTEX(dbs_mutex); - -static struct workqueue_struct *kintellidemand_wq; - -static struct dbs_tuners { - unsigned int sampling_rate; - unsigned int up_threshold; - unsigned int down_differential; - unsigned int ignore_nice; - unsigned int sampling_down_factor; - unsigned int powersave_bias; - unsigned int io_is_busy; -} dbs_tuners_ins = { - .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, - .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, - .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, - .ignore_nice = 0, - .powersave_bias = 0, -}; - -static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, - cputime64_t *wall) -{ - u64 idle_time; - u64 cur_wall_time; - u64 busy_time; - - cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); - - busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; - - idle_time = cur_wall_time - busy_time; - if (wall) - *wall = jiffies_to_usecs(cur_wall_time); - - return jiffies_to_usecs(idle_time); -} - -static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) -{ - u64 idle_time = get_cpu_idle_time_us(cpu, wall); - - if (idle_time == -1ULL) - return get_cpu_idle_time_jiffy(cpu, wall); - - return idle_time; -} - -static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) -{ - u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); - - if (iowait_time == -1ULL) - return 0; - - return iowait_time; -} - -/* - * Find right freq to be set now with powersave_bias on. - * Returns the freq_hi to be used right now and will set freq_hi_jiffies, - * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. 
- */ -static unsigned int powersave_bias_target(struct cpufreq_policy *policy, - unsigned int freq_next, - unsigned int relation) -{ - unsigned int freq_req, freq_reduc, freq_avg; - unsigned int freq_hi, freq_lo; - unsigned int index = 0; - unsigned int jiffies_total, jiffies_hi, jiffies_lo; - struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, - policy->cpu); - - if (!dbs_info->freq_table) { - dbs_info->freq_lo = 0; - dbs_info->freq_lo_jiffies = 0; - return freq_next; - } - - cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, - relation, &index); - freq_req = dbs_info->freq_table[index].frequency; - freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000; - freq_avg = freq_req - freq_reduc; - - /* Find freq bounds for freq_avg in freq_table */ - index = 0; - cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, - CPUFREQ_RELATION_H, &index); - freq_lo = dbs_info->freq_table[index].frequency; - index = 0; - cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, - CPUFREQ_RELATION_L, &index); - freq_hi = dbs_info->freq_table[index].frequency; - - /* Find out how long we have to be in hi and lo freqs */ - if (freq_hi == freq_lo) { - dbs_info->freq_lo = 0; - dbs_info->freq_lo_jiffies = 0; - return freq_lo; - } - jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - jiffies_hi = (freq_avg - freq_lo) * jiffies_total; - jiffies_hi += ((freq_hi - freq_lo) / 2); - jiffies_hi /= (freq_hi - freq_lo); - jiffies_lo = jiffies_total - jiffies_hi; - dbs_info->freq_lo = freq_lo; - dbs_info->freq_lo_jiffies = jiffies_lo; - dbs_info->freq_hi_jiffies = jiffies_hi; - return freq_hi; -} - -static void intellidemand_powersave_bias_init_cpu(int cpu) -{ - struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - dbs_info->freq_table = cpufreq_frequency_get_table(cpu); - dbs_info->freq_lo = 0; -} - -static void intellidemand_powersave_bias_init(void) -{ - int i; - for_each_online_cpu(i) { - intellidemand_powersave_bias_init_cpu(i); - } -} - -/************************** sysfs interface ************************/ - -static ssize_t show_sampling_rate_max(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - printk_once(KERN_INFO "CPUFREQ: intellidemand sampling_rate_max " - "sysfs file is deprecated - used by: %s\n", current->comm); - return sprintf(buf, "%u\n", -1U); -} - -static ssize_t show_sampling_rate_min(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", min_sampling_rate); -} - -define_one_global_ro(sampling_rate_max); -define_one_global_ro(sampling_rate_min); - -/* cpufreq_intellidemand Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct kobject *kobj, struct attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ -} -show_one(sampling_rate, sampling_rate); -show_one(io_is_busy, io_is_busy); -show_one(up_threshold, up_threshold); -show_one(down_differential, down_differential); -show_one(sampling_down_factor, sampling_down_factor); -show_one(ignore_nice_load, ignore_nice); -show_one(powersave_bias, powersave_bias); - -static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t 
store_io_is_busy(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.io_is_busy = !!input; - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || - input < MIN_FREQUENCY_UP_THRESHOLD) { - return -EINVAL; - } - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.up_threshold = input; - mutex_unlock(&dbs_mutex); - - return count; -} -static ssize_t store_sampling_down_factor(struct kobject *a, - struct attribute *b, const char *buf, size_t count) -{ - unsigned int input, j; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) - return -EINVAL; - mutex_lock(&dbs_mutex); - dbs_tuners_ins.sampling_down_factor = input; - - /* Reset down sampling multiplier in case it was active */ - for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; - dbs_info = &per_cpu(od_cpu_dbs_info, j); - dbs_info->rate_mult = 1; - } - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - unsigned int j; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - if (input > 1) - input = 1; - - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - return count; - } - dbs_tuners_ins.ignore_nice = input; - - /* we need to re-evaluate prev_cpu_idle */ - for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; - dbs_info = &per_cpu(od_cpu_dbs_info, j); - dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - - } - return count; -} - -static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1) - return -EINVAL; - - if (input > 1000) - input = 1000; - - mutex_lock(&dbs_mutex); - dbs_tuners_ins.powersave_bias = input; - intellidemand_powersave_bias_init(); - mutex_unlock(&dbs_mutex); - - return count; -} - -static ssize_t store_down_differential(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - mutex_lock(&dbs_mutex); - if (ret != 1 || input >= dbs_tuners_ins.up_threshold || - input < MIN_FREQUENCY_DOWN_DIFFERENTIAL) { - mutex_unlock(&dbs_mutex); - return -EINVAL; - } - - dbs_tuners_ins.down_differential = input; - mutex_unlock(&dbs_mutex); - - return count; -} -define_one_global_rw(sampling_rate); -define_one_global_rw(io_is_busy); -define_one_global_rw(up_threshold); -define_one_global_rw(down_differential); -define_one_global_rw(sampling_down_factor); -define_one_global_rw(ignore_nice_load); -define_one_global_rw(powersave_bias); -#ifdef CONFIG_SEC_LIMIT_MAX_FREQ // limit max freq -define_one_global_rw(lmf_temp); -define_one_global_rw(lmf_browser); -define_one_global_rw(lmf_active_load); -define_one_global_rw(lmf_inactive_load); -#endif -static struct attribute *dbs_attributes[] = { - &sampling_rate_max.attr, - &sampling_rate_min.attr, - 
&sampling_rate.attr, - &up_threshold.attr, - &down_differential.attr, - &sampling_down_factor.attr, - &ignore_nice_load.attr, - &powersave_bias.attr, - &io_is_busy.attr, -#ifdef CONFIG_SEC_LIMIT_MAX_FREQ // limit max freq - &lmf_temp.attr, - &lmf_browser.attr, - &lmf_active_load.attr, - &lmf_inactive_load.attr, -#endif - NULL -}; - -static struct attribute_group dbs_attr_group = { - .attrs = dbs_attributes, - .name = "intellidemand", -}; - -/************************** sysfs end ************************/ - -static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) -{ - if (dbs_tuners_ins.powersave_bias) - freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); - else if (p->cur == p->max) - return; - - __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ? - CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); -} - -static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) -{ - unsigned int max_load_freq; - - struct cpufreq_policy *policy; - unsigned int j; - - this_dbs_info->freq_lo = 0; - policy = this_dbs_info->cur_policy; - - /* - * Every sampling_rate, we check, if current idle time is less - * than 20% (default), then we try to increase frequency - * Every sampling_rate, we look for a the lowest - * frequency which can sustain the load while keeping idle time over - * 30%. If such a frequency exist, we try to decrease to this frequency. - * - * Any frequency increase takes it to the maximum frequency. - * Frequency reduction happens at minimum steps of - * 5% (default) of current frequency - */ - - /* Get Absolute Load - in terms of freq */ - max_load_freq = 0; - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; - unsigned int idle_time, wall_time, iowait_time; - unsigned int load, load_freq; - int freq_avg; - - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - - cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); - cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); - - wall_time = (unsigned int) - (cur_wall_time - j_dbs_info->prev_cpu_wall); - j_dbs_info->prev_cpu_wall = cur_wall_time; - - idle_time = (unsigned int) - (cur_idle_time - j_dbs_info->prev_cpu_idle); - j_dbs_info->prev_cpu_idle = cur_idle_time; - - iowait_time = (unsigned int) - (cur_iowait_time - j_dbs_info->prev_cpu_iowait); - j_dbs_info->prev_cpu_iowait = cur_iowait_time; - - if (dbs_tuners_ins.ignore_nice) { - u64 cur_nice; - unsigned long cur_nice_jiffies; - - cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - - j_dbs_info->prev_cpu_nice; - /* - * Assumption: nice time between sampling periods will - * be less than 2^32 jiffies for 32 bit sys - */ - cur_nice_jiffies = (unsigned long) - cputime64_to_jiffies64(cur_nice); - - j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - idle_time += jiffies_to_usecs(cur_nice_jiffies); - } - /* - * For the purpose of ondemand, waiting for disk IO is an - * indication that you're performance critical, and not that - * the system is actually idle. So subtract the iowait time - * from the cpu idle time. 
- */ - - if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) - idle_time -= iowait_time; - - if (unlikely(!wall_time || wall_time < idle_time)) - continue; - - load = 100 * (wall_time - idle_time) / wall_time; - - freq_avg = __cpufreq_driver_getavg(policy, j); - if (freq_avg <= 0) - freq_avg = policy->cur; - - load_freq = load * freq_avg; - if (load_freq > max_load_freq) - max_load_freq = load_freq; - } - - /* Check for frequency increase */ - if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) { - -/* In case of increase to max freq., freq. scales by 2 step for reducing the current consumption*/ -#ifdef _LIMIT_LCD_OFF_CPU_MAX_FREQ_ - if(!cpufreq_gov_lcd_status) { - if (policy->cur < policy->max) { - if (policy->cur < 400000) dbs_freq_increase(policy, 800000); - else if (policy->cur < 800000) dbs_freq_increase(policy, 1000000); - else { - this_dbs_info->rate_mult = dbs_tuners_ins.sampling_down_factor; - dbs_freq_increase(policy, policy->max); - } - } - return; - } else -#endif - /* If switching to max speed, apply sampling_down_factor */ - if (policy->cur < policy->max) - this_dbs_info->rate_mult = - dbs_tuners_ins.sampling_down_factor; - dbs_freq_increase(policy, policy->max); - return; - } - - /* Check for frequency decrease */ - /* if we cannot reduce the frequency anymore, break out early */ - if (policy->cur == policy->min) - return; - - /* - * The optimal frequency is the frequency that is the lowest that - * can support the current CPU usage without triggering the up - * policy. To be safe, we focus 10 points under the threshold. - */ - if (max_load_freq < - (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * - policy->cur) { - unsigned int freq_next; - freq_next = max_load_freq / - (dbs_tuners_ins.up_threshold - - dbs_tuners_ins.down_differential); - - /* No longer fully busy, reset rate_mult */ - this_dbs_info->rate_mult = 1; - - if (freq_next < policy->min) - freq_next = policy->min; - - if (!dbs_tuners_ins.powersave_bias) { - __cpufreq_driver_target(policy, freq_next, - CPUFREQ_RELATION_L); - } else { - int freq = powersave_bias_target(policy, freq_next, - CPUFREQ_RELATION_L); - __cpufreq_driver_target(policy, freq, - CPUFREQ_RELATION_L); - } - } -} - -static void do_dbs_timer(struct work_struct *work) -{ - struct cpu_dbs_info_s *dbs_info = - container_of(work, struct cpu_dbs_info_s, work.work); - unsigned int cpu = dbs_info->cpu; - int sample_type = dbs_info->sample_type; - - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate - * dbs_info->rate_mult); - -#if 0 - /* Don't care too much about synchronizing the workqueue in both cpus */ - if (num_online_cpus() > 1) - delay -= jiffies % delay; -#endif - - mutex_lock(&dbs_info->timer_mutex); - - /* Common NORMAL_SAMPLE setup */ - dbs_info->sample_type = DBS_NORMAL_SAMPLE; - if (!dbs_tuners_ins.powersave_bias || - sample_type == DBS_NORMAL_SAMPLE) { - dbs_check_cpu(dbs_info); - if (dbs_info->freq_lo) { - /* Setup timer for SUB_SAMPLE */ - dbs_info->sample_type = DBS_SUB_SAMPLE; - delay = dbs_info->freq_hi_jiffies; - } - } else { - __cpufreq_driver_target(dbs_info->cur_policy, - dbs_info->freq_lo, CPUFREQ_RELATION_H); - } - queue_delayed_work_on(cpu, kintellidemand_wq, &dbs_info->work, delay); - mutex_unlock(&dbs_info->timer_mutex); -} - -static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) -{ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - dbs_info->sample_type = DBS_NORMAL_SAMPLE; - 
INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); - queue_delayed_work_on(dbs_info->cpu, kintellidemand_wq, &dbs_info->work, - delay); -} - -static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) -{ - cancel_delayed_work_sync(&dbs_info->work); -} - -/* - * Not all CPUs want IO time to be accounted as busy; this dependson how - * efficient idling at a higher frequency/voltage is. - * Pavel Machek says this is not so for various generations of AMD and old - * Intel systems. - * Mike Chan (androidlcom) calis this is also not true for ARM. - * Because of this, whitelist specific known (series) of CPUs by default, and - * leave all others up to the user. - */ -static int should_io_be_busy(void) -{ -#if defined(CONFIG_X86) - /* - * For Intel, Core 2 (model 15) andl later have an efficient idle. - */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - boot_cpu_data.x86 == 6 && - boot_cpu_data.x86_model >= 15) - return 1; -#endif -#if defined(CONFIG_ARM) - return 1; -#endif - return 0; -} - -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int j; - int rc; - - this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - //per_cpu(cpu_load, cpu) = 0; - mutex_lock(&dbs_mutex); - - dbs_enable++; - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - j_dbs_info->prev_cpu_nice = - kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - } - this_dbs_info->cpu = cpu; - this_dbs_info->rate_mult = 1; - intellidemand_powersave_bias_init_cpu(cpu); - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 1) { - unsigned int latency; - - rc = sysfs_create_group(cpufreq_global_kobject, - &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - /* policy latency is in nS. 
Convert it to uS first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - /* Bring kernel and HW constraints together */ - min_sampling_rate = max(min_sampling_rate, - MIN_LATENCY_MULTIPLIER * latency); - dbs_tuners_ins.sampling_rate = - max(min_sampling_rate, - latency * LATENCY_MULTIPLIER); - dbs_tuners_ins.io_is_busy = should_io_be_busy(); - } - mutex_unlock(&dbs_mutex); - - mutex_init(&this_dbs_info->timer_mutex); - dbs_timer_init(this_dbs_info); - break; - - case CPUFREQ_GOV_STOP: - dbs_timer_exit(this_dbs_info); - - mutex_lock(&dbs_mutex); - mutex_destroy(&this_dbs_info->timer_mutex); - dbs_enable--; - mutex_unlock(&dbs_mutex); - if (!dbs_enable) - sysfs_remove_group(cpufreq_global_kobject, - &dbs_attr_group); - - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&this_dbs_info->timer_mutex); - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); - mutex_unlock(&this_dbs_info->timer_mutex); - break; - } - return 0; -} - -#ifdef _LIMIT_LCD_OFF_CPU_MAX_FREQ_ -#ifdef CONFIG_HAS_EARLYSUSPEND -static void cpufreq_gov_suspend(struct early_suspend *h) -{ - cpufreq_gov_lcd_status = 0; - - pr_info("%s : cpufreq_gov_lcd_status %d\n", __func__, cpufreq_gov_lcd_status); -} - -static void cpufreq_gov_resume(struct early_suspend *h) -{ - cpufreq_gov_lcd_status = 1; - - pr_info("%s : cpufreq_gov_lcd_status %d\n", __func__, cpufreq_gov_lcd_status); -} -#endif -#endif - -static int __init cpufreq_gov_dbs_init(void) -{ - int err; - cputime64_t wall; - u64 idle_time; - int cpu = get_cpu(); - - idle_time = get_cpu_idle_time_us(cpu, &wall); - put_cpu(); - if (idle_time != -1ULL) { - /* Idle micro accounting is supported. Use finer thresholds */ - dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; - dbs_tuners_ins.down_differential = - MICRO_FREQUENCY_DOWN_DIFFERENTIAL; - /* - * In no_hz/micro accounting case we set the minimum frequency - * not depending on HZ, but fixed (very low). The deferred - * timer might skip some samples if idle/sleeping as needed. 
- */ - min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; - } else { - /* For correct statistics, we need 10 ticks for each measure */ - min_sampling_rate = - MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(1); - } - - kintellidemand_wq = create_workqueue("kintellidemand"); - if (!kintellidemand_wq) { - printk(KERN_ERR "Creation of kintellidemand failed\n"); - return -EFAULT; - } - err = cpufreq_register_governor(&cpufreq_gov_intellidemand); - if (err) - destroy_workqueue(kintellidemand_wq); - -#ifdef _LIMIT_LCD_OFF_CPU_MAX_FREQ_ -#ifdef CONFIG_HAS_EARLYSUSPEND - cpufreq_gov_lcd_status = 1; - - cpufreq_gov_early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; - - cpufreq_gov_early_suspend.suspend = cpufreq_gov_suspend; - cpufreq_gov_early_suspend.resume = cpufreq_gov_resume; - register_early_suspend(&cpufreq_gov_early_suspend); -#endif -#endif - - return err; -} - -static void __exit cpufreq_gov_dbs_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_intellidemand); - destroy_workqueue(kintellidemand_wq); -} - - -MODULE_AUTHOR("Venkatesh Pallipadi "); -MODULE_AUTHOR("Alexey Starikovskiy "); -MODULE_DESCRIPTION("'cpufreq_intellidemand' - A dynamic cpufreq governor for " - "Low Latency Frequency Transition capable processors"); -MODULE_LICENSE("GPL"); - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND -fs_initcall(cpufreq_gov_dbs_init); -#else -module_init(cpufreq_gov_dbs_init); -#endif -module_exit(cpufreq_gov_dbs_exit); - - From e7e5c5d7e5d86150d48bf2848c6d53382aceb948 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Thu, 8 Aug 2013 01:06:26 -0400 Subject: [PATCH 30/35] Revert "gdflgndsnfdkn CPU SHIZZ" This reverts commit 52bfd49456c92b3ffbbe51d21fc0422d6b99adb8. --- drivers/cpufreq/Kconfig | 549 +++----------- drivers/cpufreq/Makefile | 11 +- drivers/cpufreq/cpufreq_adaptive.c | 952 ++++++++++++++++++++++++ drivers/cpufreq/cpufreq_intellidemand.c | 890 ++++++++++++++++++++++ 4 files changed, 1929 insertions(+), 473 deletions(-) create mode 100644 drivers/cpufreq/cpufreq_adaptive.c create mode 100644 drivers/cpufreq/cpufreq_intellidemand.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index d59b441c..2e07ae54 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -103,151 +103,25 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE governor. If unsure have a look at the help section of the driver. Fallback governor will be the performance governor. -config CPU_FREQ_DEFAULT_GOV_LAGFREE - bool "lagfree" - select CPU_FREQ_GOV_LAGFREE - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lagfree' as default. This allows - you to get a full dynamic frequency capable system by simply - loading your cpufreq low-level hardware driver. - Be aware that not all cpufreq drivers support the lagfree - governor. If unsure have a look at the help section of the - driver. Fallback governor will be the performance governor. config CPU_FREQ_DEFAULT_GOV_DANCEDANCE bool "dancedance" select CPU_FREQ_GOV_DANCEDANCE help -config CPU_FREQ_DEFAULT_GOV_SMARTASS2 - bool "smartass2" - select CPU_FREQ_GOV_SMARTASS2 - help - Use the CPUFreq governor 'smartassV2' as default. - config CPU_FREQ_DEFAULT_GOV_NIGHTMARE - bool "nightmare" - select CPU_FREQ_GOV_NIGHTMARE - help - -config CPU_FREQ_DEFAULT_GOV_INTERACTIVEX - bool "interactiveX" - select CPU_FREQ_GOV_INTERACTIVEX - help - Use the CPUFreq governor 'interactiveX' as default. 
This allows - you to get a full dynamic cpu frequency capable system by simply - loading your cpufreq low-level hardware driver, using the - 'interactiveX' governor for latency-sensitive workloads. - -config CPU_FREQ_DEFAULT_GOV_SMARTASS - bool "smartass" - select CPU_FREQ_GOV_SMARTASS - help - Use the CPUFreq governor 'smartass' as default. - -config CPU_FREQ_DEFAULT_GOV_LAZY - - bool "lazy" - select CPU_FREQ_GOV_LAZY - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lazy' as default. - Fallback governor will be the performance governor. - -config CPU_FREQ_DEFAULT_GOV_LULZACTIVE - - bool "lulzactive" - select CPU_FREQ_GOV_LULZACTIVE - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'lulzactive' as default. - Fallback governor will be the performance governor. - -config CPU_FREQ_DEFAULT_GOV_ONDEMANDX - - bool "ondemandx" - select CPU_FREQ_GOV_ONDEMANDX - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'ondemandX' as default. - Fallback governor will be the performance governor. - -config CPU_FREQ_DEFAULT_GOV_INTERACTIVEX - bool "interactiveX" - select CPU_FREQ_GOV_INTERACTIVEX + bool "nightmare" + select CPU_FREQ_GOV_NIGHTMARE + help config CPU_FREQ_DEFAULT_GOV_ONDEMAND bool "ondemand" + select CPU_FREQ_GOV_ONDEMAND select CPU_FREQ_GOV_PERFORMANCE help - Use the CPUFreq governor 'interactivex' as default. This allows - you to get a full dynamic cpu frequency capable system by simply - loading your cpufreq low-level hardware driver, using the - 'interactivex' governor for latency-sensitive workloads. -config CPU_FREQ_DEFAULT_GOV_MINMAX - - bool "minmax" - select CPU_FREQ_GOV_MINMAX - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'minmax' as default. This minimizes the - frequency jumps does by the governor. This is aimed at maximizing - both perfomance and battery life. - -config CPU_FREQ_DEFAULT_GOV_SavagedZen - - bool "smartass" - select CPU_FREQ_GOV_SavagedZen - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'SavagedZen' as default. - -config CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND - - bool "intellidemand" - select CPU_FREQ_GOV_INTELLIDEMAND - help - Use the CPUFreq governor 'intellidemand' as default. - -config CPU_FREQ_DEFAULT_GOV_BRAZILIANWAX - bool "brazilianwax" - select CPU_FREQ_GOV_BRAZILIANWAX - help - Use the CPUFreq governor 'brazilianwax' as default. - -config CPU_FREQ_DEFAULT_GOV_WHEATLEY - bool "wheatley" - select CPU_FREQ_GOV_WHEATLEY - help - Use as default governor - -config CPU_FREQ_DEFAULT_GOV_SCARY - bool "scary" - select CPU_FREQ_GOV_SCARY - help - Use as default governor - -config CPU_FREQ_DEFAULT_GOV_SMOOTHASS - bool "smoothass" - select CPU_FREQ_GOV_SMOOTHASS - help - Use as default governor - -config CPU_FREQ_DEFAULT_GOV_ASSWAX - bool "asswax" - select CPU_FREQ_GOV_ASSWAX - help - Use as default governor - - -config CPU_FREQ_DEFAULT_GOV_BADASS - bool "badass" - select CPU_FREQ_GOV_BADASS - help - Use the CPUFreq governor 'badass' as default. This allows + Use the CPUFreq governor 'ondemand' as default. This allows you to get a full dynamic frequency capable system by simply loading your cpufreq low-level hardware driver. - Be aware that not all cpufreq drivers support the badass + Be aware that not all cpufreq drivers support the ondemand governor. If unsure have a look at the help section of the driver. Fallback governor will be the performance governor. 
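The ondemand-style entries above, like the dbs_check_cpu() bodies of the adaptive and intellidemand governors elsewhere in this series, share one two-threshold decision rule: ramp up when the load-weighted frequency exceeds up_threshold times the current frequency, otherwise drop to the lowest frequency that keeps the load a few points (down_differential) under that threshold. A minimal standalone sketch of that rule, using hypothetical names and plain integers rather than the kernel types used in the patch:

/*
 * Illustrative sketch of the two-threshold decision used by the
 * ondemand-style governors in this series (see dbs_check_cpu() in
 * cpufreq_adaptive.c / cpufreq_intellidemand.c). Names are not the
 * governors' own.
 */
static unsigned int pick_target_freq(unsigned int max_load_freq,
				     unsigned int cur, unsigned int min,
				     unsigned int max,
				     unsigned int up_threshold,
				     unsigned int down_differential)
{
	/* Load (expressed as load * freq) above the up threshold: go to max. */
	if (max_load_freq > up_threshold * cur)
		return max;

	/* Already at the floor: nothing left to reduce. */
	if (cur == min)
		return cur;

	/*
	 * Otherwise pick the lowest frequency that still keeps the load
	 * down_differential points under the up threshold.
	 */
	if (max_load_freq < (up_threshold - down_differential) * cur) {
		unsigned int next = max_load_freq /
				    (up_threshold - down_differential);
		return next < min ? min : next;
	}

	return cur;
}

With the micro-accounting defaults seen in this series (up_threshold of 85 or 95, down_differential of 3), that leaves the chosen frequency a few points below the next ramp-up trigger.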
@@ -274,43 +148,20 @@ config CPU_FREQ_DEFAULT_GOV_POWERSAVE the frequency statically to the lowest frequency supported by the CPU. - For details, take a look at . - - If in doubt, say Y. - -config CPU_FREQ_GOV_SMARTASS2 - tristate "'smartassV2' cpufreq governor" - depends on CPU_FREQ - help - 'smartassV2' - a "smart" governor - - If in doubt, say N. - config CPU_FREQ_DEFAULT_GOV_SMARTASSH3 bool "smartassH3" select CPU_FREQ_GOV_SMARTASSH3 help - -config CPU_FREQ_GOV_BRAZILIANWAX - tristate "'brazilianwax' cpufreq governor" - depends on CPU_FREQ - help - 'brazilianwax' - a "smart" governor - - If in doubt, say N. - -config CPU_FREQ_GOV_INTERACTIVEX - tristate "'interactiveX' cpufreq policy governor" - help - 'interactiveX' - Modified version of interactive with sleep+wake code. + Use the CPUFreq governor 'slp' as default. -config CPU_FREQ_GOV_LULZACTIVE - tristate "'lulzactive' cpufreq governor" - depends on CPU_FREQ +config CPU_FREQ_DEFAULT_GOV_USERSPACE + bool "userspace" + select CPU_FREQ_GOV_USERSPACE help - 'lulzactive' - a new interactive governor by Tegrak! - - If in doubt, say N. + Use the CPUFreq governor 'userspace' as default. This allows + you to set the CPU frequency manually or when a userspace + program shall be able to set the CPU dynamically without having + to enable the userspace governor manually. config CPU_FREQ_DEFAULT_GOV_WHEATLEY bool "wheatley" @@ -339,89 +190,32 @@ config CPU_FREQ_GOV_ABYSSPLUG 'ondemand' instead. If in doubt, say N. -config CPU_FREQ_GOV_ONDEMAND - tristate "'ondemand' cpufreq policy governor" - select CPU_FREQ_TABLE +config CPU_FREQ_GOV_ADAPTIVE + tristate "'adaptive' cpufreq policy governor" help - 'ondemand' - This driver adds a dynamic cpufreq policy governor. - The governor does a periodic polling and - changes frequency based on the CPU utilization. - The support for this governor depends on CPU capability to - do fast frequency switching (i.e, very low latency frequency - transitions). + 'adaptive' - This driver adds a dynamic cpufreq policy governor + designed for latency-sensitive workloads and also for demanding + performance. + + This governor attempts to reduce the latency of clock + increases so that the system is more responsive to + interactive workloads in loweset steady-state but to + to reduce power consumption in middle operation level level up + will be done in step by step to prohibit system from going to + max operation level. To compile this driver as a module, choose M here: the - module will be called cpufreq_ondemand. + module will be called cpufreq_adaptive. For details, take a look at linux/Documentation/cpu-freq. If in doubt, say N. -config CPU_FREQ_GOV_ONDEMAND_2_PHASE - tristate "'2-phase' power-efficiency ondemand algorithm" - depends on CPU_FREQ_GOV_ONDEMAND - help - '2-phase' - This driver adds a new algo to save power - -config CPU_FREQ_GOV_LAZY - tristate "'lazy' cpufreq governor" - depends on CPU_FREQ - config CPU_FREQ_GOV_ASSWAX tristate "'asswax' cpufreq governor" depends on CPU_FREQ - -config CPU_FREQ_GOV_LAGFREE - tristate "'lagfree' cpufreq governor" - depends on CPU_FREQ - help - 'lagfree' - this driver is rather similar to the 'ondemand' - governor both in its source code and its purpose, the difference is - its optimisation for better suitability in a battery powered - environment. The frequency is gracefully increased and decreased - rather than jumping to 100% when speed is required. 
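The 'lagfree' help above promises gradual ramping rather than jumping to 100%; together with the LAGFREE_* tunables that follow (max_cpu_load, min_cpu_load, freq_step_down), the behaviour it describes is roughly the following. The names are hypothetical, and step_up stands in for the governor's actual ramp-up step, since only the down step is given a default here:

/*
 * Rough sketch of the stepwise behaviour described for 'lagfree'.
 * step_up is a hypothetical stand-in; freq_step_down mirrors the
 * LAGFREE_FREQ_STEP_DOWN tunable (108000 kHz by default).
 */
static unsigned int lagfree_like_step(unsigned int cur, unsigned int load,
				      unsigned int min, unsigned int max,
				      unsigned int max_cpu_load,   /* e.g. 50 */
				      unsigned int min_cpu_load,   /* e.g. 15 */
				      unsigned int step_up,        /* hypothetical */
				      unsigned int freq_step_down) /* e.g. 108000 */
{
	if (load > max_cpu_load) {
		/* Ramp up by one step instead of jumping to max. */
		unsigned int next = cur + step_up;
		return next > max ? max : next;
	}
	if (load < min_cpu_load) {
		/* Ramp down by at most freq_step_down. */
		return (cur > min + freq_step_down) ? cur - freq_step_down : min;
	}
	return cur;
}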
- -config LAGFREE_MAX_LOAD - int "Max CPU Load" - default 50 - depends on CPU_FREQ_GOV_LAGFREE - help - CPU freq will be increased if measured load > max_cpu_load; - -config LAGFREE_MIN_LOAD - int "Min CPU Load" - default 15 - depends on CPU_FREQ_GOV_LAGFREE - help - CPU freq will be decrease if measured load < min_cpu_load; - -config LAGFREE_FREQ_STEP_DOWN - int "Frequency Step Down" - default 108000 - depends on CPU_FREQ_GOV_LAGFREE - help - Max freqeuncy delta when ramping down. - -config LAGFREE_FREQ_SLEEP_MAX - int "Max Sleep frequeny" - default 384000 - depends on CPU_FREQ_GOV_LAGFREE - help - Max freqeuncy for screen off. - -config LAGFREE_FREQ_AWAKE_MIN - int "Min Awake frequeny" - default 384000 - depends on CPU_FREQ_GOV_LAGFREE - help - Min freqeuncy for screen on. - -config LAGFREE_FREQ_STEP_UP_SLEEP_PERCENT - int "Freq step up percent sleep" - default 20 - depends on CPU_FREQ_GOV_LAGFREE help - Frequency percent to step up while screen off. + Use as default governors config CPU_FREQ_GOV_BADASS tristate "'badass' cpufreq policy governor" @@ -431,78 +225,11 @@ config CPU_FREQ_GOV_BADASS The governor does a periodic polling and changes frequency based on the CPU utilization. The support for this governor depends on CPU capability to - do fast frequency switching (i.e, very low latency frequency - transitions). - + do fast frequency switching (i.e, very low latency frequency transitions). To compile this driver as a module, choose M here: the module will be called cpufreq_badass. - If in doubt, say N. -config CPU_FREQ_GOV_BADASS_2_PHASE - tristate "'2-phase' power-efficiency badass algorithm" - depends on CPU_FREQ_GOV_BADASS - help - '2-phase' - This driver adds a new algo to save power - -config CPU_FREQ_GOV_BADASS_2_PHASE_FREQ - int "'2-phase' badass frequency" - default 918000 - depends on CPU_FREQ_GOV_BADASS - depends on CPU_FREQ_GOV_BADASS_2_PHASE - -config CPU_FREQ_GOV_BADASS_3_PHASE - tristate "'3-phase' power-efficiency badass algorithm" - depends on CPU_FREQ_GOV_BADASS - depends on CPU_FREQ_GOV_BADASS_2_PHASE - help - '3-phase' - This driver adds a new algo to save power - -config CPU_FREQ_GOV_BADASS_3_PHASE_FREQ - int "'3-phase' badass frequency" - default 1188000 - depends on CPU_FREQ_GOV_BADASS - depends on CPU_FREQ_GOV_BADASS_2_PHASE - depends on CPU_FREQ_GOV_BADASS_3_PHASE - - -config CPU_FREQ_GOV_INTERACTIVE - tristate "'interactive' cpufreq policy governor" - help - 'interactive' - This driver adds a dynamic cpufreq policy governor - designed for latency-sensitive workloads. - - This governor attempts to reduce the latency of clock - increases so that the system is more responsive to - interactive workloads. - - To compile this driver as a module, choose M here: the - module will be called cpufreq_interactive. - - For details, take a look at linux/Documentation/cpu-freq. - - If in doubt, say N. - -config CPU_FREQ_GOV_SMARTASS2 - tristate "'smartassV2' cpufreq governor" - depends on CPU_FREQ - help - 'smartassV2' - a "smart" governor - If in doubt, say N. - -config CPU_FREQ_DEFAULT_GOV_HOTPLUG - bool "hotplug" - select CPU_FREQ_GOV_HOTPLUG - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'hotplug' as default. This allows you - to get a full dynamic frequency capable system with CPU - hotplug support by simply loading your cpufreq low-level - hardware driver. Be aware that not all cpufreq drivers - support the hotplug governor. If unsure have a look at - the help section of the driver. Fallback governor will be the - performance governor. 
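The badass 2-phase/3-phase options above (918000 and 1188000 kHz defaults) and the intellidemand LCD-off path earlier in this series both stage the climb toward policy->max rather than taking it in one jump. A purely illustrative cap on the ramp target could look like this; the function and parameter names are not taken from either governor:

/*
 * Illustration only: phase-capped ramp in the spirit of the badass
 * 2-phase/3-phase frequencies and the staged increase in the
 * intellidemand LCD-off path.
 */
static unsigned int phase_capped_target(unsigned int cur, unsigned int max,
					unsigned int phase2_freq, /* e.g. 918000 */
					unsigned int phase3_freq) /* e.g. 1188000 */
{
	if (cur < phase2_freq)
		return phase2_freq;	/* first stop: 2-phase cap */
	if (cur < phase3_freq)
		return phase3_freq;	/* second stop: 3-phase cap */
	return max;			/* only then go all the way up */
}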
- config CPU_FREQ_GOV_CONSERVATIVE tristate "'conservative' cpufreq governor" depends on CPU_FREQ @@ -534,206 +261,98 @@ config CPU_FREQ_GOV_NIGHTMARE tristate "'nightmare' cpufreq governor" depends on CPU_FREQ -config CPU_FREQ_GOV_SMOOTHASS - tristate "'smoothass' cpufreq governor" - depends on CPU_FREQ - help - Yet another governor by erasmux - -config CPU_FREQ_GOV_SCARY - tristate "'scary' cpufreq governor" - depends on CPU_FREQ - help - Use as default governor - -config CPU_FREQ_GOV_ASSWAX - tristate "'asswax' cpufreq governor" - depends on CPU_FREQ - help - Use as default governors - -config CPU_FREQ_GOV_LAGFREE - tristate "'lagfree' cpufreq governor" - depends on CPU_FREQ - help - 'lagfree' - this driver is rather similar to the 'ondemand' - governor both in its source code and its purpose, the difference is - its optimisation for better suitability in a battery powered - environment. The frequency is gracefully increased and decreased - rather than jumping to 100% when speed is required. - -config LAGFREE_MAX_LOAD - int "Max CPU Load" - default 50 - depends on CPU_FREQ_GOV_LAGFREE - help - CPU freq will be increased if measured load > max_cpu_load; - -config LAGFREE_MIN_LOAD - int "Min CPU Load" - default 15 - depends on CPU_FREQ_GOV_LAGFREE - help - CPU freq will be decrease if measured load < min_cpu_load; - -config LAGFREE_FREQ_STEP_DOWN - int "Frequency Step Down" - default 108000 - depends on CPU_FREQ_GOV_LAGFREE - help - Max freqeuncy delta when ramping down. - -config LAGFREE_FREQ_SLEEP_MAX - int "Max Sleep frequeny" - default 384000 - depends on CPU_FREQ_GOV_LAGFREE - help - Max freqeuncy for screen off. - -config LAGFREE_FREQ_AWAKE_MIN - int "Min Awake frequeny" - default 384000 - depends on CPU_FREQ_GOV_LAGFREE - help - Min freqeuncy for screen on. - -config LAGFREE_FREQ_STEP_UP_SLEEP_PERCENT - int "Freq step up percent sleep" - default 20 - depends on CPU_FREQ_GOV_LAGFREE - help - Frequency percent to step up while screen off. - -config CPU_FREQ_MIN_TICKS - int "Ticks between governor polling interval." - default 10 - help - Minimum number of ticks between polling interval for governors. - -config CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER - int "Sampling rate multiplier for governors." - default 1000 +config CPU_FREQ_GOV_ONDEMAND + tristate "'ondemand' cpufreq policy governor" + select CPU_FREQ_TABLE help - Sampling latency rate multiplied by the cpu switch latency. - Affects governor polling. + 'ondemand' - This driver adds a dynamic cpufreq policy governor. + The governor does a periodic polling and + changes frequency based on the CPU utilization. + The support for this governor depends on CPU capability to + do fast frequency switching (i.e, very low latency frequency + transitions). -config CPU_FREQ_GOV_SMARTASS - tristate "'smartass' cpufreq governor" - depends on CPU_FREQ - help - 'smartass' - a "smart" governor - If in doubt, say N. - -config CPU_FREQ_GOV_LAZY + To compile this driver as a module, choose M here: the + module will be called cpufreq_ondemand. - tristate "'lazy' cpufreq governor" - depends on CPU_FREQ - help - 'lazy' - a "lazy" governor + For details, take a look at linux/Documentation/cpu-freq. If in doubt, say N. -config CPU_FREQ_GOV_LULZACTIVE - - tristate "'lulzactive' cpufreq governor" - depends on CPU_FREQ +config CPU_FREQ_GOV_PERFORMANCE + tristate "'performance' governor" help - 'lulzactive' - a new interactive governor by Tegrak! + This cpufreq governor sets the frequency statically to the + highest available CPU frequency. - If in doubt, say N. 
+ To compile this driver as a module, choose M here: the + module will be called cpufreq_performance. -config CPU_FREQ_GOV_ONDEMANDX + If in doubt, say Y. - tristate "'ondemandx' cpufreq governor" - depends on CPU_FREQ +config CPU_FREQ_GOV_POWERSAVE + tristate "'powersave' governor" help - 'ondemandx' - a new iteration of the ondemand governor - - If in doubt, say N. - -config CPU_FREQ_GOV_INTERACTIVEX + This cpufreq governor sets the frequency statically to the + lowest available CPU frequency. - tristate "'interactiveX' cpufreq governor" - depends on CPU_FREQ - help - 'interactiveX' - Modified version of interactive with sleep+wake code. + To compile this driver as a module, choose M here: the + module will be called cpufreq_powersave. - If in doubt, say N. + If in doubt, say Y. -config CPU_FREQ_GOV_MINMAX +config CPU_FREQ_GOV_SLP + tristate "'slp' cpufreq policy governor" - tristate "'minmax' cpufreq governor" +config CPU_FREQ_GOV_SMARTASSH3 + tristate "'smartassH3' cpufreq governor" depends on CPU_FREQ help - 'minmax' - this driver tries to minimize the frequency jumps by limiting - the the selected frequencies to either the min or the max frequency of - the policy. The frequency is selected according to the load. + 'smartassH3' - a "smart" governor - If in doubt, say N. +config CPU_FREQ_GOV_USERSPACE + tristate "'userspace' governor for userspace frequency scaling" + help + Enable this cpufreq governor when you either want to set the + CPU frequency manually or when a userspace program shall + be able to set the CPU dynamically, like on LART + . -config CPU_FREQ_GOV_SavagedZen + To compile this driver as a module, choose M here: the + module will be called cpufreq_userspace. - tristate "'savagedzen' cpufreq governor" - depends on CPU_FREQ - help - 'Savaged-Zen' - a smartass based governor + For details, take a look at . - If in doubt, say N. - -config CPU_FREQ_GOV_INTELLIDEMAND - tristate "'intellidemand' cpufreq governor" - depends on CPU_FREQ + If in doubt, say Y. config CPU_FREQ_GOV_WHEATLEY tristate "'wheatley' cpufreq governor" depends on CPU_FREQ - help - 'wheatley' - a performance based governor config SEC_DVFS bool "DVFS job" default n - depends on CPU_FREQ_STAT - help - 'intellidemand' - A dynamic cpufreq governor for Low Latency Frequency - Transition capable processors - - If in doubt, say N. - -config CPU_FREQ_GOV_BRAZILIANWAX - tristate "'brazilianwax' cpufreq governor" depends on CPU_FREQ - help - brazilianwax' - a "slightly more agressive smart" optimized governor! - - If in doubt, say Y. - + +config SEC_DVFS_BOOSTER + bool "DVFS input booster" + default y + depends on SEC_DVFS menu "x86 CPU frequency scaling drivers" depends on X86 source "drivers/cpufreq/Kconfig.x86" endmenu -config CPU_FREQ_GOV_HOTPLUG - tristate "'hotplug' cpufreq governor" - depends on CPU_FREQ && NO_HZ && HOTPLUG_CPU - help - 'hotplug' - this driver mimics the frequency scaling behavior - in 'ondemand', but with several key differences. First is - that frequency transitions use the CPUFreq table directly, - instead of incrementing in a percentage of the maximum - available frequency. Second 'hotplug' will offline auxillary - CPUs when the system is idle, and online those CPUs once the - system becomes busy again. This last feature is needed for - architectures which transition to low power states when only - the "master" CPU is online, or for thermally constrained - devices. - - If you don't have one of these architectures or devices, use - 'ondemand' instead. - - If in doubt, say N. 
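The 'hotplug' description above combines ondemand-style scaling with taking auxiliary CPUs offline while the system is idle and onlining them when load returns. Stripped of the frequency side, the core decision reduces to something like the sketch below; the thresholds and names are hypothetical, not the governor's own tunables:

/*
 * Sketch of the hotplug policy described above: offline auxiliary CPUs
 * when overall load stays low, bring them back once it rises.
 */
enum hp_action { HP_NONE, HP_CPU_DOWN, HP_CPU_UP };

static enum hp_action hotplug_like_decision(unsigned int avg_load,
					    unsigned int online_cpus,
					    unsigned int low_load,   /* e.g. 20 */
					    unsigned int high_load)  /* e.g. 80 */
{
	if (avg_load < low_load && online_cpus > 1)
		return HP_CPU_DOWN;	/* system idle: offline an auxiliary CPU */
	if (avg_load > high_load)
		return HP_CPU_UP;	/* busy again: bring a CPU back online */
	return HP_NONE;
}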
+menu "ARM CPU frequency scaling drivers" +depends on ARM +source "drivers/cpufreq/Kconfig.arm" +endmenu +menu "PowerPC CPU frequency scaling drivers" +depends on PPC32 || PPC64 +source "drivers/cpufreq/Kconfig.powerpc" +endmenu endif endmenu diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index bc6f220f..be135afd 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -16,20 +16,15 @@ obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o obj-$(CONFIG_CPU_FREQ_GOV_SLP) += cpufreq_slp.o obj-$(CONFIG_CPU_FREQ_GOV_SMARTASSH3) += cpufreq_smartassH3.o -obj-$(CONFIG_CPU_FREQ_GOV_lulzactive) += cpufreq_lulzactive.o -obj-$(CONFIG_CPU_FREQ_GOV_SavagedZen) += cpufreq_savagedzen.o -obj-$(CONFIG_CPU_FREQ_GOV_BRAZILIANWAX) += cpufreq_brazilianwax.o -obj-$(CONFIG_CPU_FREQ_GOV_SCARY) += cpufreq_scary.o -obj-$(CONFIG_CPU_FREQ_GOV_SMOOTHASS) += cpufreq_smoothass.o -obj-$(CONFIG_CPU_FREQ_GOV_SCARY) += cpufreq_scary.o -obj-$(CONFIG_CPU_FREQ_GOV_HOTPLUG) += cpufreq_hotplug.o obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o +obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o +obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o -##################################################################################d +################################################################################## # x86 drivers. # Link order matters. K8 is preferred to ACPI because of firmware bugs in early # K8 systems. ACPI is preferred to all other hardware-specific drivers. diff --git a/drivers/cpufreq/cpufreq_adaptive.c b/drivers/cpufreq/cpufreq_adaptive.c new file mode 100644 index 00000000..2eff3e28 --- /dev/null +++ b/drivers/cpufreq/cpufreq_adaptive.c @@ -0,0 +1,952 @@ +/* + * drivers/cpufreq/cpufreq_adaptive.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) +#define DEF_FREQUENCY_UP_THRESHOLD (80) +#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) +#define MICRO_FREQUENCY_UP_THRESHOLD (95) +#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) +#define MIN_FREQUENCY_UP_THRESHOLD (11) +#define MAX_FREQUENCY_UP_THRESHOLD (100) +#define MIN_ONDEMAND_THRESHOLD (4) +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. + * All times here are in uS. 
+ */ +#define MIN_SAMPLING_RATE_RATIO (2) + +static unsigned int min_sampling_rate; + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +static void (*pm_idle_old)(void); +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ADAPTIVE +static +#endif +struct cpufreq_governor cpufreq_gov_adaptive = { + .name = "adaptive", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_hi_jiffies; + int cpu; + unsigned int sample_type:1; + bool ondemand; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on + * different CPUs. It protects dbs_enable in governor start/stop. + */ +static DEFINE_MUTEX(dbs_mutex); +static struct task_struct *up_task; +static struct workqueue_struct *down_wq; +static struct work_struct freq_scale_down_work; +static cpumask_t up_cpumask; +static spinlock_t up_cpumask_lock; +static cpumask_t down_cpumask; +static spinlock_t down_cpumask_lock; + +static DEFINE_PER_CPU(cputime64_t, idle_in_idle); +static DEFINE_PER_CPU(cputime64_t, idle_exit_wall); + +static struct timer_list cpu_timer; +static unsigned int target_freq; +static DEFINE_MUTEX(short_timer_mutex); + +/* Go to max speed when CPU load at or above this value. 
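+ * The load compared against it is the short-term busy percentage computed
+ * in cpufreq_adaptive_timer() below; with the default of 60, any short
+ * sample that is at least 60% busy requests policy->max directly.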
*/ +#define DEFAULT_GO_MAXSPEED_LOAD 60 +static unsigned long go_maxspeed_load; + +#define DEFAULT_KEEP_MINSPEED_LOAD 30 +static unsigned long keep_minspeed_load; + +#define DEFAULT_STEPUP_LOAD 10 +static unsigned long step_up_load; + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int ignore_nice; + unsigned int io_is_busy; +} dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, + .ignore_nice = 0, +}; + +static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +static void adaptive_init_cpu(int cpu) +{ + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + dbs_info->freq_table = cpufreq_frequency_get_table(cpu); +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_max(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + printk_once(KERN_INFO "CPUFREQ: adaptive sampling_rate_max " + "sysfs file is deprecated - used by: %s\n", current->comm); + return sprintf(buf, "%u\n", -1U); +} + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +define_one_global_ro(sampling_rate_max); +define_one_global_ro(sampling_rate_min); + +/* cpufreq_adaptive Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(up_threshold, up_threshold); +show_one(ignore_nice_load, ignore_nice); + +/*** delete after deprecation time ***/ + +#define DEPRECATION_MSG(file_name) \ + printk_once(KERN_INFO "CPUFREQ: Per core adaptive sysfs " \ + "interface is deprecated - " #file_name "\n"); + +#define show_one_old(file_name) \ +static ssize_t show_##file_name##_old \ +(struct cpufreq_policy *unused, char *buf) \ +{ \ + printk_once(KERN_INFO "CPUFREQ: Per core adaptive sysfs " \ + "interface is deprecated - " #file_name "\n"); \ + return show_##file_name(NULL, NULL, buf); \ +} + +/*** delete after deprecation time ***/ + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.io_is_busy = !!input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.up_threshold = input; + mutex_unlock(&dbs_mutex); + + return 
count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + mutex_lock(&dbs_mutex); + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + mutex_unlock(&dbs_mutex); + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time_us(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + + } + mutex_unlock(&dbs_mutex); + + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(up_threshold); +define_one_global_rw(ignore_nice_load); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_max.attr, + &sampling_rate_min.attr, + &sampling_rate.attr, + &up_threshold.attr, + &ignore_nice_load.attr, + &io_is_busy.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "adaptive", +}; + +/*** delete after deprecation time ***/ + +#define write_one_old(file_name) \ +static ssize_t store_##file_name##_old \ +(struct cpufreq_policy *unused, const char *buf, size_t count) \ +{ \ + printk_once(KERN_INFO "CPUFREQ: Per core adaptive sysfs " \ + "interface is deprecated - " #file_name "\n"); \ + return store_##file_name(NULL, NULL, buf, count); \ +} + +static void cpufreq_adaptive_timer(unsigned long data) +{ + cputime64_t cur_idle; + cputime64_t cur_wall; + unsigned int delta_idle; + unsigned int delta_time; + int short_load; + unsigned int new_freq; + unsigned long flags; + struct cpu_dbs_info_s *this_dbs_info; + struct cpufreq_policy *policy; + unsigned int j; + unsigned int index; + unsigned int max_load = 0; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); + + policy = this_dbs_info->cur_policy; + + for_each_online_cpu(j) { + cur_idle = get_cpu_idle_time_us(j, &cur_wall); + + delta_idle = (unsigned int) cputime64_sub(cur_idle, + per_cpu(idle_in_idle, j)); + delta_time = (unsigned int) cputime64_sub(cur_wall, + per_cpu(idle_exit_wall, j)); + + /* + * If timer ran less than 1ms after short-term sample started, retry. 
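+ * Both deltas come from get_cpu_idle_time_us() and are in microseconds,
+ * so the check below skips windows shorter than 1000 uS (1 mS). For a
+ * window of wall = 2000 uS with idle = 500 uS, short_load works out to
+ * 100 * (2000 - 500) / 2000 = 75%.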
+ */ + if (delta_time < 1000) + goto do_nothing; + + if (delta_idle > delta_time) + short_load = 0; + else + short_load = 100 * (delta_time - delta_idle) / delta_time; + + if (short_load > max_load) + max_load = short_load; + } + + if (this_dbs_info->ondemand) + goto do_nothing; + + if (max_load >= go_maxspeed_load) + new_freq = policy->max; + else + new_freq = policy->max * max_load / 100; + + if ((max_load <= keep_minspeed_load) && + (policy->cur == policy->min)) + new_freq = policy->cur; + + if (cpufreq_frequency_table_target(policy, this_dbs_info->freq_table, + new_freq, CPUFREQ_RELATION_L, + &index)) { + goto do_nothing; + } + + new_freq = this_dbs_info->freq_table[index].frequency; + + target_freq = new_freq; + + if (new_freq < this_dbs_info->cur_policy->cur) { + spin_lock_irqsave(&down_cpumask_lock, flags); + cpumask_set_cpu(0, &down_cpumask); + spin_unlock_irqrestore(&down_cpumask_lock, flags); + queue_work(down_wq, &freq_scale_down_work); + } else { + spin_lock_irqsave(&up_cpumask_lock, flags); + cpumask_set_cpu(0, &up_cpumask); + spin_unlock_irqrestore(&up_cpumask_lock, flags); + wake_up_process(up_task); + } + + return; + +do_nothing: + for_each_online_cpu(j) { + per_cpu(idle_in_idle, j) = + get_cpu_idle_time_us(j, + &per_cpu(idle_exit_wall, j)); + } + mod_timer(&cpu_timer, jiffies + 2); + schedule_delayed_work_on(0, &this_dbs_info->work, 10); + + if (mutex_is_locked(&short_timer_mutex)) + mutex_unlock(&short_timer_mutex); + return; +} + +/*** delete after deprecation time ***/ + +/************************** sysfs end ************************/ + +static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) +{ +#ifndef CONFIG_ARCH_EXYNOS4 + if (p->cur == p->max) + return; +#endif + __cpufreq_driver_target(p, freq, CPUFREQ_RELATION_H); +} + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + unsigned int max_load_freq; + + struct cpufreq_policy *policy; + unsigned int j; + + unsigned int index, new_freq; + unsigned int longterm_load = 0; + + policy = this_dbs_info->cur_policy; + + /* + * Every sampling_rate, we check, if current idle time is less + * than 20% (default), then we try to increase frequency + * Every sampling_rate, we look for a the lowest + * frequency which can sustain the load while keeping idle time over + * 30%. If such a frequency exist, we try to decrease to this frequency. + * + * Any frequency increase takes it to the maximum frequency. 
+ * Frequency reduction happens at minimum steps of + * 5% (default) of current frequency + */ + + /* Get Absolute Load - in terms of freq */ + max_load_freq = 0; + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + unsigned int load, load_freq; + int freq_avg; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time_us(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, + j_dbs_info->prev_cpu_iowait); + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) { + cputime64_t cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_dbs_info->prev_cpu_nice; + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + /* + * For the purpose of adaptive, waiting for disk IO is an + * indication that you're performance critical, and not that + * the system is actually idle. So subtract the iowait time + * from the cpu idle time. + */ + + if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + + if (load > longterm_load) + longterm_load = load; + + freq_avg = __cpufreq_driver_getavg(policy, j); + if (freq_avg <= 0) + freq_avg = policy->cur; + + load_freq = load * freq_avg; + + if (load_freq > max_load_freq) + max_load_freq = load_freq; + } + + if (longterm_load >= MIN_ONDEMAND_THRESHOLD) + this_dbs_info->ondemand = true; + else + this_dbs_info->ondemand = false; + + /* Check for frequency increase */ + if (max_load_freq > (dbs_tuners_ins.up_threshold * policy->cur)) { + cpufreq_frequency_table_target(policy, + this_dbs_info->freq_table, + (policy->cur + step_up_load), + CPUFREQ_RELATION_L, &index); + + new_freq = this_dbs_info->freq_table[index].frequency; + dbs_freq_increase(policy, new_freq); + return; + } + + /* Check for frequency decrease */ + /* if we cannot reduce the frequency anymore, break out early */ +#ifndef CONFIG_ARCH_EXYNOS4 + if (policy->cur == policy->min) + return; +#endif + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus 10 points under the threshold. 
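+ * For example, with up_threshold = 80 and down_differential = 10, a CPU
+ * running at 1,000,000 kHz that measured roughly 60% load has
+ * max_load_freq ~= 60 * 1,000,000, which is below 70 * policy->cur, so
+ * freq_next = max_load_freq / 70 ~= 857,000 kHz before being clamped to
+ * policy->min and snapped to a table frequency.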
+ */ + if (max_load_freq < + (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * + policy->cur) { + unsigned int freq_next; + freq_next = max_load_freq / + (dbs_tuners_ins.up_threshold - + dbs_tuners_ins.down_differential); + + if (freq_next < policy->min) + freq_next = policy->min; + + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + + int delay; + + mutex_lock(&dbs_info->timer_mutex); + + /* Common NORMAL_SAMPLE setup */ + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + dbs_check_cpu(dbs_info); + + /* We want all CPUs to do sampling nearly on + * same jiffy + */ + delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + schedule_delayed_work_on(cpu, &dbs_info->work, delay); + + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); +} + +/* + * Not all CPUs want IO time to be accounted as busy; this dependson how + * efficient idling at a higher frequency/voltage is. + * Pavel Machek says this is not so for various generations of AMD and old + * Intel systems. + * Mike Chan (androidlcom) calis this is also not true for ARM. + * Because of this, whitelist specific known (series) of CPUs by default, and + * leave all others up to the user. + */ +static int should_io_be_busy(void) +{ +#if defined(CONFIG_X86) + /* + * For Intel, Core 2 (model 15) andl later have an efficient idle. 
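+ * (The check below keys on vendor Intel, family 6, model >= 15, which
+ * matches Core 2 and later parts.)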
+ */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + boot_cpu_data.x86 == 6 && + boot_cpu_data.x86_model >= 15) + return 1; +#endif + return 0; +} + +static void cpufreq_adaptive_idle(void) +{ + int i; + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, 0); + struct cpufreq_policy *policy; + + policy = dbs_info->cur_policy; + + pm_idle_old(); + + if ((policy->cur == policy->min) || + (policy->cur == policy->max)) { + + if (timer_pending(&cpu_timer)) + return; + + if (mutex_trylock(&short_timer_mutex)) { + for_each_online_cpu(i) { + per_cpu(idle_in_idle, i) = + get_cpu_idle_time_us(i, + &per_cpu(idle_exit_wall, i)); + } + + mod_timer(&cpu_timer, jiffies + 2); + cancel_delayed_work(&dbs_info->work); + } + } else { + if (timer_pending(&cpu_timer)) + del_timer(&cpu_timer); + + } +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_mutex); + + rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time_us(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + } + this_dbs_info->cpu = cpu; + adaptive_init_cpu(cpu); + + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + /* policy latency is in nS. 
Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = + max(min_sampling_rate, + latency * LATENCY_MULTIPLIER); + dbs_tuners_ins.io_is_busy = should_io_be_busy(); + } + mutex_unlock(&dbs_mutex); + + mutex_init(&this_dbs_info->timer_mutex); + dbs_timer_init(this_dbs_info); + + pm_idle_old = pm_idle; + pm_idle = cpufreq_adaptive_idle; + break; + + case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + sysfs_remove_group(&policy->kobj, &dbs_attr_group); + mutex_destroy(&this_dbs_info->timer_mutex); + dbs_enable--; + mutex_unlock(&dbs_mutex); + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + + pm_idle = pm_idle_old; + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +static inline void cpufreq_adaptive_update_time(void) +{ + struct cpu_dbs_info_s *this_dbs_info; + struct cpufreq_policy *policy; + int j; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); + policy = this_dbs_info->cur_policy; + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time_us(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + j_dbs_info->prev_cpu_wall = cur_wall_time; + + j_dbs_info->prev_cpu_idle = cur_idle_time; + + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + + } + +} + +static int cpufreq_adaptive_up_task(void *data) +{ + unsigned long flags; + struct cpu_dbs_info_s *this_dbs_info; + struct cpufreq_policy *policy; + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); + policy = this_dbs_info->cur_policy; + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + spin_lock_irqsave(&up_cpumask_lock, flags); + + if (cpumask_empty(&up_cpumask)) { + spin_unlock_irqrestore(&up_cpumask_lock, flags); + schedule(); + + if (kthread_should_stop()) + break; + + spin_lock_irqsave(&up_cpumask_lock, flags); + } + + set_current_state(TASK_RUNNING); + + cpumask_clear(&up_cpumask); + spin_unlock_irqrestore(&up_cpumask_lock, flags); + + __cpufreq_driver_target(this_dbs_info->cur_policy, + target_freq, + CPUFREQ_RELATION_H); + if (policy->cur != policy->max) { + mutex_lock(&this_dbs_info->timer_mutex); + + schedule_delayed_work_on(0, &this_dbs_info->work, delay); + mutex_unlock(&this_dbs_info->timer_mutex); + cpufreq_adaptive_update_time(); + } + if (mutex_is_locked(&short_timer_mutex)) + mutex_unlock(&short_timer_mutex); + } + + return 0; +} + +static void cpufreq_adaptive_freq_down(struct work_struct *work) +{ + unsigned long flags; + struct cpu_dbs_info_s *this_dbs_info; + struct cpufreq_policy *policy; + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + spin_lock_irqsave(&down_cpumask_lock, 
flags); + cpumask_clear(&down_cpumask); + spin_unlock_irqrestore(&down_cpumask_lock, flags); + + this_dbs_info = &per_cpu(od_cpu_dbs_info, 0); + policy = this_dbs_info->cur_policy; + + __cpufreq_driver_target(this_dbs_info->cur_policy, + target_freq, + CPUFREQ_RELATION_H); + + if (policy->cur != policy->min) { + mutex_lock(&this_dbs_info->timer_mutex); + + schedule_delayed_work_on(0, &this_dbs_info->work, delay); + mutex_unlock(&this_dbs_info->timer_mutex); + cpufreq_adaptive_update_time(); + } + + if (mutex_is_locked(&short_timer_mutex)) + mutex_unlock(&short_timer_mutex); +} + +static int __init cpufreq_gov_dbs_init(void) +{ + cputime64_t wall; + u64 idle_time; + int cpu = get_cpu(); + + struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; + go_maxspeed_load = DEFAULT_GO_MAXSPEED_LOAD; + keep_minspeed_load = DEFAULT_KEEP_MINSPEED_LOAD; + step_up_load = DEFAULT_STEPUP_LOAD; + + idle_time = get_cpu_idle_time_us(cpu, &wall); + put_cpu(); + if (idle_time != -1ULL) { + /* Idle micro accounting is supported. Use finer thresholds */ + dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; + dbs_tuners_ins.down_differential = + MICRO_FREQUENCY_DOWN_DIFFERENTIAL; + /* + * In no_hz/micro accounting case we set the minimum frequency + * not depending on HZ, but fixed (very low). The deferred + * timer might skip some samples if idle/sleeping as needed. + */ + min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; + } else { + /* For correct statistics, we need 10 ticks for each measure */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + } + + init_timer(&cpu_timer); + cpu_timer.function = cpufreq_adaptive_timer; + + up_task = kthread_create(cpufreq_adaptive_up_task, NULL, + "kadaptiveup"); + + if (IS_ERR(up_task)) + return PTR_ERR(up_task); + + sched_setscheduler_nocheck(up_task, SCHED_FIFO, ¶m); + get_task_struct(up_task); + + /* No rescuer thread, bind to CPU queuing the work for possibly + warm cache (probably doesn't matter much). */ + down_wq = alloc_workqueue("kadaptive_down", 0, 1); + + if (!down_wq) + goto err_freeuptask; + + INIT_WORK(&freq_scale_down_work, cpufreq_adaptive_freq_down); + + + return cpufreq_register_governor(&cpufreq_gov_adaptive); +err_freeuptask: + put_task_struct(up_task); + return -ENOMEM; +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_adaptive); +} + + +MODULE_AUTHOR("Venkatesh Pallipadi "); +MODULE_AUTHOR("Alexey Starikovskiy "); +MODULE_DESCRIPTION("'cpufreq_adaptive' - A dynamic cpufreq governor for " + "Low Latency Frequency Transition capable processors"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ADAPTIVE +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/cpufreq/cpufreq_intellidemand.c b/drivers/cpufreq/cpufreq_intellidemand.c new file mode 100644 index 00000000..f0a5630e --- /dev/null +++ b/drivers/cpufreq/cpufreq_intellidemand.c @@ -0,0 +1,890 @@ +/* + * drivers/cpufreq/cpufreq_intellidemand.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define _LIMIT_LCD_OFF_CPU_MAX_FREQ_ + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) +#define DEF_FREQUENCY_UP_THRESHOLD (90) +#define DEF_SAMPLING_DOWN_FACTOR (15) +#define MAX_SAMPLING_DOWN_FACTOR (100000) +#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) +#define MICRO_FREQUENCY_UP_THRESHOLD (85) +#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) +#define MIN_FREQUENCY_UP_THRESHOLD (11) +#define MAX_FREQUENCY_UP_THRESHOLD (100) +#define MIN_FREQUENCY_DOWN_DIFFERENTIAL (1) + +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. + * All times here are in uS. + */ +#define MIN_SAMPLING_RATE_RATIO (2) + +static unsigned int min_sampling_rate; + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +static void do_dbs_timer(struct work_struct *work); +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event); + +#ifdef _LIMIT_LCD_OFF_CPU_MAX_FREQ_ +#ifdef CONFIG_HAS_EARLYSUSPEND +static struct early_suspend cpufreq_gov_early_suspend; +static unsigned int cpufreq_gov_lcd_status; +#endif +#endif + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND +static +#endif +struct cpufreq_governor cpufreq_gov_intellidemand = { + .name = "intellidemand", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpu_dbs_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_lo; + unsigned int freq_lo_jiffies; + unsigned int freq_hi_jiffies; + unsigned int rate_mult; + int cpu; + unsigned int sample_type:1; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on + * different CPUs. It protects dbs_enable in governor start/stop. 
+ */ +static DEFINE_MUTEX(dbs_mutex); + +static struct workqueue_struct *kintellidemand_wq; + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int ignore_nice; + unsigned int sampling_down_factor; + unsigned int powersave_bias; + unsigned int io_is_busy; +} dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, + .ignore_nice = 0, + .powersave_bias = 0, +}; + +static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, + cputime64_t *wall) +{ + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + + idle_time = cur_wall_time - busy_time; + if (wall) + *wall = jiffies_to_usecs(cur_wall_time); + + return jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, wall); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + + return idle_time; +} + +static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +/* + * Find right freq to be set now with powersave_bias on. + * Returns the freq_hi to be used right now and will set freq_hi_jiffies, + * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. 
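+ * Worked example with illustrative numbers: powersave_bias = 100 (10%)
+ * and a requested 1,000,000 kHz give freq_avg = 900,000 kHz. If the table
+ * brackets that between freq_lo = 800,000 and freq_hi = 1,000,000, the
+ * sampling period is split as jiffies_hi / jiffies_total =
+ * (freq_avg - freq_lo) / (freq_hi - freq_lo) = 1/2, i.e. half of each
+ * period at the higher frequency and half at the lower one.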
+ */ +static unsigned int powersave_bias_target(struct cpufreq_policy *policy, + unsigned int freq_next, + unsigned int relation) +{ + unsigned int freq_req, freq_reduc, freq_avg; + unsigned int freq_hi, freq_lo; + unsigned int index = 0; + unsigned int jiffies_total, jiffies_hi, jiffies_lo; + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, + policy->cpu); + + if (!dbs_info->freq_table) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_next; + } + + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, + relation, &index); + freq_req = dbs_info->freq_table[index].frequency; + freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000; + freq_avg = freq_req - freq_reduc; + + /* Find freq bounds for freq_avg in freq_table */ + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_H, &index); + freq_lo = dbs_info->freq_table[index].frequency; + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_L, &index); + freq_hi = dbs_info->freq_table[index].frequency; + + /* Find out how long we have to be in hi and lo freqs */ + if (freq_hi == freq_lo) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_lo; + } + jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + jiffies_hi = (freq_avg - freq_lo) * jiffies_total; + jiffies_hi += ((freq_hi - freq_lo) / 2); + jiffies_hi /= (freq_hi - freq_lo); + jiffies_lo = jiffies_total - jiffies_hi; + dbs_info->freq_lo = freq_lo; + dbs_info->freq_lo_jiffies = jiffies_lo; + dbs_info->freq_hi_jiffies = jiffies_hi; + return freq_hi; +} + +static void intellidemand_powersave_bias_init_cpu(int cpu) +{ + struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + dbs_info->freq_table = cpufreq_frequency_get_table(cpu); + dbs_info->freq_lo = 0; +} + +static void intellidemand_powersave_bias_init(void) +{ + int i; + for_each_online_cpu(i) { + intellidemand_powersave_bias_init_cpu(i); + } +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_max(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + printk_once(KERN_INFO "CPUFREQ: intellidemand sampling_rate_max " + "sysfs file is deprecated - used by: %s\n", current->comm); + return sprintf(buf, "%u\n", -1U); +} + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +define_one_global_ro(sampling_rate_max); +define_one_global_ro(sampling_rate_min); + +/* cpufreq_intellidemand Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(up_threshold, up_threshold); +show_one(down_differential, down_differential); +show_one(sampling_down_factor, sampling_down_factor); +show_one(ignore_nice_load, ignore_nice); +show_one(powersave_bias, powersave_bias); + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t 
store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.io_is_busy = !!input; + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.up_threshold = input; + mutex_unlock(&dbs_mutex); + + return count; +} +static ssize_t store_sampling_down_factor(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input, j; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + mutex_lock(&dbs_mutex); + dbs_tuners_ins.sampling_down_factor = input; + + /* Reset down sampling multiplier in case it was active */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->rate_mult = 1; + } + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(od_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + + } + return count; +} + +static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input > 1000) + input = 1000; + + mutex_lock(&dbs_mutex); + dbs_tuners_ins.powersave_bias = input; + intellidemand_powersave_bias_init(); + mutex_unlock(&dbs_mutex); + + return count; +} + +static ssize_t store_down_differential(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + mutex_lock(&dbs_mutex); + if (ret != 1 || input >= dbs_tuners_ins.up_threshold || + input < MIN_FREQUENCY_DOWN_DIFFERENTIAL) { + mutex_unlock(&dbs_mutex); + return -EINVAL; + } + + dbs_tuners_ins.down_differential = input; + mutex_unlock(&dbs_mutex); + + return count; +} +define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(up_threshold); +define_one_global_rw(down_differential); +define_one_global_rw(sampling_down_factor); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(powersave_bias); +#ifdef CONFIG_SEC_LIMIT_MAX_FREQ // limit max freq +define_one_global_rw(lmf_temp); +define_one_global_rw(lmf_browser); +define_one_global_rw(lmf_active_load); +define_one_global_rw(lmf_inactive_load); +#endif +static struct attribute *dbs_attributes[] = { + &sampling_rate_max.attr, + &sampling_rate_min.attr, + 
&sampling_rate.attr, + &up_threshold.attr, + &down_differential.attr, + &sampling_down_factor.attr, + &ignore_nice_load.attr, + &powersave_bias.attr, + &io_is_busy.attr, +#ifdef CONFIG_SEC_LIMIT_MAX_FREQ // limit max freq + &lmf_temp.attr, + &lmf_browser.attr, + &lmf_active_load.attr, + &lmf_inactive_load.attr, +#endif + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "intellidemand", +}; + +/************************** sysfs end ************************/ + +static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) +{ + if (dbs_tuners_ins.powersave_bias) + freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); + else if (p->cur == p->max) + return; + + __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ? + CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); +} + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + unsigned int max_load_freq; + + struct cpufreq_policy *policy; + unsigned int j; + + this_dbs_info->freq_lo = 0; + policy = this_dbs_info->cur_policy; + + /* + * Every sampling_rate, we check, if current idle time is less + * than 20% (default), then we try to increase frequency + * Every sampling_rate, we look for a the lowest + * frequency which can sustain the load while keeping idle time over + * 30%. If such a frequency exist, we try to decrease to this frequency. + * + * Any frequency increase takes it to the maximum frequency. + * Frequency reduction happens at minimum steps of + * 5% (default) of current frequency + */ + + /* Get Absolute Load - in terms of freq */ + max_load_freq = 0; + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + unsigned int load, load_freq; + int freq_avg; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) + (cur_wall_time - j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) + (cur_idle_time - j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) + (cur_iowait_time - j_dbs_info->prev_cpu_iowait); + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) { + u64 cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_dbs_info->prev_cpu_nice; + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + /* + * For the purpose of ondemand, waiting for disk IO is an + * indication that you're performance critical, and not that + * the system is actually idle. So subtract the iowait time + * from the cpu idle time. 
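+ * E.g. with io_is_busy set: wall = 100 mS and idle = 60 mS of which
+ * 20 mS is iowait gives an effective idle of 40 mS, so the load is
+ * reported as 60% rather than 40%.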
+ */ + + if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + + freq_avg = __cpufreq_driver_getavg(policy, j); + if (freq_avg <= 0) + freq_avg = policy->cur; + + load_freq = load * freq_avg; + if (load_freq > max_load_freq) + max_load_freq = load_freq; + } + + /* Check for frequency increase */ + if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) { + +/* In case of increase to max freq., freq. scales by 2 step for reducing the current consumption*/ +#ifdef _LIMIT_LCD_OFF_CPU_MAX_FREQ_ + if(!cpufreq_gov_lcd_status) { + if (policy->cur < policy->max) { + if (policy->cur < 400000) dbs_freq_increase(policy, 800000); + else if (policy->cur < 800000) dbs_freq_increase(policy, 1000000); + else { + this_dbs_info->rate_mult = dbs_tuners_ins.sampling_down_factor; + dbs_freq_increase(policy, policy->max); + } + } + return; + } else +#endif + /* If switching to max speed, apply sampling_down_factor */ + if (policy->cur < policy->max) + this_dbs_info->rate_mult = + dbs_tuners_ins.sampling_down_factor; + dbs_freq_increase(policy, policy->max); + return; + } + + /* Check for frequency decrease */ + /* if we cannot reduce the frequency anymore, break out early */ + if (policy->cur == policy->min) + return; + + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus 10 points under the threshold. + */ + if (max_load_freq < + (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * + policy->cur) { + unsigned int freq_next; + freq_next = max_load_freq / + (dbs_tuners_ins.up_threshold - + dbs_tuners_ins.down_differential); + + /* No longer fully busy, reset rate_mult */ + this_dbs_info->rate_mult = 1; + + if (freq_next < policy->min) + freq_next = policy->min; + + if (!dbs_tuners_ins.powersave_bias) { + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } else { + int freq = powersave_bias_target(policy, freq_next, + CPUFREQ_RELATION_L); + __cpufreq_driver_target(policy, freq, + CPUFREQ_RELATION_L); + } + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + int sample_type = dbs_info->sample_type; + + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate + * dbs_info->rate_mult); + +#if 0 + /* Don't care too much about synchronizing the workqueue in both cpus */ + if (num_online_cpus() > 1) + delay -= jiffies % delay; +#endif + + mutex_lock(&dbs_info->timer_mutex); + + /* Common NORMAL_SAMPLE setup */ + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + if (!dbs_tuners_ins.powersave_bias || + sample_type == DBS_NORMAL_SAMPLE) { + dbs_check_cpu(dbs_info); + if (dbs_info->freq_lo) { + /* Setup timer for SUB_SAMPLE */ + dbs_info->sample_type = DBS_SUB_SAMPLE; + delay = dbs_info->freq_hi_jiffies; + } + } else { + __cpufreq_driver_target(dbs_info->cur_policy, + dbs_info->freq_lo, CPUFREQ_RELATION_H); + } + queue_delayed_work_on(cpu, kintellidemand_wq, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + 
INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + queue_delayed_work_on(dbs_info->cpu, kintellidemand_wq, &dbs_info->work, + delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); +} + +/* + * Not all CPUs want IO time to be accounted as busy; this dependson how + * efficient idling at a higher frequency/voltage is. + * Pavel Machek says this is not so for various generations of AMD and old + * Intel systems. + * Mike Chan (androidlcom) calis this is also not true for ARM. + * Because of this, whitelist specific known (series) of CPUs by default, and + * leave all others up to the user. + */ +static int should_io_be_busy(void) +{ +#if defined(CONFIG_X86) + /* + * For Intel, Core 2 (model 15) andl later have an efficient idle. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + boot_cpu_data.x86 == 6 && + boot_cpu_data.x86_model >= 15) + return 1; +#endif +#if defined(CONFIG_ARM) + return 1; +#endif + return 0; +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + //per_cpu(cpu_load, cpu) = 0; + mutex_lock(&dbs_mutex); + + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(od_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + j_dbs_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + this_dbs_info->cpu = cpu; + this_dbs_info->rate_mult = 1; + intellidemand_powersave_bias_init_cpu(cpu); + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + /* policy latency is in nS. 
Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = + max(min_sampling_rate, + latency * LATENCY_MULTIPLIER); + dbs_tuners_ins.io_is_busy = should_io_be_busy(); + } + mutex_unlock(&dbs_mutex); + + mutex_init(&this_dbs_info->timer_mutex); + dbs_timer_init(this_dbs_info); + break; + + case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + mutex_destroy(&this_dbs_info->timer_mutex); + dbs_enable--; + mutex_unlock(&dbs_mutex); + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_dbs_info->timer_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +#ifdef _LIMIT_LCD_OFF_CPU_MAX_FREQ_ +#ifdef CONFIG_HAS_EARLYSUSPEND +static void cpufreq_gov_suspend(struct early_suspend *h) +{ + cpufreq_gov_lcd_status = 0; + + pr_info("%s : cpufreq_gov_lcd_status %d\n", __func__, cpufreq_gov_lcd_status); +} + +static void cpufreq_gov_resume(struct early_suspend *h) +{ + cpufreq_gov_lcd_status = 1; + + pr_info("%s : cpufreq_gov_lcd_status %d\n", __func__, cpufreq_gov_lcd_status); +} +#endif +#endif + +static int __init cpufreq_gov_dbs_init(void) +{ + int err; + cputime64_t wall; + u64 idle_time; + int cpu = get_cpu(); + + idle_time = get_cpu_idle_time_us(cpu, &wall); + put_cpu(); + if (idle_time != -1ULL) { + /* Idle micro accounting is supported. Use finer thresholds */ + dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; + dbs_tuners_ins.down_differential = + MICRO_FREQUENCY_DOWN_DIFFERENTIAL; + /* + * In no_hz/micro accounting case we set the minimum frequency + * not depending on HZ, but fixed (very low). The deferred + * timer might skip some samples if idle/sleeping as needed. 
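+ * With MICRO_FREQUENCY_MIN_SAMPLE_RATE that floor is 10,000 uS, i.e.
+ * the governor never samples more often than every 10 mS.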
+ */ + min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; + } else { + /* For correct statistics, we need 10 ticks for each measure */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(1); + } + + kintellidemand_wq = create_workqueue("kintellidemand"); + if (!kintellidemand_wq) { + printk(KERN_ERR "Creation of kintellidemand failed\n"); + return -EFAULT; + } + err = cpufreq_register_governor(&cpufreq_gov_intellidemand); + if (err) + destroy_workqueue(kintellidemand_wq); + +#ifdef _LIMIT_LCD_OFF_CPU_MAX_FREQ_ +#ifdef CONFIG_HAS_EARLYSUSPEND + cpufreq_gov_lcd_status = 1; + + cpufreq_gov_early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; + + cpufreq_gov_early_suspend.suspend = cpufreq_gov_suspend; + cpufreq_gov_early_suspend.resume = cpufreq_gov_resume; + register_early_suspend(&cpufreq_gov_early_suspend); +#endif +#endif + + return err; +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_intellidemand); + destroy_workqueue(kintellidemand_wq); +} + + +MODULE_AUTHOR("Venkatesh Pallipadi "); +MODULE_AUTHOR("Alexey Starikovskiy "); +MODULE_DESCRIPTION("'cpufreq_intellidemand' - A dynamic cpufreq governor for " + "Low Latency Frequency Transition capable processors"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); + + From 55cf6f63d7ea131054b147c26b63c96792585e46 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Mon, 12 Aug 2013 11:58:45 -0400 Subject: [PATCH 31/35] cpufreq: Add abyssplugv2 from abyss kernel Conflicts: drivers/cpufreq/Makefile --- drivers/cpufreq/Makefile | 35 +- drivers/cpufreq/cpufreq_abyssplugv2.c | 1063 +++++++++++++++++++++++++ 2 files changed, 1082 insertions(+), 16 deletions(-) create mode 100644 drivers/cpufreq/cpufreq_abyssplugv2.c diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index be135afd..d553141e 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -4,25 +4,28 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq.o obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o # CPUfreq governors -obj-$(CONFIG_CPU_FREQ_GOV_ABYSSPLUG) += cpufreq_abyssplug.o -obj-$(CONFIG_CPU_FREQ_GOV_ADAPTIVE) += cpufreq_adaptive.o -obj-$(CONFIG_CPU_FREQ_GOV_ASSWAX) += cpufreq_asswax.o -obj-$(CONFIG_CPU_FREQ_GOV_BADASS) += cpufreq_badass.o -obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o -obj-$(CONFIG_CPU_FREQ_GOV_DANCEDANCE) += cpufreq_dancedance.o -obj-$(CONFIG_CPU_FREQ_GOV_NIGHTMARE) += cpufreq_nightmare.o -obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o -obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o -obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o -obj-$(CONFIG_CPU_FREQ_GOV_SLP) += cpufreq_slp.o -obj-$(CONFIG_CPU_FREQ_GOV_SMARTASSH3) += cpufreq_smartassH3.o -obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o -obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o +obj-$(CONFIG_CPU_FREQ_GOV_ABYSSPLUG) += cpufreq_abyssplug.o +obj-$(CONFIG_CPU_FREQ_GOV_ABYSSPLUG) += cpufreq_abyssplugv2.o +obj-$(CONFIG_CPU_FREQ_GOV_ADAPTIVE) += cpufreq_adaptive.o +obj-$(CONFIG_CPU_FREQ_GOV_ASSWAX) += cpufreq_asswax.o +obj-$(CONFIG_CPU_FREQ_GOV_BADASS) += cpufreq_badass.o +obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o +obj-$(CONFIG_CPU_FREQ_GOV_DANCEDANCE) += cpufreq_dancedance.o +obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += 
cpufreq_intellidemand.o -obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o +obj-$(CONFIG_CPU_FREQ_GOV_KTOONSERVATIVEQ) += cpufreq_ktoonservativeq.o +obj-$(CONFIG_CPU_FREQ_GOV_NIGHTMARE) += cpufreq_nightmare.o +obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o +obj-$(CONFIG_CPU_FREQ_GOV_PEGASUSQ) += cpufreq_pegasusq.o +obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o +obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o +obj-$(CONFIG_CPU_FREQ_GOV_SLP) += cpufreq_slp.o +obj-$(CONFIG_CPU_FREQ_GOV_SMARTASSH3) += cpufreq_smartassH3.o +obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o +obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o # CPUfreq cross-arch helpers -obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o +obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o ################################################################################## # x86 drivers. diff --git a/drivers/cpufreq/cpufreq_abyssplugv2.c b/drivers/cpufreq/cpufreq_abyssplugv2.c new file mode 100644 index 00000000..545801e4 --- /dev/null +++ b/drivers/cpufreq/cpufreq_abyssplugv2.c @@ -0,0 +1,1063 @@ +/* + * drivers/cpufreq/cpufreq_abyssplugv2.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * (C) 2012 Dennis Rassmann + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * bds is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) +#define DEF_FREQUENCY_UP_THRESHOLD (80) +#define DEF_SAMPLING_DOWN_FACTOR (1) +#define MAX_SAMPLING_DOWN_FACTOR (100000) +#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) +#define MICRO_FREQUENCY_UP_THRESHOLD (95) +#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) +#define MIN_FREQUENCY_UP_THRESHOLD (11) +#define MAX_FREQUENCY_UP_THRESHOLD (100) +#define MIN_FREQUENCY_DOWN_DIFFERENTIAL (1) + + +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. + * All times here are in uS. 
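+ * (CPUFREQ_ETERNAL means the driver cannot state its transition latency;
+ * it therefore exceeds the 10 mS TRANSITION_LATENCY_LIMIT used as
+ * max_transition_latency below, which is why such drivers are excluded.)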
+ */ +#define MIN_SAMPLING_RATE_RATIO (2) + +static unsigned int min_sampling_rate; + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +#define POWERSAVE_BIAS_MAXLEVEL (1000) +#define POWERSAVE_BIAS_MINLEVEL (-1000) + +static void do_bds_timer(struct work_struct *work); +static int cpufreq_governor_bds(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ABYSSPLUG +static +#endif +struct cpufreq_governor cpufreq_gov_abyssplug = { + .name = "abyssplugv2", + .governor = cpufreq_governor_bds, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +/* Sampling types */ +enum {BDS_NORMAL_SAMPLE, BDS_SUB_SAMPLE}; + +struct cpu_bds_info_s { + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_iowait; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_lo; + unsigned int freq_lo_jiffies; + unsigned int freq_hi_jiffies; + unsigned int rate_mult; + int cpu; + unsigned int sample_type:1; + /* + * percpu mutex that serializes governor limit change with + * do_bds_timer invocation. We do not want do_bds_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_bds_info_s, od_cpu_bds_info); + +static inline void bds_timer_init(struct cpu_bds_info_s *bds_info); +static inline void bds_timer_exit(struct cpu_bds_info_s *bds_info); + +static unsigned int bds_enable; /* number of CPUs using this policy */ + +/* + * bds_mutex protects bds_enable in governor start/stop. + */ +static DEFINE_MUTEX(bds_mutex); + +static struct workqueue_struct *input_wq; + +static DEFINE_PER_CPU(struct work_struct, bds_refresh_work); + +static struct bds_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int ignore_nice; + unsigned int sampling_down_factor; + int powersave_bias; + unsigned int io_is_busy; +} bds_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, + .ignore_nice = 0, + .powersave_bias = 0, +}; + +static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, + u64 *wall) +{ + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + + idle_time = cur_wall_time - busy_time; + if (wall) + *wall = jiffies_to_usecs(cur_wall_time); + + return jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, wall); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + + return idle_time; +} + +static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +/* + * Find right freq to be set now with powersave_bias on. 
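+ * (powersave_bias is in units of 0.1%: e.g. a value of 100 trims the
+ * requested frequency by 10% before it is averaged between freq_hi and
+ * freq_lo, while the extreme values 1000 / -1000 pin the policy to its
+ * minimum / maximum frequency via abyssplug_powersave_bias_setspeed().)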
+ * Returns the freq_hi to be used right now and will set freq_hi_jiffies, + * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. + */ +static unsigned int powersave_bias_target(struct cpufreq_policy *policy, + unsigned int freq_next, + unsigned int relation) +{ + unsigned int freq_req, freq_avg; + unsigned int freq_hi, freq_lo; + unsigned int index = 0; + unsigned int jiffies_total, jiffies_hi, jiffies_lo; + int freq_reduc; + struct cpu_bds_info_s *bds_info = &per_cpu(od_cpu_bds_info, + policy->cpu); + + if (!bds_info->freq_table) { + bds_info->freq_lo = 0; + bds_info->freq_lo_jiffies = 0; + return freq_next; + } + + cpufreq_frequency_table_target(policy, bds_info->freq_table, freq_next, + relation, &index); + freq_req = bds_info->freq_table[index].frequency; + freq_reduc = freq_req * bds_tuners_ins.powersave_bias / 1000; + freq_avg = freq_req - freq_reduc; + + /* Find freq bounds for freq_avg in freq_table */ + index = 0; + cpufreq_frequency_table_target(policy, bds_info->freq_table, freq_avg, + CPUFREQ_RELATION_H, &index); + freq_lo = bds_info->freq_table[index].frequency; + index = 0; + cpufreq_frequency_table_target(policy, bds_info->freq_table, freq_avg, + CPUFREQ_RELATION_L, &index); + freq_hi = bds_info->freq_table[index].frequency; + + /* Find out how long we have to be in hi and lo freqs */ + if (freq_hi == freq_lo) { + bds_info->freq_lo = 0; + bds_info->freq_lo_jiffies = 0; + return freq_lo; + } + jiffies_total = usecs_to_jiffies(bds_tuners_ins.sampling_rate); + jiffies_hi = (freq_avg - freq_lo) * jiffies_total; + jiffies_hi += ((freq_hi - freq_lo) / 2); + jiffies_hi /= (freq_hi - freq_lo); + jiffies_lo = jiffies_total - jiffies_hi; + bds_info->freq_lo = freq_lo; + bds_info->freq_lo_jiffies = jiffies_lo; + bds_info->freq_hi_jiffies = jiffies_hi; + return freq_hi; +} + +static int abyssplug_powersave_bias_setspeed(struct cpufreq_policy *policy, + struct cpufreq_policy *altpolicy, + int level) +{ + if (level == POWERSAVE_BIAS_MAXLEVEL) { + /* maximum powersave; set to lowest frequency */ + __cpufreq_driver_target(policy, + (altpolicy) ? altpolicy->min : policy->min, + CPUFREQ_RELATION_L); + return 1; + } else if (level == POWERSAVE_BIAS_MINLEVEL) { + /* minimum powersave; set to highest frequency */ + __cpufreq_driver_target(policy, + (altpolicy) ? 
altpolicy->max : policy->max, + CPUFREQ_RELATION_H); + return 1; + } + return 0; +} + +static void abyssplug_powersave_bias_init_cpu(int cpu) +{ + struct cpu_bds_info_s *bds_info = &per_cpu(od_cpu_bds_info, cpu); + bds_info->freq_table = cpufreq_frequency_get_table(cpu); + bds_info->freq_lo = 0; +} + +static void abyssplug_powersave_bias_init(void) +{ + int i; + for_each_online_cpu(i) { + abyssplug_powersave_bias_init_cpu(i); + } +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +define_one_global_ro(sampling_rate_min); + +/* cpufreq_abyssplug Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", bds_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(up_threshold, up_threshold); +show_one(down_differential, down_differential); +show_one(sampling_down_factor, sampling_down_factor); +show_one(ignore_nice_load, ignore_nice); + +static ssize_t show_powersave_bias +(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", bds_tuners_ins.powersave_bias); +} + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + bds_tuners_ins.sampling_rate = max(input, min_sampling_rate); + return count; +} + +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + bds_tuners_ins.io_is_busy = !!input; + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + bds_tuners_ins.up_threshold = input; + return count; +} + +static ssize_t store_down_differential(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input >= bds_tuners_ins.up_threshold || + input < MIN_FREQUENCY_DOWN_DIFFERENTIAL) { + return -EINVAL; + } + + bds_tuners_ins.down_differential = input; + + return count; +} + +static ssize_t store_sampling_down_factor(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input, j; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + bds_tuners_ins.sampling_down_factor = input; + + /* Reset down sampling multiplier in case it was active */ + for_each_online_cpu(j) { + struct cpu_bds_info_s *bds_info; + bds_info = &per_cpu(od_cpu_bds_info, j); + bds_info->rate_mult = 1; + } + return count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + if (input == 
bds_tuners_ins.ignore_nice) { /* nothing to do */ + return count; + } + bds_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_bds_info_s *bds_info; + bds_info = &per_cpu(od_cpu_bds_info, j); + bds_info->prev_cpu_idle = get_cpu_idle_time(j, + &bds_info->prev_cpu_wall); + if (bds_tuners_ins.ignore_nice) + bds_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + + } + return count; +} + +static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + int input = 0; + int bypass = 0; + int ret, cpu, reenable_timer, j; + struct cpu_bds_info_s *bds_info; + + struct cpumask cpus_timer_done; + cpumask_clear(&cpus_timer_done); + + ret = sscanf(buf, "%d", &input); + + if (ret != 1) + return -EINVAL; + + if (input >= POWERSAVE_BIAS_MAXLEVEL) { + input = POWERSAVE_BIAS_MAXLEVEL; + bypass = 1; + } else if (input <= POWERSAVE_BIAS_MINLEVEL) { + input = POWERSAVE_BIAS_MINLEVEL; + bypass = 1; + } + + if (input == bds_tuners_ins.powersave_bias) { + /* no change */ + return count; + } + + reenable_timer = ((bds_tuners_ins.powersave_bias == + POWERSAVE_BIAS_MAXLEVEL) || + (bds_tuners_ins.powersave_bias == + POWERSAVE_BIAS_MINLEVEL)); + + bds_tuners_ins.powersave_bias = input; + if (!bypass) { + if (reenable_timer) { + /* reinstate bds timer */ + for_each_online_cpu(cpu) { + if (lock_policy_rwsem_write(cpu) < 0) + continue; + + bds_info = &per_cpu(od_cpu_bds_info, cpu); + + for_each_cpu(j, &cpus_timer_done) { + if (!bds_info->cur_policy) { + printk(KERN_ERR + "%s Dbs policy is NULL\n", + __func__); + goto skip_this_cpu; + } + if (cpumask_test_cpu(j, bds_info-> + cur_policy->cpus)) + goto skip_this_cpu; + } + + cpumask_set_cpu(cpu, &cpus_timer_done); + if (bds_info->cur_policy) { + /* restart bds timer */ + bds_timer_init(bds_info); + } +skip_this_cpu: + unlock_policy_rwsem_write(cpu); + } + } + abyssplug_powersave_bias_init(); + } else { + /* running at maximum or minimum frequencies; cancel + bds timer as periodic load sampling is not necessary */ + for_each_online_cpu(cpu) { + if (lock_policy_rwsem_write(cpu) < 0) + continue; + + bds_info = &per_cpu(od_cpu_bds_info, cpu); + + for_each_cpu(j, &cpus_timer_done) { + if (!bds_info->cur_policy) { + printk(KERN_ERR + "%s Dbs policy is NULL\n", + __func__); + goto skip_this_cpu_bypass; + } + if (cpumask_test_cpu(j, bds_info-> + cur_policy->cpus)) + goto skip_this_cpu_bypass; + } + + cpumask_set_cpu(cpu, &cpus_timer_done); + + if (bds_info->cur_policy) { + /* cpu using abyssplug, cancel bds timer */ + mutex_lock(&bds_info->timer_mutex); + bds_timer_exit(bds_info); + + abyssplug_powersave_bias_setspeed( + bds_info->cur_policy, + NULL, + input); + + mutex_unlock(&bds_info->timer_mutex); + } +skip_this_cpu_bypass: + unlock_policy_rwsem_write(cpu); + } + } + + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(up_threshold); +define_one_global_rw(down_differential); +define_one_global_rw(sampling_down_factor); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(powersave_bias); + + +static struct attribute *bds_attributes[] = { + &sampling_rate_min.attr, + &sampling_rate.attr, + &up_threshold.attr, + &down_differential.attr, + &sampling_down_factor.attr, + &ignore_nice_load.attr, + &powersave_bias.attr, + &io_is_busy.attr, + NULL +}; + +static struct attribute_group bds_attr_group = { + .attrs = bds_attributes, + .name = "abyssplugv2", +}; + +/************************** 
sysfs end ************************/ + +static void bds_freq_increase(struct cpufreq_policy *p, unsigned int freq) +{ + if (bds_tuners_ins.powersave_bias) + freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); + else if (p->cur == p->max) + return; + + __cpufreq_driver_target(p, freq, bds_tuners_ins.powersave_bias ? + CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); +} + +static void bds_check_cpu(struct cpu_bds_info_s *this_bds_info) +{ + /* Extrapolated load of this CPU */ + unsigned int load_at_max_freq = 0; + unsigned int max_load_freq; + /* Current load across this CPU */ + unsigned int cur_load = 0; + + struct cpufreq_policy *policy; + unsigned int j; + static unsigned int phase = 0; + static unsigned int counter = 0; + unsigned int new_phase_max = 0; + + this_bds_info->freq_lo = 0; + policy = this_bds_info->cur_policy; + + /* + * Every sampling_rate, we check, if current idle time is less + * than 20% (default), then we try to increase frequency + * Every sampling_rate, we look for a the lowest + * frequency which can sustain the load while keeping idle time over + * 30%. If such a frequency exist, we try to decrease to this frequency. + * + * Any frequency increase takes it to the maximum frequency. + * Frequency reduction happens at minimum steps of + * 5% (default) of current frequency + */ + + /* Get Absolute Load - in terms of freq */ + max_load_freq = 0; + + for_each_cpu(j, policy->cpus) { + struct cpu_bds_info_s *j_bds_info; + cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + unsigned int load_freq; + int freq_avg; + + j_bds_info = &per_cpu(od_cpu_bds_info, j); + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) (cur_wall_time - j_bds_info->prev_cpu_wall); + j_bds_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) (cur_idle_time - j_bds_info->prev_cpu_idle); + j_bds_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) (cur_iowait_time - j_bds_info->prev_cpu_iowait); + j_bds_info->prev_cpu_iowait = cur_iowait_time; + + if (bds_tuners_ins.ignore_nice) { + cputime64_t cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_bds_info->prev_cpu_nice; + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_bds_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + /* + * For the purpose of abyssplug, waiting for disk IO is an + * indication that you're performance critical, and not that + * the system is actually idle. So subtract the iowait time + * from the cpu idle time. 
+ */ + + if (bds_tuners_ins.io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + cur_load = 100 * (wall_time - idle_time) / wall_time; + + freq_avg = __cpufreq_driver_getavg(policy, j); + if (freq_avg <= 0) + freq_avg = policy->cur; + + load_freq = cur_load * freq_avg; + if (load_freq > max_load_freq) + max_load_freq = load_freq; + } + /* calculate the scaled load across CPU */ + load_at_max_freq = (cur_load * policy->cur)/policy->cpuinfo.max_freq; + + cpufreq_notify_utilization(policy, load_at_max_freq); + + /* Check for frequency increase */ + if (max_load_freq > bds_tuners_ins.up_threshold * policy->cur) { + /* If switching to max speed, apply sampling_down_factor */ + + /* busy phase */ + if (policy->cur < policy->max) + this_bds_info->rate_mult = + bds_tuners_ins.sampling_down_factor; + bds_freq_increase(policy, policy->max); + return; + } + + + /* Check for frequency decrease */ + /* if we cannot reduce the frequency anymore, break out early */ + if (policy->cur == policy->min) + return; + + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus 10 points under the threshold. + */ + if (max_load_freq < + (bds_tuners_ins.up_threshold - bds_tuners_ins.down_differential) * + policy->cur) { + unsigned int freq_next; + freq_next = max_load_freq / + (bds_tuners_ins.up_threshold - + bds_tuners_ins.down_differential); + + /* No longer fully busy, reset rate_mult */ + this_bds_info->rate_mult = 1; + + if (freq_next < policy->min) + freq_next = policy->min; + + if (!bds_tuners_ins.powersave_bias) { + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } else { + int freq = powersave_bias_target(policy, freq_next, + CPUFREQ_RELATION_L); + __cpufreq_driver_target(policy, freq, + CPUFREQ_RELATION_L); + } + } +} + +static void do_bds_timer(struct work_struct *work) +{ + struct cpu_bds_info_s *bds_info = + container_of(work, struct cpu_bds_info_s, work.work); + unsigned int cpu = bds_info->cpu; + int sample_type = bds_info->sample_type; + + int delay; + + mutex_lock(&bds_info->timer_mutex); + + /* Common NORMAL_SAMPLE setup */ + bds_info->sample_type = BDS_NORMAL_SAMPLE; + if (!bds_tuners_ins.powersave_bias || + sample_type == BDS_NORMAL_SAMPLE) { + bds_check_cpu(bds_info); + if (bds_info->freq_lo) { + /* Setup timer for SUB_SAMPLE */ + bds_info->sample_type = BDS_SUB_SAMPLE; + delay = bds_info->freq_hi_jiffies; + } else { + /* We want all CPUs to do sampling nearly on + * same jiffy + */ + delay = usecs_to_jiffies(bds_tuners_ins.sampling_rate + * bds_info->rate_mult); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; + } + } else { + __cpufreq_driver_target(bds_info->cur_policy, + bds_info->freq_lo, CPUFREQ_RELATION_H); + delay = bds_info->freq_lo_jiffies; + } + schedule_delayed_work_on(cpu, &bds_info->work, delay); + mutex_unlock(&bds_info->timer_mutex); +} + +static inline void bds_timer_init(struct cpu_bds_info_s *bds_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(bds_tuners_ins.sampling_rate); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + bds_info->sample_type = BDS_NORMAL_SAMPLE; + INIT_DELAYED_WORK_DEFERRABLE(&bds_info->work, do_bds_timer); + schedule_delayed_work_on(bds_info->cpu, &bds_info->work, delay); +} + +static inline void bds_timer_exit(struct cpu_bds_info_s *bds_info) +{ + 
cancel_delayed_work_sync(&bds_info->work); +} + +/* + * Not all CPUs want IO time to be accounted as busy; this dependson how + * efficient idling at a higher frequency/voltage is. + * Pavel Machek says this is not so for various generations of AMD and old + * Intel systems. + * Mike Chan (androidlcom) calis this is also not true for ARM. + * Because of this, whitelist specific known (series) of CPUs by default, and + * leave all others up to the user. + */ +static int should_io_be_busy(void) +{ +#if defined(CONFIG_X86) + /* + * For Intel, Core 2 (model 15) andl later have an efficient idle. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + boot_cpu_data.x86 == 6 && + boot_cpu_data.x86_model >= 15) + return 1; +#endif + return 0; +} + +static void bds_refresh_callback(struct work_struct *unused) +{ + struct cpufreq_policy *policy; + struct cpu_bds_info_s *this_bds_info; + unsigned int cpu = smp_processor_id(); + + if (lock_policy_rwsem_write(cpu) < 0) + return; + + this_bds_info = &per_cpu(od_cpu_bds_info, cpu); + policy = this_bds_info->cur_policy; + if (!policy) { + /* CPU not using abyssplug governor */ + unlock_policy_rwsem_write(cpu); + return; + } + + if (policy->cur < policy->max) { + policy->cur = policy->max; + + __cpufreq_driver_target(policy, policy->max, + CPUFREQ_RELATION_L); + this_bds_info->prev_cpu_idle = get_cpu_idle_time(cpu, + &this_bds_info->prev_cpu_wall); + } + unlock_policy_rwsem_write(cpu); +} + +static unsigned int enable_bds_input_event; +static void bds_input_event(struct input_handle *handle, unsigned int type, + unsigned int code, int value) +{ + int i; + + if (enable_bds_input_event) { + + if ((bds_tuners_ins.powersave_bias == POWERSAVE_BIAS_MAXLEVEL) || + (bds_tuners_ins.powersave_bias == POWERSAVE_BIAS_MINLEVEL)) { + /* nothing to do */ + return; + } + + for_each_online_cpu(i) { + queue_work_on(i, input_wq, &per_cpu(bds_refresh_work, i)); + } + } +} + +static int bds_input_connect(struct input_handler *handler, + struct input_dev *dev, const struct input_device_id *id) +{ + struct input_handle *handle; + int error; + + handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = "cpufreq"; + + error = input_register_handle(handle); + if (error) + goto err2; + + error = input_open_device(handle); + if (error) + goto err1; + + return 0; +err1: + input_unregister_handle(handle); +err2: + kfree(handle); + return error; +} + +static void bds_input_disconnect(struct input_handle *handle) +{ + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +static const struct input_device_id bds_ids[] = { + { .driver_info = 1 }, + { }, +}; + +static struct input_handler bds_input_handler = { + .event = bds_input_event, + .connect = bds_input_connect, + .disconnect = bds_input_disconnect, + .name = "cpufreq_abyss", + .id_table = bds_ids, +}; + +static int cpufreq_governor_bds(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_bds_info_s *this_bds_info; + unsigned int j; + int rc; + + this_bds_info = &per_cpu(od_cpu_bds_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&bds_mutex); + + bds_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_bds_info_s *j_bds_info; + j_bds_info = &per_cpu(od_cpu_bds_info, j); + j_bds_info->cur_policy = policy; + + j_bds_info->prev_cpu_idle = get_cpu_idle_time(j, + 
&j_bds_info->prev_cpu_wall); + if (bds_tuners_ins.ignore_nice) { + j_bds_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + } + this_bds_info->cpu = cpu; + this_bds_info->rate_mult = 1; + abyssplug_powersave_bias_init_cpu(cpu); + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (bds_enable == 1) { + unsigned int latency; + + rc = sysfs_create_group(cpufreq_global_kobject, + &bds_attr_group); + if (rc) { + mutex_unlock(&bds_mutex); + return rc; + } + + /* policy latency is in nS. Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + bds_tuners_ins.sampling_rate = + max(min_sampling_rate, + latency * LATENCY_MULTIPLIER); + bds_tuners_ins.io_is_busy = should_io_be_busy(); + } + if (!cpu) + rc = input_register_handler(&bds_input_handler); + mutex_unlock(&bds_mutex); + + mutex_init(&this_bds_info->timer_mutex); + + if (!abyssplug_powersave_bias_setspeed( + this_bds_info->cur_policy, + NULL, + bds_tuners_ins.powersave_bias)) + bds_timer_init(this_bds_info); + break; + + case CPUFREQ_GOV_STOP: + bds_timer_exit(this_bds_info); + + mutex_lock(&bds_mutex); + mutex_destroy(&this_bds_info->timer_mutex); + bds_enable--; + /* If device is being removed, policy is no longer + * valid. */ + this_bds_info->cur_policy = NULL; + if (!cpu) + input_unregister_handler(&bds_input_handler); + mutex_unlock(&bds_mutex); + if (!bds_enable) + sysfs_remove_group(cpufreq_global_kobject, + &bds_attr_group); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&this_bds_info->timer_mutex); + if (policy->max < this_bds_info->cur_policy->cur) + __cpufreq_driver_target(this_bds_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_bds_info->cur_policy->cur) + __cpufreq_driver_target(this_bds_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + else if (bds_tuners_ins.powersave_bias != 0) + abyssplug_powersave_bias_setspeed( + this_bds_info->cur_policy, + policy, + bds_tuners_ins.powersave_bias); + mutex_unlock(&this_bds_info->timer_mutex); + break; + } + return 0; +} + +static int __init cpufreq_gov_bds_init(void) +{ + cputime64_t wall; + u64 idle_time; + unsigned int i; + int cpu = get_cpu(); + + idle_time = get_cpu_idle_time_us(cpu, &wall); + put_cpu(); + if (idle_time != -1ULL) { + /* Idle micro accounting is supported. Use finer thresholds */ + bds_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; + bds_tuners_ins.down_differential = + MICRO_FREQUENCY_DOWN_DIFFERENTIAL; + /* + * In no_hz/micro accounting case we set the minimum frequency + * not depending on HZ, but fixed (very low). The deferred + * timer might skip some samples if idle/sleeping as needed. 
+ */ + min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; + } else { + /* For correct statistics, we need 10 ticks for each measure */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + } + + input_wq = create_workqueue("iewq"); + if (!input_wq) { + printk(KERN_ERR "Failed to create iewq workqueue\n"); + return -EFAULT; + } + for_each_possible_cpu(i) { + INIT_WORK(&per_cpu(bds_refresh_work, i), bds_refresh_callback); + } + + return cpufreq_register_governor(&cpufreq_gov_abyssplug); +} + +static void __exit cpufreq_gov_bds_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_abyssplug); + destroy_workqueue(input_wq); +} + +static int set_enable_bds_input_event_param(const char *val, struct kernel_param *kp) +{ + int ret = 0; + + ret = param_set_uint(val, kp); + if (ret) + pr_err("%s: error setting value %d\n", __func__, ret); + + return ret; +} +module_param_call(enable_bds_input_event, set_enable_bds_input_event_param, param_get_uint, + &enable_bds_input_event, S_IWUSR | S_IRUGO); + + +MODULE_AUTHOR("Venkatesh Pallipadi "); +MODULE_AUTHOR("Alexey Starikovskiy "); +MODULE_AUTHOR("Dennis Rassmann "); +MODULE_DESCRIPTION("'cpufreq_abyssplugv2' - An abyssplug cpufreq governor based on ondemand"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ABYSSPLUG +fs_initcall(cpufreq_gov_bds_init); +#else +module_init(cpufreq_gov_bds_init); +#endif +module_exit(cpufreq_gov_bds_exit); + From 63c2d12ca0afb088db005c490be71ea2c8bb4be1 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Mon, 12 Aug 2013 12:01:40 -0400 Subject: [PATCH 32/35] CPURFREQ: Add govs --- drivers/cpufreq/Kconfig | 98 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 2e07ae54..1f439690 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -108,6 +108,35 @@ config CPU_FREQ_DEFAULT_GOV_DANCEDANCE select CPU_FREQ_GOV_DANCEDANCE help +config CPU_FREQ_DEFAULT_GOV_INTERACTIVE + bool "interactive" + select CPU_FREQ_GOV_INTERACTIVE + help + Use the CPUFreq governor 'interactive' as default. This allows + you to get a full dynamic cpu frequency capable system by simply + loading your cpufreq low-level hardware driver, using the + 'interactive' governor for latency-sensitive workloads. + +config CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND + bool "intellidemand" + select CPU_FREQ_GOV_INTELLIDEMAND + help + Use the CPUFreq governor 'intellidemand' as default. This is + based on Ondemand with browsing detection based on GPU loading + +config CPU_FREQ_DEFAULT_GOV_KTOONSERVATIVEQ + bool "ktoonservativeq" + select CPU_FREQ_GOV_KTOONSERVATIVEQ + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'ktoonservativeq' as default. This allows + you to get a full dynamic frequency capable system by simply + loading your cpufreq low-level hardware driver. + Be aware that not all cpufreq drivers support the ktoonservativeq + governor. If unsure have a look at the help section of the + driver. Fallback governor will be the performance governor. This + governor adds the capability of hotpluging. + config CPU_FREQ_DEFAULT_GOV_NIGHTMARE bool "nightmare" select CPU_FREQ_GOV_NIGHTMARE @@ -125,6 +154,12 @@ config CPU_FREQ_DEFAULT_GOV_ONDEMAND governor. If unsure have a look at the help section of the driver. Fallback governor will be the performance governor. +config CPU_FREQ_DEFAULT_GOV_PEGASUSQ + bool "pegasusq" + select CPU_FREQ_GOV_PEGASUSQ + help + Use the CPUFreq governor 'pegasusq' as default. 
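For context on how the CPU_FREQ_DEFAULT_GOV_* choices above take effect: the cpufreq core selects its boot-time governor through the CPUFREQ_DEFAULT_GOVERNOR macro, which the stock tree derives from exactly these config symbols in include/linux/cpufreq.h, so a tree carrying this series would be expected to add one branch per new governor. The pegasusq branch below is only an illustrative sketch of that pattern, assuming the governor exports a cpufreq_gov_pegasusq symbol the way the other governors in this series do; it is not part of the patch itself.

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PEGASUSQ
/* Sketch only: mirrors the stock performance/ondemand branches in
 * include/linux/cpufreq.h; symbol name assumed. */
extern struct cpufreq_governor cpufreq_gov_pegasusq;
#define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_pegasusq)
#endif
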
+ config CPU_FREQ_DEFAULT_GOV_SLP bool "slp" select CPU_FREQ_GOV_SLP @@ -257,6 +292,65 @@ config CPU_FREQ_GOV_DANCEDANCE tristate "'dancedance' cpufreq governor" depends on CPU_FREQ +config CPU_FREQ_GOV_INTERACTIVE + tristate "'interactive' cpufreq policy governor" + help + 'interactive' - This driver adds a dynamic cpufreq policy governor + designed for latency-sensitive workloads. + + This governor attempts to reduce the latency of clock + increases so that the system is more responsive to + interactive workloads. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_interactive. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +config CPU_FREQ_GOV_INTELLIDEMAND + tristate "'intellidemand' cpufreq policy governor" + select CPU_FREQ_TABLE + help + 'intellidemand' - This driver adds a dynamic cpufreq policy governor. + The governor does a periodic polling and + changes frequency based on the CPU utilization. + The support for this governor depends on CPU capability to + do fast frequency switching (i.e, very low latency frequency + transitions). with browsing detection based on GPU loading + + To compile this driver as a module, choose M here: the + module will be called cpufreq_ondemand. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +config CPU_FREQ_GOV_KTOONSERVATIVEQ + tristate "'ktoonservativeq' cpufreq governor" + depends on CPU_FREQ + help + 'ktoonservativeq' - this driver is rather similar to the 'ondemand' + governor both in its source code and its purpose, the difference is + its optimisation for better suitability in a battery powered + environment. The frequency is gracefully increased and decreased + rather than jumping to 100% when speed is required. + + If you have a desktop machine then you should really be considering + the 'ondemand' governor instead, however if you are using a laptop, + PDA or even an AMD64 based computer (due to the unacceptable + step-by-step latency issues between the minimum and maximum frequency + transitions in the CPU) you will probably want to use this governor. + This governor adds the capability of hotpluging. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_ktoonservativeq. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + config CPU_FREQ_GOV_NIGHTMARE tristate "'nightmare' cpufreq governor" depends on CPU_FREQ @@ -290,6 +384,9 @@ config CPU_FREQ_GOV_PERFORMANCE If in doubt, say Y. +config CPU_FREQ_GOV_PEGASUSQ + tristate "'pegasusq' cpufreq policy governor" + config CPU_FREQ_GOV_POWERSAVE tristate "'powersave' governor" help @@ -356,3 +453,4 @@ endmenu endif endmenu + From 0a10b9096498c1c8da02391d51e7dead1cba0cc1 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Mon, 12 Aug 2013 12:30:51 -0400 Subject: [PATCH 33/35] RM abyssplugv2 & add back smartassv2 and lionheart --- drivers/cpufreq/Kconfig | 49 +- drivers/cpufreq/Makefile | 6 +- drivers/cpufreq/cpufreq_abyssplugv2.c | 1063 ------------------------- 3 files changed, 17 insertions(+), 1101 deletions(-) delete mode 100644 drivers/cpufreq/cpufreq_abyssplugv2.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 1f439690..57d2c3a0 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -103,6 +103,16 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE governor. If unsure have a look at the help section of the driver. Fallback governor will be the performance governor. 
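The help texts above describe intellidemand and ktoonservativeq as ondemand derivatives that poll periodically and scale frequency with CPU utilization; the underlying decision rule is the one bds_check_cpu() implements in the abyssplugv2 source earlier in this series. A condensed, standalone sketch of that rule follows, with the default thresholds assumed and all driver plumbing omitted; it is an illustration of the technique, not code from any of these governors.

/* Per sample: load is the busy share of the wall-clock window. Any breach
 * of up_threshold jumps straight to the maximum frequency, while a drop
 * below (up_threshold - down_differential) picks the lowest frequency that
 * can still carry the measured load. */
static unsigned int pick_target_khz(unsigned int wall_us, unsigned int idle_us,
				    unsigned int cur_khz, unsigned int min_khz,
				    unsigned int max_khz)
{
	const unsigned int up_threshold = 80;       /* DEF_FREQUENCY_UP_THRESHOLD */
	const unsigned int down_differential = 10;  /* DEF_FREQUENCY_DOWN_DIFFERENTIAL */
	unsigned int load, load_freq;

	if (!wall_us || idle_us > wall_us)
		return cur_khz;                     /* bogus sample, keep current */

	load = 100 * (wall_us - idle_us) / wall_us;
	load_freq = load * cur_khz;

	if (load_freq > up_threshold * cur_khz)
		return max_khz;
	if (load_freq < (up_threshold - down_differential) * cur_khz) {
		unsigned int next = load_freq / (up_threshold - down_differential);
		return next < min_khz ? min_khz : next;
	}
	return cur_khz;
}
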
+config CPU_FREQ_DEFAULT_GOV_SMARTASSV2 + bool "badass" + select CPU_FREQ_GOV_SMARTASSV2 + help + +config CPU_FREQ_DEFAULT_GOV_LIONHEART + bool "lionheart" + select CPU_FREQ_GOV_LIONHEART + help + config CPU_FREQ_DEFAULT_GOV_DANCEDANCE bool "dancedance" select CPU_FREQ_GOV_DANCEDANCE @@ -124,19 +134,6 @@ config CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND Use the CPUFreq governor 'intellidemand' as default. This is based on Ondemand with browsing detection based on GPU loading -config CPU_FREQ_DEFAULT_GOV_KTOONSERVATIVEQ - bool "ktoonservativeq" - select CPU_FREQ_GOV_KTOONSERVATIVEQ - select CPU_FREQ_GOV_PERFORMANCE - help - Use the CPUFreq governor 'ktoonservativeq' as default. This allows - you to get a full dynamic frequency capable system by simply - loading your cpufreq low-level hardware driver. - Be aware that not all cpufreq drivers support the ktoonservativeq - governor. If unsure have a look at the help section of the - driver. Fallback governor will be the performance governor. This - governor adds the capability of hotpluging. - config CPU_FREQ_DEFAULT_GOV_NIGHTMARE bool "nightmare" select CPU_FREQ_GOV_NIGHTMARE @@ -327,29 +324,15 @@ config CPU_FREQ_GOV_INTELLIDEMAND If in doubt, say N. -config CPU_FREQ_GOV_KTOONSERVATIVEQ - tristate "'ktoonservativeq' cpufreq governor" +config CPU_FREQ_GOV_LIONHEART + tristate "lionheart" depends on CPU_FREQ help - 'ktoonservativeq' - this driver is rather similar to the 'ondemand' - governor both in its source code and its purpose, the difference is - its optimisation for better suitability in a battery powered - environment. The frequency is gracefully increased and decreased - rather than jumping to 100% when speed is required. - - If you have a desktop machine then you should really be considering - the 'ondemand' governor instead, however if you are using a laptop, - PDA or even an AMD64 based computer (due to the unacceptable - step-by-step latency issues between the minimum and maximum frequency - transitions in the CPU) you will probably want to use this governor. - This governor adds the capability of hotpluging. - To compile this driver as a module, choose M here: the - module will be called cpufreq_ktoonservativeq. - - For details, take a look at linux/Documentation/cpu-freq. - - If in doubt, say N. 
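The smartassv2 and lionheart entries restored above carry no help text, but like every other CPU_FREQ_GOV_* symbol in this series they must be backed by an object that registers itself with the cpufreq core, just as cpufreq_abyssplugv2.c does via cpufreq_register_governor(). Below is a minimal sketch of that boilerplate in the 3.x-era governor-callback style this tree uses; the "demo" name and the pin-to-max behaviour are placeholders, not a description of either governor.

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/module.h>

static int cpufreq_governor_demo(struct cpufreq_policy *policy,
				 unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		/* Placeholder policy: pin to the maximum of the new limits. */
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
		break;
	case CPUFREQ_GOV_STOP:
		break;
	}
	return 0;
}

static struct cpufreq_governor cpufreq_gov_demo = {
	.name		= "demo",
	.governor	= cpufreq_governor_demo,
	.owner		= THIS_MODULE,
};

static int __init cpufreq_gov_demo_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_demo);
}

static void __exit cpufreq_gov_demo_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_demo);
}

MODULE_DESCRIPTION("Illustrative governor skeleton only");
MODULE_LICENSE("GPL");
module_init(cpufreq_gov_demo_init);
module_exit(cpufreq_gov_demo_exit);
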
+config CPU_FREQ_GOV_SMARTASSV2 + tristate "smartassv2" + depends on CPU_FREQ + help config CPU_FREQ_GOV_NIGHTMARE tristate "'nightmare' cpufreq governor" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index d553141e..8df2a4fd 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -5,24 +5,20 @@ obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o # CPUfreq governors obj-$(CONFIG_CPU_FREQ_GOV_ABYSSPLUG) += cpufreq_abyssplug.o -obj-$(CONFIG_CPU_FREQ_GOV_ABYSSPLUG) += cpufreq_abyssplugv2.o obj-$(CONFIG_CPU_FREQ_GOV_ADAPTIVE) += cpufreq_adaptive.o obj-$(CONFIG_CPU_FREQ_GOV_ASSWAX) += cpufreq_asswax.o obj-$(CONFIG_CPU_FREQ_GOV_BADASS) += cpufreq_badass.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o obj-$(CONFIG_CPU_FREQ_GOV_DANCEDANCE) += cpufreq_dancedance.o -obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND) += cpufreq_intellidemand.o -obj-$(CONFIG_CPU_FREQ_GOV_KTOONSERVATIVEQ) += cpufreq_ktoonservativeq.o obj-$(CONFIG_CPU_FREQ_GOV_NIGHTMARE) += cpufreq_nightmare.o obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o -obj-$(CONFIG_CPU_FREQ_GOV_PEGASUSQ) += cpufreq_pegasusq.o obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o -obj-$(CONFIG_CPU_FREQ_GOV_SLP) += cpufreq_slp.o obj-$(CONFIG_CPU_FREQ_GOV_SMARTASSH3) += cpufreq_smartassH3.o obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o +obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o diff --git a/drivers/cpufreq/cpufreq_abyssplugv2.c b/drivers/cpufreq/cpufreq_abyssplugv2.c deleted file mode 100644 index 545801e4..00000000 --- a/drivers/cpufreq/cpufreq_abyssplugv2.c +++ /dev/null @@ -1,1063 +0,0 @@ -/* - * drivers/cpufreq/cpufreq_abyssplugv2.c - * - * Copyright (C) 2001 Russell King - * (C) 2003 Venkatesh Pallipadi . - * Jun Nakajima - * (C) 2012 Dennis Rassmann - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * bds is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ - -#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) -#define DEF_FREQUENCY_UP_THRESHOLD (80) -#define DEF_SAMPLING_DOWN_FACTOR (1) -#define MAX_SAMPLING_DOWN_FACTOR (100000) -#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) -#define MICRO_FREQUENCY_UP_THRESHOLD (95) -#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) -#define MIN_FREQUENCY_UP_THRESHOLD (11) -#define MAX_FREQUENCY_UP_THRESHOLD (100) -#define MIN_FREQUENCY_DOWN_DIFFERENTIAL (1) - - -/* - * The polling frequency of this governor depends on the capability of - * the processor. Default polling frequency is 1000 times the transition - * latency of the processor. The governor will work on any processor with - * transition latency <= 10mS, using appropriate sampling - * rate. - * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) - * this governor will not work. - * All times here are in uS. 
- */ -#define MIN_SAMPLING_RATE_RATIO (2) - -static unsigned int min_sampling_rate; - -#define LATENCY_MULTIPLIER (1000) -#define MIN_LATENCY_MULTIPLIER (100) -#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) - -#define POWERSAVE_BIAS_MAXLEVEL (1000) -#define POWERSAVE_BIAS_MINLEVEL (-1000) - -static void do_bds_timer(struct work_struct *work); -static int cpufreq_governor_bds(struct cpufreq_policy *policy, - unsigned int event); - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ABYSSPLUG -static -#endif -struct cpufreq_governor cpufreq_gov_abyssplug = { - .name = "abyssplugv2", - .governor = cpufreq_governor_bds, - .max_transition_latency = TRANSITION_LATENCY_LIMIT, - .owner = THIS_MODULE, -}; - -/* Sampling types */ -enum {BDS_NORMAL_SAMPLE, BDS_SUB_SAMPLE}; - -struct cpu_bds_info_s { - cputime64_t prev_cpu_idle; - cputime64_t prev_cpu_iowait; - cputime64_t prev_cpu_wall; - cputime64_t prev_cpu_nice; - struct cpufreq_policy *cur_policy; - struct delayed_work work; - struct cpufreq_frequency_table *freq_table; - unsigned int freq_lo; - unsigned int freq_lo_jiffies; - unsigned int freq_hi_jiffies; - unsigned int rate_mult; - int cpu; - unsigned int sample_type:1; - /* - * percpu mutex that serializes governor limit change with - * do_bds_timer invocation. We do not want do_bds_timer to run - * when user is changing the governor or limits. - */ - struct mutex timer_mutex; -}; -static DEFINE_PER_CPU(struct cpu_bds_info_s, od_cpu_bds_info); - -static inline void bds_timer_init(struct cpu_bds_info_s *bds_info); -static inline void bds_timer_exit(struct cpu_bds_info_s *bds_info); - -static unsigned int bds_enable; /* number of CPUs using this policy */ - -/* - * bds_mutex protects bds_enable in governor start/stop. - */ -static DEFINE_MUTEX(bds_mutex); - -static struct workqueue_struct *input_wq; - -static DEFINE_PER_CPU(struct work_struct, bds_refresh_work); - -static struct bds_tuners { - unsigned int sampling_rate; - unsigned int up_threshold; - unsigned int down_differential; - unsigned int ignore_nice; - unsigned int sampling_down_factor; - int powersave_bias; - unsigned int io_is_busy; -} bds_tuners_ins = { - .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, - .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, - .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, - .ignore_nice = 0, - .powersave_bias = 0, -}; - -static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, - u64 *wall) -{ - u64 idle_time; - u64 cur_wall_time; - u64 busy_time; - - cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); - - busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; - - idle_time = cur_wall_time - busy_time; - if (wall) - *wall = jiffies_to_usecs(cur_wall_time); - - return jiffies_to_usecs(idle_time); -} - -static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) -{ - u64 idle_time = get_cpu_idle_time_us(cpu, wall); - - if (idle_time == -1ULL) - return get_cpu_idle_time_jiffy(cpu, wall); - - return idle_time; -} - -static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) -{ - u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); - - if (iowait_time == -1ULL) - return 0; - - return iowait_time; -} - -/* - * Find right freq to be set now with powersave_bias on. 
- * Returns the freq_hi to be used right now and will set freq_hi_jiffies, - * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. - */ -static unsigned int powersave_bias_target(struct cpufreq_policy *policy, - unsigned int freq_next, - unsigned int relation) -{ - unsigned int freq_req, freq_avg; - unsigned int freq_hi, freq_lo; - unsigned int index = 0; - unsigned int jiffies_total, jiffies_hi, jiffies_lo; - int freq_reduc; - struct cpu_bds_info_s *bds_info = &per_cpu(od_cpu_bds_info, - policy->cpu); - - if (!bds_info->freq_table) { - bds_info->freq_lo = 0; - bds_info->freq_lo_jiffies = 0; - return freq_next; - } - - cpufreq_frequency_table_target(policy, bds_info->freq_table, freq_next, - relation, &index); - freq_req = bds_info->freq_table[index].frequency; - freq_reduc = freq_req * bds_tuners_ins.powersave_bias / 1000; - freq_avg = freq_req - freq_reduc; - - /* Find freq bounds for freq_avg in freq_table */ - index = 0; - cpufreq_frequency_table_target(policy, bds_info->freq_table, freq_avg, - CPUFREQ_RELATION_H, &index); - freq_lo = bds_info->freq_table[index].frequency; - index = 0; - cpufreq_frequency_table_target(policy, bds_info->freq_table, freq_avg, - CPUFREQ_RELATION_L, &index); - freq_hi = bds_info->freq_table[index].frequency; - - /* Find out how long we have to be in hi and lo freqs */ - if (freq_hi == freq_lo) { - bds_info->freq_lo = 0; - bds_info->freq_lo_jiffies = 0; - return freq_lo; - } - jiffies_total = usecs_to_jiffies(bds_tuners_ins.sampling_rate); - jiffies_hi = (freq_avg - freq_lo) * jiffies_total; - jiffies_hi += ((freq_hi - freq_lo) / 2); - jiffies_hi /= (freq_hi - freq_lo); - jiffies_lo = jiffies_total - jiffies_hi; - bds_info->freq_lo = freq_lo; - bds_info->freq_lo_jiffies = jiffies_lo; - bds_info->freq_hi_jiffies = jiffies_hi; - return freq_hi; -} - -static int abyssplug_powersave_bias_setspeed(struct cpufreq_policy *policy, - struct cpufreq_policy *altpolicy, - int level) -{ - if (level == POWERSAVE_BIAS_MAXLEVEL) { - /* maximum powersave; set to lowest frequency */ - __cpufreq_driver_target(policy, - (altpolicy) ? altpolicy->min : policy->min, - CPUFREQ_RELATION_L); - return 1; - } else if (level == POWERSAVE_BIAS_MINLEVEL) { - /* minimum powersave; set to highest frequency */ - __cpufreq_driver_target(policy, - (altpolicy) ? 
altpolicy->max : policy->max, - CPUFREQ_RELATION_H); - return 1; - } - return 0; -} - -static void abyssplug_powersave_bias_init_cpu(int cpu) -{ - struct cpu_bds_info_s *bds_info = &per_cpu(od_cpu_bds_info, cpu); - bds_info->freq_table = cpufreq_frequency_get_table(cpu); - bds_info->freq_lo = 0; -} - -static void abyssplug_powersave_bias_init(void) -{ - int i; - for_each_online_cpu(i) { - abyssplug_powersave_bias_init_cpu(i); - } -} - -/************************** sysfs interface ************************/ - -static ssize_t show_sampling_rate_min(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - return sprintf(buf, "%u\n", min_sampling_rate); -} - -define_one_global_ro(sampling_rate_min); - -/* cpufreq_abyssplug Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct kobject *kobj, struct attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%u\n", bds_tuners_ins.object); \ -} -show_one(sampling_rate, sampling_rate); -show_one(io_is_busy, io_is_busy); -show_one(up_threshold, up_threshold); -show_one(down_differential, down_differential); -show_one(sampling_down_factor, sampling_down_factor); -show_one(ignore_nice_load, ignore_nice); - -static ssize_t show_powersave_bias -(struct kobject *kobj, struct attribute *attr, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%d\n", bds_tuners_ins.powersave_bias); -} - -static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - bds_tuners_ins.sampling_rate = max(input, min_sampling_rate); - return count; -} - -static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - bds_tuners_ins.io_is_busy = !!input; - return count; -} - -static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || - input < MIN_FREQUENCY_UP_THRESHOLD) { - return -EINVAL; - } - bds_tuners_ins.up_threshold = input; - return count; -} - -static ssize_t store_down_differential(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input >= bds_tuners_ins.up_threshold || - input < MIN_FREQUENCY_DOWN_DIFFERENTIAL) { - return -EINVAL; - } - - bds_tuners_ins.down_differential = input; - - return count; -} - -static ssize_t store_sampling_down_factor(struct kobject *a, - struct attribute *b, const char *buf, size_t count) -{ - unsigned int input, j; - int ret; - ret = sscanf(buf, "%u", &input); - - if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) - return -EINVAL; - bds_tuners_ins.sampling_down_factor = input; - - /* Reset down sampling multiplier in case it was active */ - for_each_online_cpu(j) { - struct cpu_bds_info_s *bds_info; - bds_info = &per_cpu(od_cpu_bds_info, j); - bds_info->rate_mult = 1; - } - return count; -} - -static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - unsigned int input; - int ret; - - unsigned int j; - - ret = sscanf(buf, "%u", &input); - if (ret != 1) - return -EINVAL; - - if (input > 1) - input = 1; - - if (input == 
bds_tuners_ins.ignore_nice) { /* nothing to do */ - return count; - } - bds_tuners_ins.ignore_nice = input; - - /* we need to re-evaluate prev_cpu_idle */ - for_each_online_cpu(j) { - struct cpu_bds_info_s *bds_info; - bds_info = &per_cpu(od_cpu_bds_info, j); - bds_info->prev_cpu_idle = get_cpu_idle_time(j, - &bds_info->prev_cpu_wall); - if (bds_tuners_ins.ignore_nice) - bds_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - - } - return count; -} - -static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, - const char *buf, size_t count) -{ - int input = 0; - int bypass = 0; - int ret, cpu, reenable_timer, j; - struct cpu_bds_info_s *bds_info; - - struct cpumask cpus_timer_done; - cpumask_clear(&cpus_timer_done); - - ret = sscanf(buf, "%d", &input); - - if (ret != 1) - return -EINVAL; - - if (input >= POWERSAVE_BIAS_MAXLEVEL) { - input = POWERSAVE_BIAS_MAXLEVEL; - bypass = 1; - } else if (input <= POWERSAVE_BIAS_MINLEVEL) { - input = POWERSAVE_BIAS_MINLEVEL; - bypass = 1; - } - - if (input == bds_tuners_ins.powersave_bias) { - /* no change */ - return count; - } - - reenable_timer = ((bds_tuners_ins.powersave_bias == - POWERSAVE_BIAS_MAXLEVEL) || - (bds_tuners_ins.powersave_bias == - POWERSAVE_BIAS_MINLEVEL)); - - bds_tuners_ins.powersave_bias = input; - if (!bypass) { - if (reenable_timer) { - /* reinstate bds timer */ - for_each_online_cpu(cpu) { - if (lock_policy_rwsem_write(cpu) < 0) - continue; - - bds_info = &per_cpu(od_cpu_bds_info, cpu); - - for_each_cpu(j, &cpus_timer_done) { - if (!bds_info->cur_policy) { - printk(KERN_ERR - "%s Dbs policy is NULL\n", - __func__); - goto skip_this_cpu; - } - if (cpumask_test_cpu(j, bds_info-> - cur_policy->cpus)) - goto skip_this_cpu; - } - - cpumask_set_cpu(cpu, &cpus_timer_done); - if (bds_info->cur_policy) { - /* restart bds timer */ - bds_timer_init(bds_info); - } -skip_this_cpu: - unlock_policy_rwsem_write(cpu); - } - } - abyssplug_powersave_bias_init(); - } else { - /* running at maximum or minimum frequencies; cancel - bds timer as periodic load sampling is not necessary */ - for_each_online_cpu(cpu) { - if (lock_policy_rwsem_write(cpu) < 0) - continue; - - bds_info = &per_cpu(od_cpu_bds_info, cpu); - - for_each_cpu(j, &cpus_timer_done) { - if (!bds_info->cur_policy) { - printk(KERN_ERR - "%s Dbs policy is NULL\n", - __func__); - goto skip_this_cpu_bypass; - } - if (cpumask_test_cpu(j, bds_info-> - cur_policy->cpus)) - goto skip_this_cpu_bypass; - } - - cpumask_set_cpu(cpu, &cpus_timer_done); - - if (bds_info->cur_policy) { - /* cpu using abyssplug, cancel bds timer */ - mutex_lock(&bds_info->timer_mutex); - bds_timer_exit(bds_info); - - abyssplug_powersave_bias_setspeed( - bds_info->cur_policy, - NULL, - input); - - mutex_unlock(&bds_info->timer_mutex); - } -skip_this_cpu_bypass: - unlock_policy_rwsem_write(cpu); - } - } - - return count; -} - -define_one_global_rw(sampling_rate); -define_one_global_rw(io_is_busy); -define_one_global_rw(up_threshold); -define_one_global_rw(down_differential); -define_one_global_rw(sampling_down_factor); -define_one_global_rw(ignore_nice_load); -define_one_global_rw(powersave_bias); - - -static struct attribute *bds_attributes[] = { - &sampling_rate_min.attr, - &sampling_rate.attr, - &up_threshold.attr, - &down_differential.attr, - &sampling_down_factor.attr, - &ignore_nice_load.attr, - &powersave_bias.attr, - &io_is_busy.attr, - NULL -}; - -static struct attribute_group bds_attr_group = { - .attrs = bds_attributes, - .name = "abyssplugv2", -}; - -/************************** 
sysfs end ************************/ - -static void bds_freq_increase(struct cpufreq_policy *p, unsigned int freq) -{ - if (bds_tuners_ins.powersave_bias) - freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); - else if (p->cur == p->max) - return; - - __cpufreq_driver_target(p, freq, bds_tuners_ins.powersave_bias ? - CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); -} - -static void bds_check_cpu(struct cpu_bds_info_s *this_bds_info) -{ - /* Extrapolated load of this CPU */ - unsigned int load_at_max_freq = 0; - unsigned int max_load_freq; - /* Current load across this CPU */ - unsigned int cur_load = 0; - - struct cpufreq_policy *policy; - unsigned int j; - static unsigned int phase = 0; - static unsigned int counter = 0; - unsigned int new_phase_max = 0; - - this_bds_info->freq_lo = 0; - policy = this_bds_info->cur_policy; - - /* - * Every sampling_rate, we check, if current idle time is less - * than 20% (default), then we try to increase frequency - * Every sampling_rate, we look for a the lowest - * frequency which can sustain the load while keeping idle time over - * 30%. If such a frequency exist, we try to decrease to this frequency. - * - * Any frequency increase takes it to the maximum frequency. - * Frequency reduction happens at minimum steps of - * 5% (default) of current frequency - */ - - /* Get Absolute Load - in terms of freq */ - max_load_freq = 0; - - for_each_cpu(j, policy->cpus) { - struct cpu_bds_info_s *j_bds_info; - cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; - unsigned int idle_time, wall_time, iowait_time; - unsigned int load_freq; - int freq_avg; - - j_bds_info = &per_cpu(od_cpu_bds_info, j); - - cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); - cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); - - wall_time = (unsigned int) (cur_wall_time - j_bds_info->prev_cpu_wall); - j_bds_info->prev_cpu_wall = cur_wall_time; - - idle_time = (unsigned int) (cur_idle_time - j_bds_info->prev_cpu_idle); - j_bds_info->prev_cpu_idle = cur_idle_time; - - iowait_time = (unsigned int) (cur_iowait_time - j_bds_info->prev_cpu_iowait); - j_bds_info->prev_cpu_iowait = cur_iowait_time; - - if (bds_tuners_ins.ignore_nice) { - cputime64_t cur_nice; - unsigned long cur_nice_jiffies; - - cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - - j_bds_info->prev_cpu_nice; - /* - * Assumption: nice time between sampling periods will - * be less than 2^32 jiffies for 32 bit sys - */ - cur_nice_jiffies = (unsigned long) - cputime64_to_jiffies64(cur_nice); - - j_bds_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - idle_time += jiffies_to_usecs(cur_nice_jiffies); - } - - /* - * For the purpose of abyssplug, waiting for disk IO is an - * indication that you're performance critical, and not that - * the system is actually idle. So subtract the iowait time - * from the cpu idle time. 
- */ - - if (bds_tuners_ins.io_is_busy && idle_time >= iowait_time) - idle_time -= iowait_time; - - if (unlikely(!wall_time || wall_time < idle_time)) - continue; - - cur_load = 100 * (wall_time - idle_time) / wall_time; - - freq_avg = __cpufreq_driver_getavg(policy, j); - if (freq_avg <= 0) - freq_avg = policy->cur; - - load_freq = cur_load * freq_avg; - if (load_freq > max_load_freq) - max_load_freq = load_freq; - } - /* calculate the scaled load across CPU */ - load_at_max_freq = (cur_load * policy->cur)/policy->cpuinfo.max_freq; - - cpufreq_notify_utilization(policy, load_at_max_freq); - - /* Check for frequency increase */ - if (max_load_freq > bds_tuners_ins.up_threshold * policy->cur) { - /* If switching to max speed, apply sampling_down_factor */ - - /* busy phase */ - if (policy->cur < policy->max) - this_bds_info->rate_mult = - bds_tuners_ins.sampling_down_factor; - bds_freq_increase(policy, policy->max); - return; - } - - - /* Check for frequency decrease */ - /* if we cannot reduce the frequency anymore, break out early */ - if (policy->cur == policy->min) - return; - - /* - * The optimal frequency is the frequency that is the lowest that - * can support the current CPU usage without triggering the up - * policy. To be safe, we focus 10 points under the threshold. - */ - if (max_load_freq < - (bds_tuners_ins.up_threshold - bds_tuners_ins.down_differential) * - policy->cur) { - unsigned int freq_next; - freq_next = max_load_freq / - (bds_tuners_ins.up_threshold - - bds_tuners_ins.down_differential); - - /* No longer fully busy, reset rate_mult */ - this_bds_info->rate_mult = 1; - - if (freq_next < policy->min) - freq_next = policy->min; - - if (!bds_tuners_ins.powersave_bias) { - __cpufreq_driver_target(policy, freq_next, - CPUFREQ_RELATION_L); - } else { - int freq = powersave_bias_target(policy, freq_next, - CPUFREQ_RELATION_L); - __cpufreq_driver_target(policy, freq, - CPUFREQ_RELATION_L); - } - } -} - -static void do_bds_timer(struct work_struct *work) -{ - struct cpu_bds_info_s *bds_info = - container_of(work, struct cpu_bds_info_s, work.work); - unsigned int cpu = bds_info->cpu; - int sample_type = bds_info->sample_type; - - int delay; - - mutex_lock(&bds_info->timer_mutex); - - /* Common NORMAL_SAMPLE setup */ - bds_info->sample_type = BDS_NORMAL_SAMPLE; - if (!bds_tuners_ins.powersave_bias || - sample_type == BDS_NORMAL_SAMPLE) { - bds_check_cpu(bds_info); - if (bds_info->freq_lo) { - /* Setup timer for SUB_SAMPLE */ - bds_info->sample_type = BDS_SUB_SAMPLE; - delay = bds_info->freq_hi_jiffies; - } else { - /* We want all CPUs to do sampling nearly on - * same jiffy - */ - delay = usecs_to_jiffies(bds_tuners_ins.sampling_rate - * bds_info->rate_mult); - - if (num_online_cpus() > 1) - delay -= jiffies % delay; - } - } else { - __cpufreq_driver_target(bds_info->cur_policy, - bds_info->freq_lo, CPUFREQ_RELATION_H); - delay = bds_info->freq_lo_jiffies; - } - schedule_delayed_work_on(cpu, &bds_info->work, delay); - mutex_unlock(&bds_info->timer_mutex); -} - -static inline void bds_timer_init(struct cpu_bds_info_s *bds_info) -{ - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(bds_tuners_ins.sampling_rate); - - if (num_online_cpus() > 1) - delay -= jiffies % delay; - - bds_info->sample_type = BDS_NORMAL_SAMPLE; - INIT_DELAYED_WORK_DEFERRABLE(&bds_info->work, do_bds_timer); - schedule_delayed_work_on(bds_info->cpu, &bds_info->work, delay); -} - -static inline void bds_timer_exit(struct cpu_bds_info_s *bds_info) -{ - 
cancel_delayed_work_sync(&bds_info->work); -} - -/* - * Not all CPUs want IO time to be accounted as busy; this dependson how - * efficient idling at a higher frequency/voltage is. - * Pavel Machek says this is not so for various generations of AMD and old - * Intel systems. - * Mike Chan (androidlcom) calis this is also not true for ARM. - * Because of this, whitelist specific known (series) of CPUs by default, and - * leave all others up to the user. - */ -static int should_io_be_busy(void) -{ -#if defined(CONFIG_X86) - /* - * For Intel, Core 2 (model 15) andl later have an efficient idle. - */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - boot_cpu_data.x86 == 6 && - boot_cpu_data.x86_model >= 15) - return 1; -#endif - return 0; -} - -static void bds_refresh_callback(struct work_struct *unused) -{ - struct cpufreq_policy *policy; - struct cpu_bds_info_s *this_bds_info; - unsigned int cpu = smp_processor_id(); - - if (lock_policy_rwsem_write(cpu) < 0) - return; - - this_bds_info = &per_cpu(od_cpu_bds_info, cpu); - policy = this_bds_info->cur_policy; - if (!policy) { - /* CPU not using abyssplug governor */ - unlock_policy_rwsem_write(cpu); - return; - } - - if (policy->cur < policy->max) { - policy->cur = policy->max; - - __cpufreq_driver_target(policy, policy->max, - CPUFREQ_RELATION_L); - this_bds_info->prev_cpu_idle = get_cpu_idle_time(cpu, - &this_bds_info->prev_cpu_wall); - } - unlock_policy_rwsem_write(cpu); -} - -static unsigned int enable_bds_input_event; -static void bds_input_event(struct input_handle *handle, unsigned int type, - unsigned int code, int value) -{ - int i; - - if (enable_bds_input_event) { - - if ((bds_tuners_ins.powersave_bias == POWERSAVE_BIAS_MAXLEVEL) || - (bds_tuners_ins.powersave_bias == POWERSAVE_BIAS_MINLEVEL)) { - /* nothing to do */ - return; - } - - for_each_online_cpu(i) { - queue_work_on(i, input_wq, &per_cpu(bds_refresh_work, i)); - } - } -} - -static int bds_input_connect(struct input_handler *handler, - struct input_dev *dev, const struct input_device_id *id) -{ - struct input_handle *handle; - int error; - - handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); - if (!handle) - return -ENOMEM; - - handle->dev = dev; - handle->handler = handler; - handle->name = "cpufreq"; - - error = input_register_handle(handle); - if (error) - goto err2; - - error = input_open_device(handle); - if (error) - goto err1; - - return 0; -err1: - input_unregister_handle(handle); -err2: - kfree(handle); - return error; -} - -static void bds_input_disconnect(struct input_handle *handle) -{ - input_close_device(handle); - input_unregister_handle(handle); - kfree(handle); -} - -static const struct input_device_id bds_ids[] = { - { .driver_info = 1 }, - { }, -}; - -static struct input_handler bds_input_handler = { - .event = bds_input_event, - .connect = bds_input_connect, - .disconnect = bds_input_disconnect, - .name = "cpufreq_abyss", - .id_table = bds_ids, -}; - -static int cpufreq_governor_bds(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpu_bds_info_s *this_bds_info; - unsigned int j; - int rc; - - this_bds_info = &per_cpu(od_cpu_bds_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - mutex_lock(&bds_mutex); - - bds_enable++; - for_each_cpu(j, policy->cpus) { - struct cpu_bds_info_s *j_bds_info; - j_bds_info = &per_cpu(od_cpu_bds_info, j); - j_bds_info->cur_policy = policy; - - j_bds_info->prev_cpu_idle = get_cpu_idle_time(j, - 
&j_bds_info->prev_cpu_wall); - if (bds_tuners_ins.ignore_nice) { - j_bds_info->prev_cpu_nice = - kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - } - } - this_bds_info->cpu = cpu; - this_bds_info->rate_mult = 1; - abyssplug_powersave_bias_init_cpu(cpu); - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (bds_enable == 1) { - unsigned int latency; - - rc = sysfs_create_group(cpufreq_global_kobject, - &bds_attr_group); - if (rc) { - mutex_unlock(&bds_mutex); - return rc; - } - - /* policy latency is in nS. Convert it to uS first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - /* Bring kernel and HW constraints together */ - min_sampling_rate = max(min_sampling_rate, - MIN_LATENCY_MULTIPLIER * latency); - bds_tuners_ins.sampling_rate = - max(min_sampling_rate, - latency * LATENCY_MULTIPLIER); - bds_tuners_ins.io_is_busy = should_io_be_busy(); - } - if (!cpu) - rc = input_register_handler(&bds_input_handler); - mutex_unlock(&bds_mutex); - - mutex_init(&this_bds_info->timer_mutex); - - if (!abyssplug_powersave_bias_setspeed( - this_bds_info->cur_policy, - NULL, - bds_tuners_ins.powersave_bias)) - bds_timer_init(this_bds_info); - break; - - case CPUFREQ_GOV_STOP: - bds_timer_exit(this_bds_info); - - mutex_lock(&bds_mutex); - mutex_destroy(&this_bds_info->timer_mutex); - bds_enable--; - /* If device is being removed, policy is no longer - * valid. */ - this_bds_info->cur_policy = NULL; - if (!cpu) - input_unregister_handler(&bds_input_handler); - mutex_unlock(&bds_mutex); - if (!bds_enable) - sysfs_remove_group(cpufreq_global_kobject, - &bds_attr_group); - - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&this_bds_info->timer_mutex); - if (policy->max < this_bds_info->cur_policy->cur) - __cpufreq_driver_target(this_bds_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > this_bds_info->cur_policy->cur) - __cpufreq_driver_target(this_bds_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); - else if (bds_tuners_ins.powersave_bias != 0) - abyssplug_powersave_bias_setspeed( - this_bds_info->cur_policy, - policy, - bds_tuners_ins.powersave_bias); - mutex_unlock(&this_bds_info->timer_mutex); - break; - } - return 0; -} - -static int __init cpufreq_gov_bds_init(void) -{ - cputime64_t wall; - u64 idle_time; - unsigned int i; - int cpu = get_cpu(); - - idle_time = get_cpu_idle_time_us(cpu, &wall); - put_cpu(); - if (idle_time != -1ULL) { - /* Idle micro accounting is supported. Use finer thresholds */ - bds_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; - bds_tuners_ins.down_differential = - MICRO_FREQUENCY_DOWN_DIFFERENTIAL; - /* - * In no_hz/micro accounting case we set the minimum frequency - * not depending on HZ, but fixed (very low). The deferred - * timer might skip some samples if idle/sleeping as needed. 
- */ - min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; - } else { - /* For correct statistics, we need 10 ticks for each measure */ - min_sampling_rate = - MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); - } - - input_wq = create_workqueue("iewq"); - if (!input_wq) { - printk(KERN_ERR "Failed to create iewq workqueue\n"); - return -EFAULT; - } - for_each_possible_cpu(i) { - INIT_WORK(&per_cpu(bds_refresh_work, i), bds_refresh_callback); - } - - return cpufreq_register_governor(&cpufreq_gov_abyssplug); -} - -static void __exit cpufreq_gov_bds_exit(void) -{ - cpufreq_unregister_governor(&cpufreq_gov_abyssplug); - destroy_workqueue(input_wq); -} - -static int set_enable_bds_input_event_param(const char *val, struct kernel_param *kp) -{ - int ret = 0; - - ret = param_set_uint(val, kp); - if (ret) - pr_err("%s: error setting value %d\n", __func__, ret); - - return ret; -} -module_param_call(enable_bds_input_event, set_enable_bds_input_event_param, param_get_uint, - &enable_bds_input_event, S_IWUSR | S_IRUGO); - - -MODULE_AUTHOR("Venkatesh Pallipadi "); -MODULE_AUTHOR("Alexey Starikovskiy "); -MODULE_AUTHOR("Dennis Rassmann "); -MODULE_DESCRIPTION("'cpufreq_abyssplugv2' - An abyssplug cpufreq governor based on ondemand"); - -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ABYSSPLUG -fs_initcall(cpufreq_gov_bds_init); -#else -module_init(cpufreq_gov_bds_init); -#endif -module_exit(cpufreq_gov_bds_exit); - From 7867f1e11449a35c11e5ac31642d1335fcd0e5f2 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Mon, 12 Aug 2013 12:34:06 -0400 Subject: [PATCH 34/35] Add govs again ughhh --- drivers/cpufreq/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 8df2a4fd..84333950 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_SMARTASSH3) += cpufreq_smartassH3.o obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o +obj-$(CONFIG_CPU_FREQ_GOV_SMARTASSV2) += cpufreq_smartassv2.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o From 91babebdcd21980cfb921741a947a7f38362e212 Mon Sep 17 00:00:00 2001 From: Lens-F Date: Mon, 12 Aug 2013 13:15:44 -0400 Subject: [PATCH 35/35] CPUFREQ: Add Govs Perm. & Fix warnings/errors and change values for m7 --- drivers/cpufreq/Kconfig | 32 +++++++++++++++++++++++++++++++- drivers/cpufreq/Makefile | 3 +++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 57d2c3a0..5bbb4a78 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -104,10 +104,25 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE driver. Fallback governor will be the performance governor. 
config CPU_FREQ_DEFAULT_GOV_SMARTASSV2 - bool "badass" + bool "smartassv2" select CPU_FREQ_GOV_SMARTASSV2 help +config CPU_FREQ_DEFAULT_GOV_SAVAGEDZEN + bool "savagedzen" + select CPU_FREQ_GOV_SAVAGEDZEN + help + +config CPU_FREQ_DEFAULT_GOV_LULZACTIVE + bool "lulzactive" + select CPU_FREQ_GOV_LULZACTIVE + help + +config CPU_FREQ_DEFAULT_GOV_MINMAX + bool "minmax" + select CPU_FREQ_GOV_MINMAX + help + config CPU_FREQ_DEFAULT_GOV_LIONHEART bool "lionheart" select CPU_FREQ_GOV_LIONHEART @@ -334,6 +349,21 @@ config CPU_FREQ_GOV_SMARTASSV2 depends on CPU_FREQ help +config CPU_FREQ_GOV_SAVAGEDZEN + tristate "savagedzen" + depends on CPU_FREQ + help + +config CPU_FREQ_GOV_LULZACTIVE + tristate "lulzactive" + depends on CPU_FREQ + help + +config CPU_FREQ_GOV_MINMAX + tristate "minmax" + depends on CPU_FREQ + help + config CPU_FREQ_GOV_NIGHTMARE tristate "'nightmare' cpufreq governor" depends on CPU_FREQ diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 84333950..e82f56c4 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -20,6 +20,9 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART) += cpufreq_lionheart.o obj-$(CONFIG_CPU_FREQ_GOV_SMARTASSV2) += cpufreq_smartassv2.o +obj-$(CONFIG_CPU_FREQ_GOV_SAVAGEDZEN) += cpufreq_savagedzen.o +obj-$(CONFIG_CPU_FREQ_GOV_LULZACTIVE) += cpufreq_lulzactive.o +obj-$(CONFIG_CPU_FREQ_GOV_MINMAX) += cpufreq_minmax.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
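
The Makefile rules above only arrange for each governor object to be linked into the kernel; every cpufreq_*.c file they name still has to register a struct cpufreq_governor with the cpufreq core. Below is a minimal sketch of that registration against the 3.x governor interface used elsewhere in this series (CPUFREQ_GOV_START/STOP/LIMITS events, cpufreq_register_governor()). The governor name "example" and its pin-to-maximum behaviour are placeholders for illustration only, not the actual contents of cpufreq_savagedzen.c, cpufreq_lulzactive.c or cpufreq_minmax.c.

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/module.h>

/* Governor callback: invoked by the cpufreq core on start/stop/limit events. */
static int cpufreq_governor_example(struct cpufreq_policy *policy,
				    unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		/* Re-evaluate whenever the governor starts or the limits
		 * change; this trivial example just pins the policy to its
		 * maximum frequency. */
		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);
		break;
	case CPUFREQ_GOV_STOP:
		/* Nothing to tear down for this trivial example. */
		break;
	default:
		break;
	}
	return 0;
}

static struct cpufreq_governor cpufreq_gov_example = {
	.name		= "example",
	.governor	= cpufreq_governor_example,
	.owner		= THIS_MODULE,
};

static int __init cpufreq_gov_example_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_example);
}

static void __exit cpufreq_gov_example_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_example);
}

MODULE_DESCRIPTION("'example' - placeholder cpufreq governor skeleton");
MODULE_LICENSE("GPL");

module_init(cpufreq_gov_example_init);
module_exit(cpufreq_gov_example_exit);

As with the abyssplug governor earlier in the series, a governor that can be selected as the system default in Kconfig is typically registered via fs_initcall() rather than module_init() when its CONFIG_CPU_FREQ_DEFAULT_GOV_* option is set, so that it is available before the low-level cpufreq driver probes.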