Merge branch 'android-tegra-2.6.36' into android-tegra-moto-2.6.36

This commit is contained in:
Colin Cross
2010-12-01 18:27:33 -08:00
16 changed files with 1417 additions and 364 deletions

View File

@@ -28,6 +28,7 @@ Contents:
2.3 Userspace
2.4 Ondemand
2.5 Conservative
2.6 Interactive
3. The Governor Interface in the CPUfreq Core
@@ -182,6 +183,41 @@ governor but for the opposite direction. For example when set to its
default value of '20' it means that the CPU usage needs to be below
20% between samples to have the frequency decreased.
2.6 Interactive
---------------
The CPUfreq governor "interactive" is designed for latency-sensitive,
interactive workloads. This governor sets the CPU speed depending on
usage, similar to "ondemand" and "conservative" governors. However,
the governor is more aggressive about scaling the CPU speed up in
response to CPU-intensive activity.
Sampling the CPU load every X ms can lead to under-powering the CPU
for X ms, leading to dropped frames, stuttering UI, etc. Instead of
sampling the cpu at a specified rate, the interactive governor will
check whether to scale the cpu frequency up soon after coming out of
idle. When the cpu comes out of idle, a timer is configured to fire
within 1-2 ticks. If the cpu is very busy between exiting idle and
when the timer fires then we assume the cpu is underpowered and ramp
to MAX speed.
If the cpu was not sufficiently busy to immediately ramp to MAX speed,
then governor evaluates the cpu load since the last speed adjustment,
choosing the highest value between that longer-term load and the
short-term load since idle exit to determine the cpu speed to ramp to.
The tuneable values for this governor are:
min_sample_time: The minimum amount of time to spend at the current
frequency before ramping down. This is to ensure that the governor has
seen enough historic cpu load data to determine the appropriate
workload. Default is 80000 uS.
go_maxspeed_load: The CPU load at which to ramp to max speed. Default
is 85.
3. The Governor Interface in the CPUfreq Core
=============================================

View File

@@ -32,6 +32,7 @@ void __init tegra_reserve(unsigned long carveout_size, unsigned long fb_size,
void __init tegra_protected_aperture_init(unsigned long aperture);
void tegra_move_framebuffer(unsigned long to, unsigned long from,
unsigned long size);
int tegra_dvfs_rail_disable_by_name(const char *reg_id);
extern unsigned long tegra_bootloader_fb_start;
extern unsigned long tegra_bootloader_fb_size;

View File

@@ -87,7 +87,7 @@ static inline bool clk_is_auto_dvfs(struct clk *c)
static inline bool clk_is_dvfs(struct clk *c)
{
return c->is_dvfs;
return (c->dvfs != NULL);
}
static inline bool clk_cansleep(struct clk *c)
@@ -207,22 +207,6 @@ void clk_set_cansleep(struct clk *c)
mutex_unlock(&clock_list_lock);
}
int tegra_dvfs_set_rate(struct clk *c, unsigned long rate)
{
unsigned long flags;
int ret;
if (!clk_is_dvfs(c))
return -EINVAL;
clk_lock_save(c, flags);
ret = tegra_dvfs_set_rate_locked(c, rate);
clk_unlock_restore(c, flags);
return ret;
}
EXPORT_SYMBOL(tegra_dvfs_set_rate);
int clk_reparent(struct clk *c, struct clk *parent)
{
c->parent = parent;
@@ -233,8 +217,6 @@ void clk_init(struct clk *c)
{
clk_lock_init(c);
INIT_LIST_HEAD(&c->dvfs);
if (c->ops && c->ops->init)
c->ops->init(c);
@@ -260,7 +242,7 @@ int clk_enable(struct clk *c)
clk_lock_save(c, flags);
if (clk_is_auto_dvfs(c)) {
ret = tegra_dvfs_set_rate_locked(c, clk_get_rate_locked(c));
ret = tegra_dvfs_set_rate(c, clk_get_rate_locked(c));
if (ret)
goto out;
}
@@ -313,7 +295,7 @@ void clk_disable(struct clk *c)
c->refcnt--;
if (clk_is_auto_dvfs(c) && c->refcnt == 0)
tegra_dvfs_set_rate_locked(c, 0);
tegra_dvfs_set_rate(c, 0);
clk_unlock_restore(c, flags);
}
@@ -338,7 +320,7 @@ int clk_set_parent(struct clk *c, struct clk *parent)
if (clk_is_auto_dvfs(c) && c->refcnt > 0 &&
(!c->parent || new_rate > old_rate)) {
ret = tegra_dvfs_set_rate_locked(c, new_rate);
ret = tegra_dvfs_set_rate(c, new_rate);
if (ret)
goto out;
}
@@ -349,7 +331,7 @@ int clk_set_parent(struct clk *c, struct clk *parent)
if (clk_is_auto_dvfs(c) && c->refcnt > 0 &&
new_rate < old_rate)
ret = tegra_dvfs_set_rate_locked(c, new_rate);
ret = tegra_dvfs_set_rate(c, new_rate);
out:
clk_unlock_restore(c, flags);
@@ -382,7 +364,7 @@ int clk_set_rate(struct clk *c, unsigned long rate)
rate = c->max_rate;
if (clk_is_auto_dvfs(c) && rate > old_rate && c->refcnt > 0) {
ret = tegra_dvfs_set_rate_locked(c, rate);
ret = tegra_dvfs_set_rate(c, rate);
if (ret)
goto out;
}
@@ -392,7 +374,7 @@ int clk_set_rate(struct clk *c, unsigned long rate)
goto out;
if (clk_is_auto_dvfs(c) && rate < old_rate && c->refcnt > 0)
ret = tegra_dvfs_set_rate_locked(c, rate);
ret = tegra_dvfs_set_rate(c, rate);
out:
clk_unlock_restore(c, flags);
@@ -526,36 +508,6 @@ void __init tegra_init_clock(void)
tegra2_init_dvfs();
}
/*
* Iterate through all clocks, setting the dvfs rate to the current clock
* rate on all auto dvfs clocks, and to the saved dvfs rate on all manual
* dvfs clocks. Used to enable dvfs during late init, after the regulators
* are available.
*/
void __init tegra_clk_set_dvfs_rates(void)
{
unsigned long flags;
struct clk *c;
mutex_lock(&clock_list_lock);
list_for_each_entry(c, &clocks, node) {
clk_lock_save(c, flags);
if (clk_is_auto_dvfs(c)) {
if (c->refcnt > 0)
tegra_dvfs_set_rate_locked(c,
clk_get_rate_locked(c));
else
tegra_dvfs_set_rate_locked(c, 0);
} else if (clk_is_dvfs(c)) {
tegra_dvfs_set_rate_locked(c, c->dvfs_rate);
}
clk_unlock_restore(c, flags);
}
mutex_unlock(&clock_list_lock);
}
/*
* Iterate through all clocks, disabling any for which the refcount is 0
* but the clock init detected the bootloader left the clock on.
@@ -587,7 +539,6 @@ int __init tegra_late_init_clock(void)
{
tegra_dvfs_late_init();
tegra_disable_boot_clocks();
tegra_clk_set_dvfs_rates();
return 0;
}
late_initcall(tegra_late_init_clock);
@@ -711,7 +662,7 @@ static void dvfs_show_one(struct seq_file *s, struct dvfs *d, int level)
{
seq_printf(s, "%*s %-*s%21s%d mV\n",
level * 3 + 1, "",
30 - level * 3, d->reg_id,
30 - level * 3, d->dvfs_rail->reg_id,
"",
d->cur_millivolts);
}
@@ -719,7 +670,6 @@ static void dvfs_show_one(struct seq_file *s, struct dvfs *d, int level)
static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
{
struct clk *child;
struct dvfs *d;
const char *state = "uninit";
char div[8] = {0};
@@ -752,8 +702,8 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
30 - level * 3, c->name,
state, c->refcnt, div, clk_get_rate_all_locked(c));
list_for_each_entry(d, &c->dvfs, node)
dvfs_show_one(s, d, level + 1);
if (c->dvfs)
dvfs_show_one(s, c->dvfs, level + 1);
list_for_each_entry(child, &clocks, node) {
if (child->parent != c)

View File

@@ -77,7 +77,7 @@ enum clk_state {
struct clk {
/* node for master clocks list */
struct list_head node; /* node for list of all clocks */
struct list_head dvfs; /* list of dvfs dependencies */
struct dvfs *dvfs;
struct clk_lookup lookup;
#ifdef CONFIG_DEBUG_FS
@@ -89,7 +89,7 @@ struct clk {
unsigned long dvfs_rate;
unsigned long rate;
unsigned long max_rate;
bool is_dvfs;
unsigned long min_rate;
bool auto_dvfs;
bool cansleep;
u32 flags;
@@ -105,6 +105,8 @@ struct clk {
u32 reg;
u32 reg_shift;
struct list_head shared_bus_list;
union {
struct {
unsigned int clk_num;
@@ -127,10 +129,6 @@ struct clk {
struct clk *main;
struct clk *backup;
} cpu;
struct {
struct list_head list;
unsigned long min_rate;
} shared_bus;
struct {
struct list_head node;
bool enabled;
@@ -162,9 +160,7 @@ struct clk *tegra_get_clock_by_name(const char *name);
unsigned long clk_measure_input_freq(void);
int clk_reparent(struct clk *c, struct clk *parent);
void tegra_clk_init_from_table(struct tegra_clk_init_table *table);
void tegra_clk_set_dvfs_rates(void);
void clk_set_cansleep(struct clk *c);
unsigned long clk_get_rate_locked(struct clk *c);
int tegra_dvfs_set_rate_locked(struct clk *c, unsigned long rate);
#endif

View File

@@ -18,131 +18,198 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/list.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/regulator/consumer.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <asm/clkdev.h>
#include <mach/clk.h>
#include "board.h"
#include "clock.h"
#include "dvfs.h"
struct dvfs_reg {
struct list_head node; /* node in dvfs_reg_list */
struct list_head dvfs; /* list head of attached dvfs clocks */
const char *reg_id;
struct regulator *reg;
int max_millivolts;
int millivolts;
struct mutex lock;
};
static LIST_HEAD(dvfs_rail_list);
static DEFINE_MUTEX(dvfs_lock);
static LIST_HEAD(dvfs_debug_list);
static LIST_HEAD(dvfs_reg_list);
static int dvfs_rail_update(struct dvfs_rail *rail);
static DEFINE_MUTEX(dvfs_debug_list_lock);
static DEFINE_MUTEX(dvfs_reg_list_lock);
static int dvfs_reg_set_voltage(struct dvfs_reg *dvfs_reg)
void tegra_dvfs_add_relationships(struct dvfs_relationship *rels, int n)
{
int millivolts = 0;
struct dvfs *d;
int ret = 0;
int i;
struct dvfs_relationship *rel;
mutex_lock(&dvfs_reg->lock);
mutex_lock(&dvfs_lock);
list_for_each_entry(d, &dvfs_reg->dvfs, reg_node)
millivolts = max(d->cur_millivolts, millivolts);
if (millivolts == dvfs_reg->millivolts)
goto out;
dvfs_reg->millivolts = millivolts;
if (!dvfs_reg->reg) {
pr_warn("dvfs set voltage on %s ignored\n", dvfs_reg->reg_id);
goto out;
for (i = 0; i < n; i++) {
rel = &rels[i];
list_add_tail(&rel->from_node, &rel->to->relationships_from);
list_add_tail(&rel->to_node, &rel->from->relationships_to);
}
ret = regulator_set_voltage(dvfs_reg->reg,
millivolts * 1000, dvfs_reg->max_millivolts * 1000);
mutex_unlock(&dvfs_lock);
}
/*
 * Register an array of voltage rails with the dvfs core.
 * Initializes each rail's list heads, seeds the current/target voltage
 * from the board-declared nominal value, and appends the rail to the
 * global dvfs_rail_list under dvfs_lock.  Always returns 0.
 */
int tegra_dvfs_init_rails(struct dvfs_rail *rails[], int n)
{
int i;
mutex_lock(&dvfs_lock);
for (i = 0; i < n; i++) {
INIT_LIST_HEAD(&rails[i]->dvfs);
INIT_LIST_HEAD(&rails[i]->relationships_from);
INIT_LIST_HEAD(&rails[i]->relationships_to);
rails[i]->millivolts = rails[i]->nominal_millivolts;
rails[i]->new_millivolts = rails[i]->nominal_millivolts;
/* A rail with no declared step may jump to any voltage in one move */
if (!rails[i]->step)
rails[i]->step = rails[i]->max_millivolts;
list_add_tail(&rails[i]->node, &dvfs_rail_list);
}
mutex_unlock(&dvfs_lock);
return 0;
};
/*
 * Evaluate one rail-to-rail dependency: ask the relationship's solve()
 * callback what voltage rel->to requires given rel->from's state.
 * Returns the solved voltage in millivolts.
 */
static int dvfs_solve_relationship(struct dvfs_relationship *rel)
{
return rel->solve(rel->from, rel->to);
}
/* Sets the voltage on a dvfs rail to a specific value, and updates any
* rails that depend on this rail. */
static int dvfs_rail_set_voltage(struct dvfs_rail *rail, int millivolts)
{
int ret = 0;
struct dvfs_relationship *rel;
int step = (millivolts > rail->millivolts) ? rail->step : -rail->step;
int i;
int steps;
if (!rail->reg) {
if (millivolts == rail->millivolts)
return 0;
else
return -EINVAL;
}
if (rail->disabled)
return 0;
steps = DIV_ROUND_UP(abs(millivolts - rail->millivolts), rail->step);
for (i = 0; i < steps; i++) {
if (abs(millivolts - rail->millivolts) > rail->step)
rail->new_millivolts = rail->millivolts + step;
else
rail->new_millivolts = millivolts;
/* Before changing the voltage, tell each rail that depends
* on this rail that the voltage will change.
* This rail will be the "from" rail in the relationship,
* the rail that depends on this rail will be the "to" rail.
* from->millivolts will be the old voltage
* from->new_millivolts will be the new voltage */
list_for_each_entry(rel, &rail->relationships_to, to_node) {
ret = dvfs_rail_update(rel->to);
if (ret)
return ret;
}
if (!rail->disabled) {
ret = regulator_set_voltage(rail->reg,
rail->new_millivolts * 1000,
rail->max_millivolts * 1000);
}
if (ret) {
pr_err("Failed to set dvfs regulator %s\n", rail->reg_id);
return ret;
}
rail->millivolts = rail->new_millivolts;
/* After changing the voltage, tell each rail that depends
* on this rail that the voltage has changed.
* from->millivolts and from->new_millivolts will be the
* new voltage */
list_for_each_entry(rel, &rail->relationships_to, to_node) {
ret = dvfs_rail_update(rel->to);
if (ret)
return ret;
}
}
if (unlikely(rail->millivolts != millivolts)) {
pr_err("%s: rail didn't reach target %d in %d steps (%d)\n",
__func__, millivolts, steps, rail->millivolts);
return -EINVAL;
}
out:
mutex_unlock(&dvfs_reg->lock);
return ret;
}
static int dvfs_reg_connect_to_regulator(struct dvfs_reg *dvfs_reg)
/* Determine the minimum valid voltage for a rail, taking into account
* the dvfs clocks and any rails that this rail depends on. Calls
* dvfs_rail_set_voltage with the new voltage, which will call
* dvfs_rail_update on any rails that depend on this rail. */
static int dvfs_rail_update(struct dvfs_rail *rail)
{
int millivolts = 0;
struct dvfs *d;
struct dvfs_relationship *rel;
int ret = 0;
/* if dvfs is suspended, return and handle it during resume */
if (rail->suspended)
return 0;
/* if regulators are not connected yet, return and handle it later */
if (!rail->reg)
return 0;
/* Find the maximum voltage requested by any clock */
list_for_each_entry(d, &rail->dvfs, reg_node)
millivolts = max(d->cur_millivolts, millivolts);
rail->new_millivolts = millivolts;
/* Check any rails that this rail depends on.  Each solve() result
 * overwrites new_millivolts; presumably each solver already folds in
 * the current new_millivolts (the tegra2 solvers do) — NOTE(review):
 * confirm for any future solver. */
list_for_each_entry(rel, &rail->relationships_from, from_node)
rail->new_millivolts = dvfs_solve_relationship(rel);
/* Only touch the regulator when the target actually changed */
if (rail->new_millivolts != rail->millivolts)
ret = dvfs_rail_set_voltage(rail, rail->new_millivolts);
return ret;
}
static int dvfs_rail_connect_to_regulator(struct dvfs_rail *rail)
{
struct regulator *reg;
if (!dvfs_reg->reg) {
reg = regulator_get(NULL, dvfs_reg->reg_id);
if (!rail->reg) {
reg = regulator_get(NULL, rail->reg_id);
if (IS_ERR(reg))
return -EINVAL;
}
dvfs_reg->reg = reg;
rail->reg = reg;
return 0;
}
static struct dvfs_reg *get_dvfs_reg(struct dvfs *d)
{
struct dvfs_reg *dvfs_reg;
mutex_lock(&dvfs_reg_list_lock);
list_for_each_entry(dvfs_reg, &dvfs_reg_list, node)
if (!strcmp(d->reg_id, dvfs_reg->reg_id))
goto out;
dvfs_reg = kzalloc(sizeof(struct dvfs_reg), GFP_KERNEL);
if (!dvfs_reg) {
pr_err("%s: Failed to allocate dvfs_reg\n", __func__);
goto out;
}
mutex_init(&dvfs_reg->lock);
INIT_LIST_HEAD(&dvfs_reg->dvfs);
dvfs_reg->reg_id = kstrdup(d->reg_id, GFP_KERNEL);
list_add_tail(&dvfs_reg->node, &dvfs_reg_list);
out:
mutex_unlock(&dvfs_reg_list_lock);
return dvfs_reg;
}
static struct dvfs_reg *attach_dvfs_reg(struct dvfs *d)
{
struct dvfs_reg *dvfs_reg;
dvfs_reg = get_dvfs_reg(d);
if (!dvfs_reg)
return NULL;
mutex_lock(&dvfs_reg->lock);
list_add_tail(&d->reg_node, &dvfs_reg->dvfs);
d->dvfs_reg = dvfs_reg;
if (d->max_millivolts > d->dvfs_reg->max_millivolts)
d->dvfs_reg->max_millivolts = d->max_millivolts;
d->cur_millivolts = d->max_millivolts;
mutex_unlock(&dvfs_reg->lock);
return dvfs_reg;
}
static int
__tegra_dvfs_set_rate(struct clk *c, struct dvfs *d, unsigned long rate)
__tegra_dvfs_set_rate(struct dvfs *d, unsigned long rate)
{
int i = 0;
int ret;
@@ -152,7 +219,7 @@ __tegra_dvfs_set_rate(struct clk *c, struct dvfs *d, unsigned long rate)
if (rate > d->freqs[d->num_freqs - 1]) {
pr_warn("tegra_dvfs: rate %lu too high for dvfs on %s\n", rate,
c->name);
d->clk_name);
return -EINVAL;
}
@@ -167,54 +234,39 @@ __tegra_dvfs_set_rate(struct clk *c, struct dvfs *d, unsigned long rate)
d->cur_rate = rate;
if (!d->dvfs_reg)
return 0;
ret = dvfs_reg_set_voltage(d->dvfs_reg);
ret = dvfs_rail_update(d->dvfs_rail);
if (ret)
pr_err("Failed to set regulator %s for clock %s to %d mV\n",
d->dvfs_reg->reg_id, c->name, d->cur_millivolts);
d->dvfs_rail->reg_id, d->clk_name, d->cur_millivolts);
return ret;
}
int tegra_dvfs_set_rate_locked(struct clk *c, unsigned long rate)
int tegra_dvfs_set_rate(struct clk *c, unsigned long rate)
{
struct dvfs *d;
int ret = 0;
bool freq_up;
int ret;
c->dvfs_rate = rate;
if (!c->dvfs)
return -EINVAL;
freq_up = (c->refcnt == 0) || (rate > clk_get_rate_locked(c));
mutex_lock(&dvfs_lock);
ret = __tegra_dvfs_set_rate(c->dvfs, rate);
mutex_unlock(&dvfs_lock);
list_for_each_entry(d, &c->dvfs, node) {
if (d->higher == freq_up)
ret = __tegra_dvfs_set_rate(c, d, rate);
if (ret)
return ret;
}
list_for_each_entry(d, &c->dvfs, node) {
if (d->higher != freq_up)
ret = __tegra_dvfs_set_rate(c, d, rate);
if (ret)
return ret;
}
return 0;
return ret;
}
EXPORT_SYMBOL(tegra_dvfs_set_rate);
/* May only be called during clock init, does not take any locks on clock c. */
int __init tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d)
{
int i;
struct dvfs_reg *dvfs_reg;
dvfs_reg = attach_dvfs_reg(d);
if (!dvfs_reg) {
pr_err("Failed to get regulator %s for clock %s\n",
d->reg_id, c->name);
if (c->dvfs) {
pr_err("Error when enabling dvfs on %s for clock %s:\n",
d->dvfs_rail->reg_id, c->name);
pr_err("DVFS already enabled for %s\n",
c->dvfs->dvfs_rail->reg_id);
return -EINVAL;
}
@@ -235,17 +287,172 @@ int __init tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d)
clk_set_cansleep(c);
}
c->is_dvfs = true;
c->dvfs = d;
list_add_tail(&d->node, &c->dvfs);
mutex_lock(&dvfs_debug_list_lock);
list_add_tail(&d->debug_node, &dvfs_debug_list);
mutex_unlock(&dvfs_debug_list_lock);
mutex_lock(&dvfs_lock);
list_add_tail(&d->reg_node, &d->dvfs_rail->dvfs);
mutex_unlock(&dvfs_lock);
return 0;
}
/*
 * Returns true when every registered rail is either already suspended
 * or disabled (disabled rails don't need suspend handling).
 */
static bool tegra_dvfs_all_rails_suspended(void)
{
struct dvfs_rail *rail;
bool all_suspended = true;
list_for_each_entry(rail, &dvfs_rail_list, node)
if (!rail->suspended && !rail->disabled)
all_suspended = false;
return all_suspended;
}
/*
 * Returns true when every rail that 'to' depends on (its
 * relationships_from list) is suspended or disabled, i.e. 'to' is now
 * safe to suspend without violating an inter-rail constraint.
 */
static bool tegra_dvfs_from_rails_suspended(struct dvfs_rail *to)
{
struct dvfs_relationship *rel;
bool all_suspended = true;
list_for_each_entry(rel, &to->relationships_from, from_node)
if (!rel->from->suspended && !rel->from->disabled)
all_suspended = false;
return all_suspended;
}
/*
 * Suspend a single rail whose dependencies are already suspended:
 * force it to its nominal voltage and mark it suspended.  Suspending
 * one rail at a time in dependency order lets the caller loop until
 * tegra_dvfs_all_rails_suspended() holds.
 * Returns 0 on success, a dvfs_rail_set_voltage() error, or -EINVAL
 * when no suspendable rail was found (dependency cycle or all done).
 */
static int tegra_dvfs_suspend_one(void)
{
struct dvfs_rail *rail;
int ret;
list_for_each_entry(rail, &dvfs_rail_list, node) {
if (!rail->suspended && !rail->disabled &&
tegra_dvfs_from_rails_suspended(rail)) {
ret = dvfs_rail_set_voltage(rail,
rail->nominal_millivolts);
if (ret)
return ret;
rail->suspended = true;
return 0;
}
}
return -EINVAL;
}
/*
 * Clear the suspended flag on every rail, then re-run the voltage
 * solver on each so rails return to their demand-driven levels.
 * Two passes: flags must all be cleared first, since dvfs_rail_update
 * bails out early on a suspended rail.
 */
static void tegra_dvfs_resume(void)
{
struct dvfs_rail *rail;
mutex_lock(&dvfs_lock);
list_for_each_entry(rail, &dvfs_rail_list, node)
rail->suspended = false;
list_for_each_entry(rail, &dvfs_rail_list, node)
dvfs_rail_update(rail);
mutex_unlock(&dvfs_lock);
}
/*
 * Suspend all rails, one at a time in dependency order, pinning each
 * at nominal voltage.  On any failure, roll back by resuming every
 * rail.  Returns 0 on success or the first error encountered.
 */
static int tegra_dvfs_suspend(void)
{
int ret = 0;
mutex_lock(&dvfs_lock);
while (!tegra_dvfs_all_rails_suspended()) {
ret = tegra_dvfs_suspend_one();
if (ret)
break;
}
mutex_unlock(&dvfs_lock);
/* Partial suspend is not a valid state — undo it */
if (ret)
tegra_dvfs_resume();
return ret;
}
/*
 * PM notifier callback: pin rails at nominal voltage before system
 * suspend and restore demand-driven voltages after resume.  Vetoes
 * the suspend (NOTIFY_STOP) if the rails cannot be pinned.
 */
static int tegra_dvfs_pm_notify(struct notifier_block *nb,
unsigned long event, void *data)
{
switch (event) {
case PM_SUSPEND_PREPARE:
if (tegra_dvfs_suspend())
return NOTIFY_STOP;
break;
case PM_POST_SUSPEND:
tegra_dvfs_resume();
break;
}
return NOTIFY_OK;
};
static struct notifier_block tegra_dvfs_nb = {
.notifier_call = tegra_dvfs_pm_notify,
};
/* must be called with dvfs lock held */
static void __tegra_dvfs_rail_disable(struct dvfs_rail *rail)
{
int ret;
if (!rail->disabled) {
ret = dvfs_rail_set_voltage(rail, rail->nominal_millivolts);
if (ret)
pr_info("dvfs: failed to set regulator %s to disable "
"voltage %d\n", rail->reg_id,
rail->nominal_millivolts);
rail->disabled = true;
}
}
/* must be called with dvfs lock held */
/* must be called with dvfs lock held; re-enables a disabled rail and
 * immediately re-solves its voltage from current clock demand */
static void __tegra_dvfs_rail_enable(struct dvfs_rail *rail)
{
if (rail->disabled) {
rail->disabled = false;
dvfs_rail_update(rail);
}
}
/* Public wrapper: enable dvfs voltage scaling on a rail (takes dvfs_lock) */
void tegra_dvfs_rail_enable(struct dvfs_rail *rail)
{
mutex_lock(&dvfs_lock);
__tegra_dvfs_rail_enable(rail);
mutex_unlock(&dvfs_lock);
}
/* Public wrapper: freeze a rail at nominal voltage (takes dvfs_lock) */
void tegra_dvfs_rail_disable(struct dvfs_rail *rail)
{
mutex_lock(&dvfs_lock);
__tegra_dvfs_rail_disable(rail);
mutex_unlock(&dvfs_lock);
}
/*
 * Look up a rail by its regulator id string and disable it.
 * Returns 0 on success, -EINVAL if no rail matches reg_id.
 */
int tegra_dvfs_rail_disable_by_name(const char *reg_id)
{
struct dvfs_rail *rail;
int ret = 0;
mutex_lock(&dvfs_lock);
list_for_each_entry(rail, &dvfs_rail_list, node) {
if (!strcmp(reg_id, rail->reg_id)) {
__tegra_dvfs_rail_disable(rail);
goto out;
}
}
ret = -EINVAL;
out:
mutex_unlock(&dvfs_lock);
return ret;
}
/*
* Iterate through all the dvfs regulators, finding the regulator exported
* by the regulator api for each one. Must be called in late init, after
@@ -253,12 +460,19 @@ int __init tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d)
*/
int __init tegra_dvfs_late_init(void)
{
struct dvfs_reg *dvfs_reg;
struct dvfs_rail *rail;
mutex_lock(&dvfs_reg_list_lock);
list_for_each_entry(dvfs_reg, &dvfs_reg_list, node)
dvfs_reg_connect_to_regulator(dvfs_reg);
mutex_unlock(&dvfs_reg_list_lock);
mutex_lock(&dvfs_lock);
list_for_each_entry(rail, &dvfs_rail_list, node)
dvfs_rail_connect_to_regulator(rail);
list_for_each_entry(rail, &dvfs_rail_list, node)
dvfs_rail_update(rail);
mutex_unlock(&dvfs_lock);
register_pm_notifier(&tegra_dvfs_nb);
return 0;
}
@@ -266,11 +480,11 @@ int __init tegra_dvfs_late_init(void)
#ifdef CONFIG_DEBUG_FS
static int dvfs_tree_sort_cmp(void *p, struct list_head *a, struct list_head *b)
{
struct dvfs *da = list_entry(a, struct dvfs, debug_node);
struct dvfs *db = list_entry(b, struct dvfs, debug_node);
struct dvfs *da = list_entry(a, struct dvfs, reg_node);
struct dvfs *db = list_entry(b, struct dvfs, reg_node);
int ret;
ret = strcmp(da->reg_id, db->reg_id);
ret = strcmp(da->dvfs_rail->reg_id, db->dvfs_rail->reg_id);
if (ret != 0)
return ret;
@@ -285,27 +499,33 @@ static int dvfs_tree_sort_cmp(void *p, struct list_head *a, struct list_head *b)
static int dvfs_tree_show(struct seq_file *s, void *data)
{
struct dvfs *d;
const char *last_reg = "";
struct dvfs_rail *rail;
struct dvfs_relationship *rel;
seq_printf(s, " clock rate mV\n");
seq_printf(s, "--------------------------------\n");
mutex_lock(&dvfs_debug_list_lock);
mutex_lock(&dvfs_lock);
list_sort(NULL, &dvfs_debug_list, dvfs_tree_sort_cmp);
list_for_each_entry(d, &dvfs_debug_list, debug_node) {
if (strcmp(last_reg, d->dvfs_reg->reg_id) != 0) {
last_reg = d->dvfs_reg->reg_id;
seq_printf(s, "%s %d mV:\n", d->dvfs_reg->reg_id,
d->dvfs_reg->millivolts);
list_for_each_entry(rail, &dvfs_rail_list, node) {
seq_printf(s, "%s %d mV%s:\n", rail->reg_id,
rail->millivolts, rail->disabled ? " disabled" : "");
list_for_each_entry(rel, &rail->relationships_from, from_node) {
seq_printf(s, " %-10s %-7d mV %-4d mV\n",
rel->from->reg_id,
rel->from->millivolts,
dvfs_solve_relationship(rel));
}
seq_printf(s, " %-10s %-10lu %-4d mV\n", d->clk_name,
d->cur_rate, d->cur_millivolts);
list_sort(NULL, &rail->dvfs, dvfs_tree_sort_cmp);
list_for_each_entry(d, &rail->dvfs, reg_node) {
seq_printf(s, " %-10s %-10lu %-4d mV\n", d->clk_name,
d->cur_rate, d->cur_millivolts);
}
}
mutex_unlock(&dvfs_debug_list_lock);
mutex_unlock(&dvfs_lock);
return 0;
}

View File

@@ -22,25 +22,57 @@
#define MAX_DVFS_FREQS 16
struct clk;
struct dvfs_rail;
/*
* dvfs_relationship between to rails, "from" and "to"
* when the rail changes, it will call dvfs_rail_update on the rails
* in the relationship_to list.
* when determining the voltage to set a rail to, it will consider each
* rail in the relationship_from list.
*/
struct dvfs_relationship {
struct dvfs_rail *to;
struct dvfs_rail *from;
int (*solve)(struct dvfs_rail *, struct dvfs_rail *);
struct list_head to_node; /* node in relationship_to list */
struct list_head from_node; /* node in relationship_from list */
};
struct dvfs_rail {
const char *reg_id;
int min_millivolts;
int max_millivolts;
int nominal_millivolts;
int step;
bool disabled;
struct list_head node; /* node in dvfs_rail_list */
struct list_head dvfs; /* list head of attached dvfs clocks */
struct list_head relationships_to;
struct list_head relationships_from;
struct regulator *reg;
int millivolts;
int new_millivolts;
bool suspended;
};
struct dvfs {
/* Used only by tegra2_clock.c */
const char *clk_name;
int process_id;
bool cpu;
int cpu_process_id;
/* Must be initialized before tegra_dvfs_init */
const char *reg_id;
int freqs_mult;
unsigned long freqs[MAX_DVFS_FREQS];
unsigned long millivolts[MAX_DVFS_FREQS];
const int *millivolts;
struct dvfs_rail *dvfs_rail;
bool auto_dvfs;
bool higher;
/* Filled in by tegra_dvfs_init */
int max_millivolts;
int num_freqs;
struct dvfs_reg *dvfs_reg;
int cur_millivolts;
unsigned long cur_rate;
@@ -53,5 +85,9 @@ void tegra2_init_dvfs(void);
int tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d);
int dvfs_debugfs_init(struct dentry *clk_debugfs_root);
int tegra_dvfs_late_init(void);
int tegra_dvfs_init_rails(struct dvfs_rail *dvfs_rails[], int n);
void tegra_dvfs_add_relationships(struct dvfs_relationship *rels, int n);
void tegra_dvfs_rail_enable(struct dvfs_rail *rail);
void tegra_dvfs_rail_disable(struct dvfs_rail *rail);
#endif

View File

@@ -302,8 +302,6 @@ static void tegra2_super_clk_init(struct clk *c)
}
BUG_ON(sel->input == NULL);
c->parent = sel->input;
INIT_LIST_HEAD(&c->u.shared_bus.list);
}
static int tegra2_super_clk_enable(struct clk *c)
@@ -1151,9 +1149,9 @@ static struct clk_ops tegra_cdev_clk_ops = {
static void tegra_clk_shared_bus_update(struct clk *bus)
{
struct clk *c;
unsigned long rate = bus->u.shared_bus.min_rate;
unsigned long rate = bus->min_rate;
list_for_each_entry(c, &bus->u.shared_bus.list,
list_for_each_entry(c, &bus->shared_bus_list,
u.shared_bus_user.node) {
if (c->u.shared_bus_user.enabled)
rate = max(c->u.shared_bus_user.rate, rate);
@@ -1170,7 +1168,7 @@ static void tegra_clk_shared_bus_init(struct clk *c)
c->set = true;
list_add_tail(&c->u.shared_bus_user.node,
&c->parent->u.shared_bus.list);
&c->parent->shared_bus_list);
}
static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate)
@@ -1716,9 +1714,7 @@ static struct clk tegra_clk_sclk = {
.reg = 0x28,
.ops = &tegra_super_ops,
.max_rate = 240000000,
.u.shared_bus = {
.min_rate = 120000000,
},
.min_rate = 120000000,
};
static struct clk tegra_clk_virtual_cpu = {
@@ -2018,6 +2014,7 @@ struct clk *tegra_ptr_clks[] = {
static void tegra2_init_one_clock(struct clk *c)
{
clk_init(c);
INIT_LIST_HEAD(&c->shared_bus_list);
if (!c->lookup.dev_id && !c->lookup.con_id)
c->lookup.con_id = c->name;
c->lookup.clk = c;

View File

@@ -20,86 +20,134 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/module.h>
#include "clock.h"
#include "dvfs.h"
#include "fuse.h"
#define CORE_REGULATOR "vdd_core"
#define CPU_REGULATOR "vdd_cpu"
#ifdef CONFIG_TEGRA_CORE_DVFS
static bool tegra_dvfs_core_disabled;
#else
static bool tegra_dvfs_core_disabled = true;
#endif
#ifdef CONFIG_TEGRA_CPU_DVFS
static bool tegra_dvfs_cpu_disabled;
#else
static bool tegra_dvfs_cpu_disabled = true;
#endif
static const int core_millivolts[MAX_DVFS_FREQS] =
{950, 1000, 1100, 1200, 1275};
static const int cpu_millivolts[MAX_DVFS_FREQS] =
{750, 775, 800, 825, 875, 900, 925, 975, 1000, 1050, 1100};
static int cpu_core_millivolts[MAX_DVFS_FREQS];
#define CORE_MAX_MILLIVOLTS 1275
#define CPU_MAX_MILLIVOLTS 1100
#define KHZ 1000
#define MHZ 1000000
#ifdef CONFIG_TEGRA_CPU_DVFS
#define CPU_DVFS_CPU(_clk_name, _process_id, _mult, _freqs...) \
{ \
.clk_name = _clk_name, \
.reg_id = CPU_REGULATOR, \
.cpu = true, \
.process_id = _process_id, \
.freqs = {_freqs}, \
.freqs_mult = _mult, \
.auto_dvfs = true, \
.max_millivolts = CPU_MAX_MILLIVOLTS \
},
static struct dvfs_rail tegra2_dvfs_rail_vdd_cpu = {
.reg_id = "vdd_cpu",
.max_millivolts = 1100,
.min_millivolts = 750,
.nominal_millivolts = 1100,
};
#ifdef CONFIG_TEGRA_CORE_DVFS /* CPU_DVFS && CORE_DVFS */
#define CPU_DVFS_CORE(_clk_name, _process_id, _mult, _freqs...) \
{ \
.clk_name = _clk_name, \
.reg_id = CORE_REGULATOR, \
.cpu = false, \
.process_id = _process_id, \
.freqs = {_freqs}, \
.freqs_mult = _mult, \
.auto_dvfs = true, \
.higher = true, \
.max_millivolts = CORE_MAX_MILLIVOLTS \
},
#else /* CPU_DVFS && !CORE_DVFS */
#define CPU_DVFS_CORE(_clk_name, _process_id, _mult, _freqs...)
#endif
#else /* !CPU_DVFS */
#define CPU_DVFS_CPU(_clk_name, _process_id, _mult, _freqs...)
#define CPU_DVFS_CORE(_clk_name, _process_id, _mult, _freqs...)
#endif
static struct dvfs_rail tegra2_dvfs_rail_vdd_core = {
.reg_id = "vdd_core",
.max_millivolts = 1275,
.min_millivolts = 950,
.nominal_millivolts = 1200,
.step = 150, /* step vdd_core by 150 mV to allow vdd_aon to follow */
};
#ifdef CONFIG_TEGRA_CORE_DVFS
#define CORE_DVFS(_clk_name, _auto, _mult, _freqs...) \
{ \
.clk_name = _clk_name, \
.reg_id = CORE_REGULATOR, \
.process_id = -1, \
.freqs = {_freqs}, \
.freqs_mult = _mult, \
.auto_dvfs = _auto, \
.max_millivolts = CORE_MAX_MILLIVOLTS \
},
#else
#define CORE_DVFS(_clk_name, _process_id, _mult, _freqs...)
static struct dvfs_rail tegra2_dvfs_rail_vdd_aon = {
.reg_id = "vdd_aon",
.max_millivolts = 1275,
.min_millivolts = 950,
.nominal_millivolts = 1200,
#ifndef CONFIG_TEGRA_CORE_DVFS
.disabled = true,
#endif
};
/* vdd_core and vdd_aon must be 50 mV higher than vdd_cpu */
/*
 * Relationship solver: keep vdd_core at least 50 mV above vdd_cpu.
 * Considers both the pending (new_millivolts) and current (millivolts)
 * cpu voltage so the constraint holds throughout the transition.
 */
static int tegra2_dvfs_rel_vdd_cpu_vdd_core(struct dvfs_rail *vdd_cpu,
struct dvfs_rail *vdd_core)
{
/* cpu voltage is rising: core must track the new, higher cpu level */
if (vdd_cpu->new_millivolts > vdd_cpu->millivolts &&
vdd_core->new_millivolts < vdd_cpu->new_millivolts + 50)
return vdd_cpu->new_millivolts + 50;
/* otherwise enforce the margin against the current cpu level */
if (vdd_core->new_millivolts < vdd_cpu->millivolts + 50)
return vdd_cpu->millivolts + 50;
return vdd_core->new_millivolts;
}
/* vdd_aon must be within 170 mV of vdd_core */
/*
 * Relationship solver: vdd_aon simply follows vdd_core.  The BUG_ON
 * asserts the two rails never drift apart by more than one step
 * (vdd_core steps are sized so aon can keep up — see vdd_core .step).
 */
static int tegra2_dvfs_rel_vdd_core_vdd_aon(struct dvfs_rail *vdd_core,
struct dvfs_rail *vdd_aon)
{
BUG_ON(abs(vdd_aon->millivolts - vdd_core->millivolts) >
vdd_aon->step);
return vdd_core->millivolts;
}
static struct dvfs_relationship tegra2_dvfs_relationships[] = {
{
/* vdd_core must be 50 mV higher than vdd_cpu */
.from = &tegra2_dvfs_rail_vdd_cpu,
.to = &tegra2_dvfs_rail_vdd_core,
.solve = tegra2_dvfs_rel_vdd_cpu_vdd_core,
},
{
/* vdd_aon must be 50 mV higher than vdd_cpu */
.from = &tegra2_dvfs_rail_vdd_cpu,
.to = &tegra2_dvfs_rail_vdd_aon,
.solve = tegra2_dvfs_rel_vdd_cpu_vdd_core,
},
{
/* vdd_aon must be within 170 mV of vdd_core */
.from = &tegra2_dvfs_rail_vdd_core,
.to = &tegra2_dvfs_rail_vdd_aon,
.solve = tegra2_dvfs_rel_vdd_core_vdd_aon,
},
};
static struct dvfs_rail *tegra2_dvfs_rails[] = {
&tegra2_dvfs_rail_vdd_cpu,
&tegra2_dvfs_rail_vdd_core,
&tegra2_dvfs_rail_vdd_aon,
};
#define CPU_DVFS(_clk_name, _process_id, _mult, _freqs...) \
CPU_DVFS_CORE(_clk_name, _process_id, _mult, _freqs) \
CPU_DVFS_CPU(_clk_name, _process_id, _mult, _freqs) \
{ \
.clk_name = _clk_name, \
.cpu_process_id = _process_id, \
.freqs = {_freqs}, \
.freqs_mult = _mult, \
.millivolts = cpu_millivolts, \
.auto_dvfs = true, \
.dvfs_rail = &tegra2_dvfs_rail_vdd_cpu, \
}
#define CORE_DVFS(_clk_name, _auto, _mult, _freqs...) \
{ \
.clk_name = _clk_name, \
.cpu_process_id = -1, \
.freqs = {_freqs}, \
.freqs_mult = _mult, \
.millivolts = core_millivolts, \
.auto_dvfs = _auto, \
.dvfs_rail = &tegra2_dvfs_rail_vdd_core, \
}
/*
 * NOTE(review): this table contains unresolved merge/diff residue and
 * will not compile as-is:
 *  - the rows WITHOUT trailing commas are the pre-merge versions of the
 *    comma-terminated rows that immediately follow them (duplicates);
 *  - the embedded "@@ ... @@" hunk-header lines are diff artifacts, and
 *    the first one hides the "#if" that matches the bare "#endif" below
 *    (the sdmmc entries are conditional on a board-level core-voltage
 *    guarantee — see the comment fragment above them);
 *  - reconcile against the post-merge tegra2_dvfs.c before building.
 */
static struct dvfs dvfs_init[] = {
	/* Cpu voltages (mV): 750, 775, 800, 825, 875, 900, 925, 975, 1000, 1050, 1100 */
	CPU_DVFS("cpu", 0, MHZ, 314, 314, 314, 456, 456, 608, 608, 760, 817, 912, 1000)
	CPU_DVFS("cpu", 1, MHZ, 314, 314, 314, 456, 456, 618, 618, 770, 827, 922, 1000)
	CPU_DVFS("cpu", 2, MHZ, 494, 675, 675, 675, 817, 817, 922, 1000)
	CPU_DVFS("cpu", 3, MHZ, 730, 760, 845, 845, 1000)
	CPU_DVFS("cpu", 0, MHZ, 314, 314, 314, 456, 456, 608, 608, 760, 817, 912, 1000),
	CPU_DVFS("cpu", 1, MHZ, 314, 314, 314, 456, 456, 618, 618, 770, 827, 922, 1000),
	CPU_DVFS("cpu", 2, MHZ, 494, 675, 675, 675, 817, 817, 922, 1000),
	CPU_DVFS("cpu", 3, MHZ, 730, 760, 845, 845, 1000),
	/* Core voltages (mV): 950, 1000, 1100, 1200, 1275 */
@@ -110,22 +158,22 @@ static struct dvfs dvfs_init[] = {
	 * For now, boards must ensure that the core voltage does not drop
	 * below 1V, or that the sdmmc busses are set to 44 MHz or less.
	 */
	CORE_DVFS("sdmmc1", 1, KHZ, 44000, 52000, 52000, 52000, 52000)
	CORE_DVFS("sdmmc2", 1, KHZ, 44000, 52000, 52000, 52000, 52000)
	CORE_DVFS("sdmmc3", 1, KHZ, 44000, 52000, 52000, 52000, 52000)
	CORE_DVFS("sdmmc4", 1, KHZ, 44000, 52000, 52000, 52000, 52000)
	CORE_DVFS("sdmmc1", 1, KHZ, 44000, 52000, 52000, 52000, 52000),
	CORE_DVFS("sdmmc2", 1, KHZ, 44000, 52000, 52000, 52000, 52000),
	CORE_DVFS("sdmmc3", 1, KHZ, 44000, 52000, 52000, 52000, 52000),
	CORE_DVFS("sdmmc4", 1, KHZ, 44000, 52000, 52000, 52000, 52000),
#endif
	CORE_DVFS("ndflash", 1, KHZ, 130000, 150000, 158000, 164000, 164000)
	CORE_DVFS("nor", 1, KHZ, 0, 92000, 92000, 92000, 92000)
	CORE_DVFS("ide", 1, KHZ, 0, 0, 100000, 100000, 100000)
	CORE_DVFS("mipi", 1, KHZ, 0, 40000, 40000, 40000, 60000)
	CORE_DVFS("usbd", 1, KHZ, 0, 0, 480000, 480000, 480000)
	CORE_DVFS("usb2", 1, KHZ, 0, 0, 480000, 480000, 480000)
	CORE_DVFS("usb3", 1, KHZ, 0, 0, 480000, 480000, 480000)
	CORE_DVFS("pcie", 1, KHZ, 0, 0, 0, 250000, 250000)
	CORE_DVFS("dsi", 1, KHZ, 100000, 100000, 100000, 500000, 500000)
	CORE_DVFS("tvo", 1, KHZ, 0, 0, 0, 250000, 250000)
	CORE_DVFS("ndflash", 1, KHZ, 130000, 150000, 158000, 164000, 164000),
	CORE_DVFS("nor", 1, KHZ, 0, 92000, 92000, 92000, 92000),
	CORE_DVFS("ide", 1, KHZ, 0, 0, 100000, 100000, 100000),
	CORE_DVFS("mipi", 1, KHZ, 0, 40000, 40000, 40000, 60000),
	CORE_DVFS("usbd", 1, KHZ, 0, 0, 480000, 480000, 480000),
	CORE_DVFS("usb2", 1, KHZ, 0, 0, 480000, 480000, 480000),
	CORE_DVFS("usb3", 1, KHZ, 0, 0, 480000, 480000, 480000),
	CORE_DVFS("pcie", 1, KHZ, 0, 0, 0, 250000, 250000),
	CORE_DVFS("dsi", 1, KHZ, 100000, 100000, 100000, 500000, 500000),
	CORE_DVFS("tvo", 1, KHZ, 0, 0, 0, 250000, 250000),
	/*
	 * The clock rate for the display controllers that determines the
@@ -133,54 +181,99 @@ static struct dvfs dvfs_init[] = {
	 * to the display block. Disable auto-dvfs on the display clocks,
	 * and let the display driver call tegra_dvfs_set_rate manually
	 */
	CORE_DVFS("disp1", 0, KHZ, 158000, 158000, 190000, 190000, 190000)
	CORE_DVFS("disp2", 0, KHZ, 158000, 158000, 190000, 190000, 190000)
	CORE_DVFS("hdmi", 0, KHZ, 0, 0, 0, 148500, 148500)
	CORE_DVFS("disp1", 0, KHZ, 158000, 158000, 190000, 190000, 190000),
	CORE_DVFS("disp2", 0, KHZ, 158000, 158000, 190000, 190000, 190000),
	CORE_DVFS("hdmi", 0, KHZ, 0, 0, 0, 148500, 148500),
	/*
	 * These clocks technically depend on the core process id,
	 * but just use the worst case value for now
	 */
	CORE_DVFS("host1x", 1, KHZ, 104500, 133000, 166000, 166000, 166000)
	CORE_DVFS("epp", 1, KHZ, 133000, 171000, 247000, 300000, 300000)
	CORE_DVFS("2d", 1, KHZ, 133000, 171000, 247000, 300000, 300000)
	CORE_DVFS("3d", 1, KHZ, 114000, 161500, 247000, 300000, 300000)
	CORE_DVFS("mpe", 1, KHZ, 104500, 152000, 228000, 250000, 250000)
	CORE_DVFS("vi", 1, KHZ, 85000, 100000, 150000, 150000, 150000)
	CORE_DVFS("sclk", 1, KHZ, 95000, 133000, 190000, 250000, 250000)
	CORE_DVFS("vde", 1, KHZ, 95000, 123500, 209000, 250000, 250000)
	CORE_DVFS("host1x", 1, KHZ, 104500, 133000, 166000, 166000, 166000),
	CORE_DVFS("epp", 1, KHZ, 133000, 171000, 247000, 300000, 300000),
	CORE_DVFS("2d", 1, KHZ, 133000, 171000, 247000, 300000, 300000),
	CORE_DVFS("3d", 1, KHZ, 114000, 161500, 247000, 300000, 300000),
	CORE_DVFS("mpe", 1, KHZ, 104500, 152000, 228000, 250000, 250000),
	CORE_DVFS("vi", 1, KHZ, 85000, 100000, 150000, 150000, 150000),
	CORE_DVFS("sclk", 1, KHZ, 95000, 133000, 190000, 250000, 250000),
	CORE_DVFS("vde", 1, KHZ, 95000, 123500, 209000, 250000, 250000),
	/* What is this? */
	CORE_DVFS("NVRM_DEVID_CLK_SRC", 1, MHZ, 480, 600, 800, 1067, 1067)
	CORE_DVFS("NVRM_DEVID_CLK_SRC", 1, MHZ, 480, 600, 800, 1067, 1067),
};
/*
 * module_param set handler for "disable_core": parse the boolean
 * argument, then bring the VDD_CORE rail into line with the new flag.
 */
int tegra_dvfs_disable_core_set(const char *arg, const struct kernel_param *kp)
{
	int err = param_set_bool(arg, kp);

	if (err)
		return err;

	if (tegra_dvfs_core_disabled)
		tegra_dvfs_rail_disable(&tegra2_dvfs_rail_vdd_core);
	else
		tegra_dvfs_rail_enable(&tegra2_dvfs_rail_vdd_core);

	return 0;
}
/*
 * module_param set handler for "disable_cpu": parse the boolean
 * argument, then bring the VDD_CPU rail into line with the new flag.
 */
int tegra_dvfs_disable_cpu_set(const char *arg, const struct kernel_param *kp)
{
	int err = param_set_bool(arg, kp);

	if (err)
		return err;

	if (tegra_dvfs_cpu_disabled)
		tegra_dvfs_rail_disable(&tegra2_dvfs_rail_vdd_cpu);
	else
		tegra_dvfs_rail_enable(&tegra2_dvfs_rail_vdd_cpu);

	return 0;
}
int tegra_dvfs_disable_get(char *buffer, const struct kernel_param *kp)
{
return param_get_bool(buffer, kp);
}
/*
 * Expose "disable_core" / "disable_cpu" as writable module parameters;
 * writing them enables or disables the corresponding DVFS rail at
 * runtime (see the *_set handlers above).
 */
static struct kernel_param_ops tegra_dvfs_disable_core_ops = {
	.set = tegra_dvfs_disable_core_set,
	.get = tegra_dvfs_disable_get,
};

static struct kernel_param_ops tegra_dvfs_disable_cpu_ops = {
	.set = tegra_dvfs_disable_cpu_set,
	.get = tegra_dvfs_disable_get,
};

module_param_cb(disable_core, &tegra_dvfs_disable_core_ops,
	&tegra_dvfs_core_disabled, 0644);
module_param_cb(disable_cpu, &tegra_dvfs_disable_cpu_ops,
	&tegra_dvfs_cpu_disabled, 0644);
/*
 * Boot-time DVFS setup: register the rails and their relationships,
 * derive the cpu_core_millivolts table, then enable DVFS on every
 * dvfs_init entry whose process id matches this chip.
 *
 * NOTE(review): the body below contains unresolved merge residue —
 * the process-id filter mixes the removed pre-merge form
 * (d->cpu / d->process_id / core_process_id) with the new
 * d->cpu_process_id form, an embedded "@@ ... @@" hunk header hides the
 * "if (!c)" lookup-failure check, and the millivolts memcpy block is the
 * removed pre-merge code. Reconcile against the post-merge file before
 * building; a rewrite here cannot recover the hidden lines.
 */
void __init tegra2_init_dvfs(void)
{
	int i;
	struct clk *c;
	struct dvfs *d;
	int process_id;
	int ret;
	int cpu_process_id = tegra_cpu_process_id();
	int core_process_id = tegra_core_process_id();

	tegra_dvfs_init_rails(tegra2_dvfs_rails, ARRAY_SIZE(tegra2_dvfs_rails));
	tegra_dvfs_add_relationships(tegra2_dvfs_relationships,
		ARRAY_SIZE(tegra2_dvfs_relationships));
	/*
	 * VDD_CORE must always be at least 50 mV higher than VDD_CPU
	 * Fill out cpu_core_millivolts based on cpu_millivolts
	 */
	for (i = 0; i < ARRAY_SIZE(cpu_millivolts); i++)
		if (cpu_millivolts[i])
			cpu_core_millivolts[i] = cpu_millivolts[i] + 50;

	for (i = 0; i < ARRAY_SIZE(dvfs_init); i++) {
		d = &dvfs_init[i];
		/* NOTE(review): merge residue begins — old and new filters interleaved */
		process_id = d->cpu ? cpu_process_id : core_process_id;
		if (d->process_id != -1 && d->process_id != process_id) {
			pr_debug("tegra_dvfs: rejected %s %d, process_id %d\n",
				d->clk_name, d->process_id, process_id);
		if (d->cpu_process_id != -1 &&
			d->cpu_process_id != cpu_process_id)
			continue;
		}
		c = tegra_get_clock_by_name(d->clk_name);
@@ -190,19 +283,15 @@ void __init tegra2_init_dvfs(void)
			continue;
		}
		if (d->cpu)
			memcpy(d->millivolts, cpu_millivolts,
				sizeof(cpu_millivolts));
		else if (!strcmp(d->clk_name, "cpu"))
			memcpy(d->millivolts, cpu_core_millivolts,
				sizeof(cpu_core_millivolts));
		else
			memcpy(d->millivolts, core_millivolts,
				sizeof(core_millivolts));
		ret = tegra_enable_dvfs_on_clk(c, d);
		if (ret)
			pr_err("tegra_dvfs: failed to enable dvfs on %s\n",
				c->name);
	}

	/* Honor boot-time disable flags once everything is registered. */
	if (tegra_dvfs_core_disabled)
		tegra_dvfs_rail_disable(&tegra2_dvfs_rail_vdd_core);

	if (tegra_dvfs_cpu_disabled)
		tegra_dvfs_rail_disable(&tegra2_dvfs_rail_vdd_cpu);
}

View File

@@ -110,6 +110,16 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
Be aware that not all cpufreq drivers support the conservative
governor. If unsure have a look at the help section of the
driver. Fallback governor will be the performance governor.
config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
bool "interactive"
select CPU_FREQ_GOV_INTERACTIVE
help
Use the CPUFreq governor 'interactive' as default. This allows
you to get a full dynamic cpu frequency capable system by simply
loading your cpufreq low-level hardware driver, using the
'interactive' governor for latency-sensitive workloads.
endchoice
config CPU_FREQ_GOV_PERFORMANCE
@@ -167,6 +177,12 @@ config CPU_FREQ_GOV_ONDEMAND
If in doubt, say N.
config CPU_FREQ_GOV_INTERACTIVE
	tristate "'interactive' cpufreq policy governor"
	depends on CPU_FREQ
	help
	  'interactive' - This driver adds a dynamic cpufreq policy governor
	  designed for latency-sensitive workloads.

	  If in doubt, say N.
config CPU_FREQ_GOV_CONSERVATIVE
tristate "'conservative' cpufreq governor"
depends on CPU_FREQ

View File

@@ -9,6 +9,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o

View File

@@ -0,0 +1,681 @@
/*
* drivers/cpufreq/cpufreq_interactive.c
*
* Copyright (C) 2010 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Author: Mike Chan (mike@android.com)
*
*/
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <asm/cputime.h>
/* Saved pm_idle hook; restored when the last policy stops the governor. */
static void (*pm_idle_old)(void);
/* Number of policies currently running this governor (guards one-time setup). */
static atomic_t active_count = ATOMIC_INIT(0);
/* Per-CPU governor state. */
struct cpufreq_interactive_cpuinfo {
	/* Deferrable-style load-sampling timer armed on idle exit. */
	struct timer_list cpu_timer;
	/* Nonzero: cancel cpu_timer if this CPU enters idle at min speed. */
	int timer_idlecancel;
	/* Idle-time snapshot at the start of the current short-term sample. */
	u64 time_in_idle;
	/* Wall time of the current sample start; 0 marks the sample invalid. */
	u64 idle_exit_time;
	/* Wall time of the last timer-function run (ordering vs. idle exit). */
	u64 timer_run_time;
	/* Nonzero while this CPU is inside the idle hook. */
	int idling;
	/* Wall time / idle-time snapshots at the last frequency change
	 * (baseline for the long-term load computation). */
	u64 freq_change_time;
	u64 freq_change_time_in_idle;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	/* Speed most recently requested for this CPU. */
	unsigned int target_freq;
	int governor_enabled;
};
static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/*
 * Ramp-up is handled by a realtime kthread (low latency); ramp-down by
 * an ordinary workqueue. CPUs needing a change are flagged in the
 * corresponding cpumask under its spinlock.
 */
static struct task_struct *up_task;
static struct workqueue_struct *down_wq;
static struct work_struct freq_scale_down_work;
static cpumask_t up_cpumask;
static spinlock_t up_cpumask_lock;
static cpumask_t down_cpumask;
static spinlock_t down_cpumask_lock;

/* Go to max speed when CPU load at or above this value. */
#define DEFAULT_GO_MAXSPEED_LOAD 85
static unsigned long go_maxspeed_load;

/*
 * The minimum amount of time (in uS) to spend at a frequency before we
 * can ramp down. Fix: dropped the stray trailing semicolon from the
 * original "#define DEFAULT_MIN_SAMPLE_TIME 80000;", which would expand
 * to "80000;" inside any expression using the macro.
 */
#define DEFAULT_MIN_SAMPLE_TIME 80000
static unsigned long min_sample_time;
/* Compile-time switch for the in-memory debug trace below (0 = off). */
#define DEBUG 0
#define BUFSZ 128

#if DEBUG
#include <linux/proc_fs.h>

/* One trace record: formatted message plus cpu/jiffy/runqueue context. */
struct dbgln {
	int cpu;
	unsigned long jiffy;
	unsigned long run;
	char buf[BUFSZ];
};

#define NDBGLNS 256

/* Ring buffer of trace records; dbgbufs/dbgbufe are start/end indices. */
static struct dbgln dbgbuf[NDBGLNS];
static int dbgbufs;
static int dbgbufe;
static struct proc_dir_entry *dbg_proc;
static spinlock_t dbgpr_lock;

/* Timestamp of the last ramp-up request, for measuring up_task latency. */
static u64 up_request_time;
static unsigned int up_max_latency;

/*
 * printf-style trace into the ring buffer; safe from any context
 * (irqsave lock). On overflow the oldest record is overwritten.
 */
static void dbgpr(char *fmt, ...)
{
	va_list args;
	int n;
	unsigned long flags;

	spin_lock_irqsave(&dbgpr_lock, flags);
	n = dbgbufe;
	va_start(args, fmt);
	vsnprintf(dbgbuf[n].buf, BUFSZ, fmt, args);
	va_end(args);
	dbgbuf[n].cpu = smp_processor_id();
	dbgbuf[n].run = nr_running();
	dbgbuf[n].jiffy = jiffies;

	if (++dbgbufe >= NDBGLNS)
		dbgbufe = 0;

	/* Buffer full: advance the start index (drop oldest). */
	if (dbgbufe == dbgbufs)
		if (++dbgbufs >= NDBGLNS)
			dbgbufs = 0;

	spin_unlock_irqrestore(&dbgpr_lock, flags);
}

/*
 * Copy the ring buffer out under the lock, reset it, then print the
 * snapshot. prbuf is static to avoid a large stack frame.
 */
static void dbgdump(void)
{
	int i, j;
	unsigned long flags;
	static struct dbgln prbuf[NDBGLNS];

	spin_lock_irqsave(&dbgpr_lock, flags);
	i = dbgbufs;
	j = dbgbufe;
	memcpy(prbuf, dbgbuf, sizeof(dbgbuf));
	dbgbufs = 0;
	dbgbufe = 0;
	spin_unlock_irqrestore(&dbgpr_lock, flags);

	while (i != j)
	{
		printk("%lu %d %lu %s",
		       prbuf[i].jiffy, prbuf[i].cpu, prbuf[i].run,
		       prbuf[i].buf);

		if (++i == NDBGLNS)
			i = 0;
	}
}

/* /proc/igov read handler: dump max up_task latency plus the trace. */
static int dbg_proc_read(char *buffer, char **start, off_t offset,
			 int count, int *peof, void *dat)
{
	printk("max up_task latency=%uus\n", up_max_latency);
	dbgdump();
	*peof = 1;
	return 0;
}

#else
/* Tracing compiled out: dbgpr() becomes a no-op. */
#define dbgpr(...) do {} while (0)
#endif
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

/*
 * Non-static when built as the default governor so the cpufreq core's
 * extern declaration (CPUFREQ_DEFAULT_GOVERNOR) can resolve it.
 * max_transition_latency: refuse drivers slower than 10 ms per switch.
 */
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};
/*
 * Load-sampling timer, armed 1-2 ticks after idle exit (data = cpu).
 * Computes short-term load (since the sample taken at idle exit) and
 * long-term load (since the last frequency change), takes the greater,
 * and queues a ramp up (RT kthread) or down (workqueue) as needed.
 * Statement order here implements a lock-free handshake with the idle
 * hook — do not reorder the snapshot/timer_run_time/smp_wmb sequence.
 */
static void cpufreq_interactive_timer(unsigned long data)
{
	unsigned int delta_idle;
	unsigned int delta_time;
	int cpu_load;
	int load_since_change;
	u64 time_in_idle;
	u64 idle_exit_time;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	u64 now_idle;
	unsigned int new_freq;
	unsigned int index;

	/*
	 * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
	 * this lets idle exit know the current idle time sample has
	 * been processed, and idle exit can generate a new sample and
	 * re-arm the timer. This prevents a concurrent idle
	 * exit on that CPU from writing a new set of info at the same time
	 * the timer function runs (the timer function can't use that info
	 * until more time passes).
	 */
	time_in_idle = pcpu->time_in_idle;
	idle_exit_time = pcpu->idle_exit_time;
	now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
	smp_wmb();

	/* If we raced with cancelling a timer, skip. */
	if (!idle_exit_time) {
		dbgpr("timer %d: no valid idle exit sample\n", (int) data);
		goto exit;
	}

#if DEBUG
	if ((int) jiffies - (int) pcpu->cpu_timer.expires >= 10)
		dbgpr("timer %d: late by %d ticks\n",
		      (int) data, jiffies - pcpu->cpu_timer.expires);
#endif

	/* Short-term deltas: idle time vs. wall time since the sample start. */
	delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle);
	delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
						  idle_exit_time);

	/*
	 * If timer ran less than 1ms after short-term sample started, retry.
	 */
	if (delta_time < 1000) {
		dbgpr("timer %d: time delta %u too short exit=%llu now=%llu\n", (int) data,
		      delta_time, idle_exit_time, pcpu->timer_run_time);
		goto rearm;
	}

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (delta_time - delta_idle) / delta_time;

	/* Long-term deltas: measured since the last frequency change. */
	delta_idle = (unsigned int) cputime64_sub(now_idle,
						  pcpu->freq_change_time_in_idle);
	delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
						  pcpu->freq_change_time);

	if (delta_idle > delta_time)
		load_since_change = 0;
	else
		load_since_change =
			100 * (delta_time - delta_idle) / delta_time;

	/*
	 * Choose greater of short-term load (since last idle timer
	 * started or timer function re-armed itself) or long-term load
	 * (since last frequency change).
	 */
	if (load_since_change > cpu_load)
		cpu_load = load_since_change;

	/* At/above the threshold jump straight to max; else scale linearly. */
	if (cpu_load >= go_maxspeed_load)
		new_freq = pcpu->policy->max;
	else
		new_freq = pcpu->policy->max * cpu_load / 100;

	/* Snap to the nearest table frequency at or below the request. */
	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_H,
					   &index)) {
		dbgpr("timer %d: cpufreq_frequency_table_target error\n", (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	if (pcpu->target_freq == new_freq)
	{
		dbgpr("timer %d: load=%d, already at %d\n", (int) data, cpu_load, new_freq);
		goto rearm_if_notmax;
	}

	/*
	 * Do not scale down unless we have been at this frequency for the
	 * minimum sample time.
	 */
	if (new_freq < pcpu->target_freq) {
		if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) <
		    min_sample_time) {
			dbgpr("timer %d: load=%d cur=%d tgt=%d not yet\n", (int) data, cpu_load, pcpu->target_freq, new_freq);
			goto rearm;
		}
	}

	dbgpr("timer %d: load=%d cur=%d tgt=%d queue\n", (int) data, cpu_load, pcpu->target_freq, new_freq);

	/* Hand off: ramp-down via workqueue, ramp-up via the RT kthread. */
	if (new_freq < pcpu->target_freq) {
		pcpu->target_freq = new_freq;
		spin_lock(&down_cpumask_lock);
		cpumask_set_cpu(data, &down_cpumask);
		spin_unlock(&down_cpumask_lock);
		queue_work(down_wq, &freq_scale_down_work);
	} else {
		pcpu->target_freq = new_freq;
#if DEBUG
		up_request_time = ktime_to_us(ktime_get());
#endif
		spin_lock(&up_cpumask_lock);
		cpumask_set_cpu(data, &up_cpumask);
		spin_unlock(&up_cpumask_lock);
		wake_up_process(up_task);
	}

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If already at min: if that CPU is idle, don't set timer.
		 * Else cancel the timer if that CPU goes idle. We don't
		 * need to re-evaluate speed until the next idle exit.
		 */
		if (pcpu->target_freq == pcpu->policy->min) {
			smp_rmb();
			if (pcpu->idling) {
				dbgpr("timer %d: cpu idle, don't re-arm\n", (int) data);
				goto exit;
			}
			pcpu->timer_idlecancel = 1;
		}
		/* Start a fresh short-term sample and re-arm in 1-2 ticks. */
		pcpu->time_in_idle = get_cpu_idle_time_us(
			data, &pcpu->idle_exit_time);
		mod_timer(&pcpu->cpu_timer, jiffies + 2);
		dbgpr("timer %d: set timer for %lu exit=%llu\n", (int) data, pcpu->cpu_timer.expires, pcpu->idle_exit_time);
	}

exit:
	return;
}
/*
 * pm_idle hook: wraps the saved idle routine, managing the per-CPU
 * sampling timer on idle entry/exit. The idling flag plus smp_wmb()
 * pairs with the smp_rmb() in the timer function; do not reorder.
 */
static void cpufreq_interactive_idle(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	/* Governor not active on this CPU: fall straight through. */
	if (!pcpu->governor_enabled) {
		pm_idle_old();
		return;
	}

	pcpu->idling = 1;
	smp_wmb();
	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
		/*
		 * Entering idle while not at lowest speed. On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle. Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely. This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->time_in_idle = get_cpu_idle_time_us(
				smp_processor_id(), &pcpu->idle_exit_time);
			pcpu->timer_idlecancel = 0;
			mod_timer(&pcpu->cpu_timer, jiffies + 2);
			dbgpr("idle: enter at %d, set timer for %lu exit=%llu\n",
			      pcpu->target_freq, pcpu->cpu_timer.expires,
			      pcpu->idle_exit_time);
		}
#endif
	} else {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer. The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			dbgpr("idle: cancel timer for %lu\n", pcpu->cpu_timer.expires);
			del_timer(&pcpu->cpu_timer);
			/*
			 * Ensure last timer run time is after current idle
			 * sample start time, so next idle exit will always
			 * start a new idle sampling period.
			 */
			pcpu->idle_exit_time = 0;
			pcpu->timer_idlecancel = 0;
		}
	}

	/* Actually go idle; returns on wakeup. */
	pm_idle_old();
	pcpu->idling = 0;
	smp_wmb();

	/*
	 * Arm the timer for 1-2 ticks later if not already, and if the timer
	 * function has already processed the previous load sampling
	 * interval. (If the timer is not pending but has not processed
	 * the previous interval, it is probably racing with us on another
	 * CPU. Let it compute load based on the previous sample and then
	 * re-arm the timer for another interval when it's done, rather
	 * than updating the interval start time to be "now", which doesn't
	 * give the timer function enough time to make a decision on this
	 * run.)
	 */
	if (timer_pending(&pcpu->cpu_timer) == 0 &&
	    pcpu->timer_run_time >= pcpu->idle_exit_time) {
		pcpu->time_in_idle =
			get_cpu_idle_time_us(smp_processor_id(),
					     &pcpu->idle_exit_time);
		pcpu->timer_idlecancel = 0;
		mod_timer(&pcpu->cpu_timer, jiffies + 2);
		dbgpr("idle: exit, set timer for %lu exit=%llu\n", pcpu->cpu_timer.expires, pcpu->idle_exit_time);
#if DEBUG
	} else if (timer_pending(&pcpu->cpu_timer) == 0 &&
		   pcpu->timer_run_time < pcpu->idle_exit_time) {
		dbgpr("idle: timer not run yet: exit=%llu tmrrun=%llu\n",
		      pcpu->idle_exit_time, pcpu->timer_run_time);
#endif
	}
}
/*
 * SCHED_FIFO kthread that applies pending ramp-UP requests with minimum
 * latency. Sleeps until the timer function flags a CPU in up_cpumask
 * and wakes us; then drains the mask and drives each flagged CPU to its
 * target_freq. The set_current_state/lock/check ordering avoids losing
 * a wakeup that arrives between the empty check and schedule().
 */
static int cpufreq_interactive_up_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	struct cpufreq_interactive_cpuinfo *pcpu;

#if DEBUG
	u64 now;
	u64 then;
	unsigned int lat;
#endif

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock(&up_cpumask_lock);

		if (cpumask_empty(&up_cpumask)) {
			spin_unlock(&up_cpumask_lock);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock(&up_cpumask_lock);
		}

		set_current_state(TASK_RUNNING);

#if DEBUG
		/* Track worst-case latency from ramp-up request to service. */
		then = up_request_time;
		now = ktime_to_us(ktime_get());

		if (now > then) {
			lat = ktime_to_us(ktime_get()) - then;

			if (lat > up_max_latency)
				up_max_latency = lat;
		}
#endif

		/* Snapshot and clear the request mask, then drop the lock. */
		tmp_mask = up_cpumask;
		cpumask_clear(&up_cpumask);
		spin_unlock(&up_cpumask_lock);

		for_each_cpu(cpu, &tmp_mask) {
			pcpu = &per_cpu(cpuinfo, cpu);

			if (nr_running() == 1) {
				dbgpr("up %d: tgt=%d nothing else running\n", cpu,
				      pcpu->target_freq);
			}

			__cpufreq_driver_target(pcpu->policy,
						pcpu->target_freq,
						CPUFREQ_RELATION_H);
			/* New baseline for the next long-term load window. */
			pcpu->freq_change_time_in_idle =
				get_cpu_idle_time_us(cpu,
						     &pcpu->freq_change_time);
			dbgpr("up %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur);
		}
	}

	return 0;
}
/*
 * Workqueue handler: apply the (lower) target frequency recorded for
 * every CPU flagged in down_cpumask, then snapshot the idle statistics
 * used as the baseline for the next long-term load computation.
 */
static void cpufreq_interactive_freq_down(struct work_struct *work)
{
	unsigned int cpu;
	cpumask_t work_mask;
	struct cpufreq_interactive_cpuinfo *info;

	/* Atomically grab and clear the set of CPUs needing a ramp-down. */
	spin_lock(&down_cpumask_lock);
	work_mask = down_cpumask;
	cpumask_clear(&down_cpumask);
	spin_unlock(&down_cpumask_lock);

	for_each_cpu(cpu, &work_mask) {
		info = &per_cpu(cpuinfo, cpu);
		__cpufreq_driver_target(info->policy, info->target_freq,
					CPUFREQ_RELATION_H);
		info->freq_change_time_in_idle =
			get_cpu_idle_time_us(cpu, &info->freq_change_time);
		dbgpr("down %d: set tgt=%d (actual=%d)\n", cpu, info->target_freq, info->policy->cur);
	}
}
/* sysfs show/store for the go_maxspeed_load tunable. */
static ssize_t show_go_maxspeed_load(struct kobject *kobj,
				     struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_maxspeed_load);
}

static ssize_t store_go_maxspeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;

	/*
	 * Fix: must return 'count' on success. The original returned
	 * strict_strtoul()'s result (0 on success); a 0 return from a
	 * sysfs store means "0 bytes consumed", causing userspace
	 * writers to retry the same write forever.
	 */
	ret = strict_strtoul(buf, 0, &go_maxspeed_load);
	if (ret < 0)
		return ret;
	return count;
}

static struct global_attr go_maxspeed_load_attr = __ATTR(go_maxspeed_load, 0644,
		show_go_maxspeed_load, store_go_maxspeed_load);
/* sysfs show/store for the min_sample_time tunable (uS). */
static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;

	/*
	 * Fix: must return 'count' on success. The original returned
	 * strict_strtoul()'s result (0 on success), which sysfs treats
	 * as "0 bytes consumed", making userspace writes loop forever.
	 */
	ret = strict_strtoul(buf, 0, &min_sample_time);
	if (ret < 0)
		return ret;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);
/*
 * Tunables exposed under /sys/devices/system/cpu/cpufreq/interactive/;
 * the group is registered once, when the first policy starts the
 * governor (see CPUFREQ_GOV_START handling).
 */
static struct attribute *interactive_attributes[] = {
	&go_maxspeed_load_attr.attr,
	&min_sample_time_attr.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};
/*
 * Governor event callback (START/STOP/LIMITS for a policy).
 * First START installs the pm_idle hook and sysfs group; last STOP
 * removes them (tracked by active_count).
 *
 * NOTE(review): on STOP only this policy's CPU timer is deleted —
 * timers of other CPUs in a multi-CPU policy appear to be left running;
 * confirm against the driver's policy->cpus layout.
 */
static int cpufreq_governor_interactive(struct cpufreq_policy *new_policy,
		unsigned int event)
{
	int rc;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, new_policy->cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(new_policy->cpu))
			return -EINVAL;

		/* Seed per-CPU state; current speed becomes the target. */
		pcpu->policy = new_policy;
		pcpu->freq_table = cpufreq_frequency_get_table(new_policy->cpu);
		pcpu->target_freq = new_policy->cur;
		pcpu->freq_change_time_in_idle =
			get_cpu_idle_time_us(new_policy->cpu,
					     &pcpu->freq_change_time);
		pcpu->governor_enabled = 1;

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		pm_idle_old = pm_idle;
		pm_idle = cpufreq_interactive_idle;
		break;

	case CPUFREQ_GOV_STOP:
		pcpu->governor_enabled = 0;

		/* Tear down globals only when the last user stops. */
		if (atomic_dec_return(&active_count) > 0)
			return 0;

		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		pm_idle = pm_idle_old;
		del_timer(&pcpu->cpu_timer);
		break;

	case CPUFREQ_GOV_LIMITS:
		/* Clamp the current speed into the new [min, max] window. */
		if (new_policy->max < new_policy->cur)
			__cpufreq_driver_target(new_policy,
					new_policy->max, CPUFREQ_RELATION_H);
		else if (new_policy->min > new_policy->cur)
			__cpufreq_driver_target(new_policy,
					new_policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}
/*
 * Module init: set tunable defaults, prepare per-CPU timers, spawn the
 * RT ramp-up kthread and ramp-down workqueue, then register the
 * governor.
 *
 * Fixes vs. original: resources are now released if governor
 * registration fails (down_wq and up_task previously leaked), the
 * stopped kthread is properly reaped on the error path, and the
 * DEBUG-only create_proc_entry() result is NULL-checked before
 * dereference.
 */
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	int rc;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_maxspeed_load = DEFAULT_GO_MAXSPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

	/* Initialize per-cpu timers (armed later, on idle exit). */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
	}

	up_task = kthread_create(cpufreq_interactive_up_task, NULL,
				 "kinteractiveup");
	if (IS_ERR(up_task))
		return PTR_ERR(up_task);

	/* Ramp-up must preempt everything else for low latency. */
	sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
	get_task_struct(up_task);

	/* No rescuer thread, bind to CPU queuing the work for possibly
	   warm cache (probably doesn't matter much). */
	down_wq = alloc_workqueue("knteractive_down", 0, 1);

	if (!down_wq) {
		rc = -ENOMEM;
		goto err_freeuptask;
	}

	INIT_WORK(&freq_scale_down_work,
		  cpufreq_interactive_freq_down);

	spin_lock_init(&up_cpumask_lock);
	spin_lock_init(&down_cpumask_lock);

#if DEBUG
	spin_lock_init(&dbgpr_lock);
	dbg_proc = create_proc_entry("igov", S_IWUSR | S_IRUGO, NULL);
	if (dbg_proc)
		dbg_proc->read_proc = dbg_proc_read;
#endif

	rc = cpufreq_register_governor(&cpufreq_gov_interactive);
	if (rc)
		goto err_freewq;

	return 0;

err_freewq:
	destroy_workqueue(down_wq);
err_freeuptask:
	/* Reap the (never-woken) kthread before dropping our reference. */
	kthread_stop(up_task);
	put_task_struct(up_task);
	return rc;
}
/*
 * When built in as the default governor, initialize early (fs_initcall)
 * so the governor exists before cpufreq drivers register their
 * policies; otherwise use normal module init.
 */
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif
/*
 * Module exit: unregister the governor first (no new policy can start
 * using us), then stop and release the ramp-up kthread and destroy the
 * ramp-down workqueue.
 */
static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(up_task);
	put_task_struct(up_task);
	destroy_workqueue(down_wq);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"Latency sensitive workloads");
MODULE_LICENSE("GPL");

View File

@@ -490,7 +490,7 @@ int tegra_dc_update_windows(struct tegra_dc_win *windows[], int n)
if (no_vsync)
tegra_dc_writel(dc, WRITE_MUX_ACTIVE | READ_MUX_ACTIVE, DC_CMD_STATE_ACCESS);
else
tegra_dc_writel(dc, WRITE_MUX_ASSEMBLY | WRITE_MUX_ASSEMBLY, DC_CMD_STATE_ACCESS);
tegra_dc_writel(dc, WRITE_MUX_ASSEMBLY | READ_MUX_ASSEMBLY, DC_CMD_STATE_ACCESS);
for (i = 0; i < n; i++) {
struct tegra_dc_win *win = windows[i];
@@ -823,7 +823,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *ptr)
val = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
for (i = 0; i < DC_N_WINDOWS; i++) {
if (!(val & (WIN_A_ACT_REQ << i))) {
if (!(val & (WIN_A_UPDATE << i))) {
dc->windows[i].dirty = 0;
completed = 1;
} else {

View File

@@ -89,6 +89,10 @@
#define WIN_A_ACT_REQ (1 << 1)
#define WIN_B_ACT_REQ (1 << 2)
#define WIN_C_ACT_REQ (1 << 3)
#define GENERAL_UPDATE (1 << 0)
#define WIN_A_UPDATE (1 << 1)
#define WIN_B_UPDATE (1 << 2)
#define WIN_C_UPDATE (1 << 3)
#define DC_CMD_DISPLAY_WINDOW_HEADER 0x042
#define WINDOW_A_SELECT (1 << 4)

View File

@@ -2924,6 +2924,24 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
err = percpu_counter_init(&sbi->s_freeblocks_counter,
ext4_count_free_blocks(sb));
if (!err) {
err = percpu_counter_init(&sbi->s_freeinodes_counter,
ext4_count_free_inodes(sb));
}
if (!err) {
err = percpu_counter_init(&sbi->s_dirs_counter,
ext4_count_dirs(sb));
}
if (!err) {
err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
}
if (err) {
ext4_msg(sb, KERN_ERR, "insufficient memory");
goto failed_mount3;
}
sbi->s_stripe = ext4_get_stripe_size(sbi);
sbi->s_max_writeback_mb_bump = 128;
@@ -3022,22 +3040,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
no_journal:
err = percpu_counter_init(&sbi->s_freeblocks_counter,
ext4_count_free_blocks(sb));
if (!err)
err = percpu_counter_init(&sbi->s_freeinodes_counter,
ext4_count_free_inodes(sb));
if (!err)
err = percpu_counter_init(&sbi->s_dirs_counter,
ext4_count_dirs(sb));
if (!err)
err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
if (err) {
ext4_msg(sb, KERN_ERR, "insufficient memory");
goto failed_mount_wq;
}
/*
* The journal may have updated the bg summary counts, so we
* need to update the global counters.
*/
percpu_counter_set(&sbi->s_freeblocks_counter,
ext4_count_free_blocks(sb));
percpu_counter_set(&sbi->s_freeinodes_counter,
ext4_count_free_inodes(sb));
percpu_counter_set(&sbi->s_dirs_counter,
ext4_count_dirs(sb));
percpu_counter_set(&sbi->s_dirtyblocks_counter, 0);
no_journal:
EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten");
if (!EXT4_SB(sb)->dio_unwritten_wq) {
printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
@@ -3184,10 +3199,6 @@ failed_mount_wq:
jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
}
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
failed_mount3:
if (sbi->s_flex_groups) {
if (is_vmalloc_addr(sbi->s_flex_groups))
@@ -3195,6 +3206,10 @@ failed_mount3:
else
kfree(sbi->s_flex_groups);
}
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
@@ -3523,13 +3538,11 @@ static int ext4_commit_super(struct super_block *sb, int sync)
else
es->s_kbytes_written =
cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeblocks_counter))
ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
&EXT4_SB(sb)->s_freeblocks_counter));
if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
es->s_free_inodes_count =
cpu_to_le32(percpu_counter_sum_positive(
&EXT4_SB(sb)->s_freeinodes_counter));
ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
&EXT4_SB(sb)->s_freeblocks_counter));
es->s_free_inodes_count =
cpu_to_le32(percpu_counter_sum_positive(
&EXT4_SB(sb)->s_freeinodes_counter));
sb->s_dirt = 0;
BUFFER_TRACE(sbh, "marking dirty");
mark_buffer_dirty(sbh);

View File

@@ -134,6 +134,7 @@ EXPORT_SYMBOL_GPL(fuse_do_open);
void fuse_finish_open(struct inode *inode, struct file *file)
{
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = get_fuse_conn(inode);
if (ff->open_flags & FOPEN_DIRECT_IO)
file->f_op = &fuse_direct_io_file_operations;
@@ -141,6 +142,15 @@ void fuse_finish_open(struct inode *inode, struct file *file)
invalidate_inode_pages2(inode->i_mapping);
if (ff->open_flags & FOPEN_NONSEEKABLE)
nonseekable_open(inode, file);
if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
struct fuse_inode *fi = get_fuse_inode(inode);
spin_lock(&fc->lock);
fi->attr_version = ++fc->attr_version;
i_size_write(inode, 0);
spin_unlock(&fc->lock);
fuse_invalidate_attr(inode);
}
}
int fuse_open_common(struct inode *inode, struct file *file, bool isdir)

View File

@@ -364,6 +364,9 @@ extern struct cpufreq_governor cpufreq_gov_ondemand;
#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
extern struct cpufreq_governor cpufreq_gov_conservative;
#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative)
#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
extern struct cpufreq_governor cpufreq_gov_interactive;
#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_interactive)
#endif