Merge branch 'linux-tegra-2.6.36' into android-tegra-2.6.36

This commit is contained in:
Colin Cross
2010-11-04 18:17:50 -07:00
10 changed files with 565 additions and 342 deletions

View File

@@ -18,119 +18,205 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/clkdev.h>
#include <mach/clk.h>
#include "board.h"
#include "clock.h"
#include "dvfs.h"
static LIST_HEAD(clocks);
/*
* clock_lock must be held when:
* Accessing any clock register non-atomically
* or
* Relying on any state of a clk struct not to change, unless clk_is_dvfs
* returns true on that clk struct, and dvfs_lock is held instead.
* Locking:
*
* Any function that changes the state of a clk struct must hold
* the dvfs_lock if clk_is_auto_dvfs(clk) is true, and the clock_lock.
* Each struct clk has a lock. Depending on the cansleep flag, that lock
* may be a spinlock or a mutex. For most clocks, the spinlock is sufficient,
* and using the spinlock allows the clock to be manipulated from an interrupt
* or while holding a spinlock. Some clocks may need to adjust a regulator
* in order to maintain the required voltage for a new frequency. Those
* clocks set the cansleep flag, and take a mutex so that the regulator api
* can be used while holding the lock.
*
* When taking dvfs_lock and clock_lock, dvfs_lock must be taken first.
* To avoid AB-BA locking problems, locks must always be traversed from child
* clock to parent clock. For example, when enabling a clock, the clock's lock
* is taken, and then clk_enable is called on the parent, which takes the
* parent clock's lock. There are two exceptions to this ordering:
* 1. When setting a clock as cansleep, in which case the entire list of clocks
* is traversed to set the children as cansleep as well. This must occur
* during init, before any calls to clk_get, so no other clock locks can
* get taken.
* 2. When dumping the clock tree through debugfs. In this case, clk_lock_all
* is called, which attempts to iterate through the entire list of clocks
* and take every clock lock. If any call to clk_trylock fails, all locked
* clocks are unlocked, and the process is retried. When all the locks
* are held, the only clock operation that can be called is
* clk_get_rate_all_locked.
*
* Within a single clock, no clock operation can call another clock operation
* on itself, except for clk_get_rate_locked. Any clock operation can call
* any other clock operation on any of its possible parents.
*
* clk_set_cansleep is used to mark a clock as sleeping. It is called during
* dvfs (Dynamic Voltage and Frequency Scaling) init on any clock that has a
* dvfs requirement. It can only be called on clocks that are the sole parent
* of all of their child clocks, meaning the child clock can not be reparented
* onto a different, possibly non-sleeping, clock. This is inherently true
* of all leaf clocks in the clock tree.
*
* An additional lock, clock_list_lock, is used to protect the list of all
* clocks.
*
* The clock operations must lock internally to protect against
* read-modify-write on registers that are shared by multiple clocks
*/
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_lock);
static LIST_HEAD(clocks);
static inline bool clk_is_auto_dvfs(struct clk *c)
{
smp_rmb();
return c->auto_dvfs;
};
}
static inline bool clk_is_dvfs(struct clk *c)
{
smp_rmb();
return c->is_dvfs;
};
}
/*
 * Returns true if this clock must be locked with a mutex (its operations
 * may sleep, e.g. to adjust a regulator) rather than a spinlock.
 */
static inline bool clk_cansleep(struct clk *c)
{
	return c->cansleep;
}
/*
 * Take the per-clock lock.  Sleeping clocks use their mutex (flags is
 * unused and set to 0); non-sleeping clocks use their spinlock with irqs
 * saved in flags.  The do/while(0) wrapper keeps the macro safe inside
 * unbraced if/else bodies.
 */
#define clk_lock_save(c, flags) \
	do { \
		if (clk_cansleep(c)) { \
			flags = 0; \
			mutex_lock(&c->mutex); \
		} else { \
			spin_lock_irqsave(&c->spinlock, flags); \
		} \
	} while (0)

/* Release the per-clock lock taken by clk_lock_save(). */
#define clk_unlock_restore(c, flags) \
	do { \
		if (clk_cansleep(c)) \
			mutex_unlock(&c->mutex); \
		else \
			spin_unlock_irqrestore(&c->spinlock, flags); \
	} while (0)
/*
 * Initialize both possible per-clock locks; which one is actually used
 * later depends on c->cansleep (see clk_lock_save).
 */
static inline void clk_lock_init(struct clk *c)
{
	mutex_init(&c->mutex);
	spin_lock_init(&c->spinlock);
}
struct clk *tegra_get_clock_by_name(const char *name)
{
struct clk *c;
struct clk *ret = NULL;
unsigned long flags;
spin_lock_irqsave(&clock_lock, flags);
mutex_lock(&clock_list_lock);
list_for_each_entry(c, &clocks, node) {
if (strcmp(c->name, name) == 0) {
ret = c;
break;
}
}
spin_unlock_irqrestore(&clock_lock, flags);
mutex_unlock(&clock_list_lock);
return ret;
}
/* Must be called with clk_lock(c) held */
static unsigned long clk_predict_rate_from_parent(struct clk *c, struct clk *p)
{
u64 rate;
rate = p->rate;
rate = clk_get_rate(p);
if (c->mul != 0 && c->div != 0) {
rate = rate * c->mul;
rate *= c->mul;
do_div(rate, c->div);
}
return rate;
}
static void clk_recalculate_rate(struct clk *c)
/* Must be called with clk_lock(c) held */
unsigned long clk_get_rate_locked(struct clk *c)
{
unsigned long rate;
if (!c->parent)
return;
if (c->parent)
rate = clk_predict_rate_from_parent(c, c->parent);
else
rate = c->rate;
rate = clk_predict_rate_from_parent(c, c->parent);
return rate;
}
if (rate > c->max_rate)
pr_warn("clocks: Set clock %s to rate %lu, max is %lu\n",
c->name, rate, c->max_rate);
unsigned long clk_get_rate(struct clk *c)
{
unsigned long flags;
unsigned long rate;
c->rate = rate;
clk_lock_save(c, flags);
rate = clk_get_rate_locked(c);
clk_unlock_restore(c, flags);
return rate;
}
EXPORT_SYMBOL(clk_get_rate);
/*
 * Recursively mark clock c and all of its descendants as sleepable.
 * Caller must hold clock_list_lock (the clocks list is walked here).
 */
static void __clk_set_cansleep(struct clk *c)
{
	struct clk *child;

	/* The clock must not be in use yet: neither of its locks may be held. */
	BUG_ON(mutex_is_locked(&c->mutex));
	BUG_ON(spin_is_locked(&c->spinlock));

	/* A child is locked before its parent, so a sleeping parent
	 * requires every child to be sleepable as well. */
	list_for_each_entry(child, &clocks, node) {
		if (child->parent != c)
			continue;

		/* A reparentable child could later move under a
		 * non-sleeping parent, breaking the invariant above. */
		WARN(child->ops && child->ops->set_parent,
			"can't make child clock %s of %s "
			"sleepable if it's parent could change",
			child->name, c->name);

		__clk_set_cansleep(child);
	}

	c->cansleep = true;
}
/*
 * Mark clock c (and, recursively, all of its children) as sleepable.
 * Must be called before any clk_get calls, so that no per-clock lock
 * can be contended; only clock_list_lock is taken here.
 */
void clk_set_cansleep(struct clk *c)
{
	mutex_lock(&clock_list_lock);
	__clk_set_cansleep(c);
	mutex_unlock(&clock_list_lock);
}
int clk_reparent(struct clk *c, struct clk *parent)
{
c->parent = parent;
list_del(&c->sibling);
list_add_tail(&c->sibling, &parent->children);
return 0;
}
static void propagate_rate(struct clk *c)
{
struct clk *clkp;
list_for_each_entry(clkp, &c->children, sibling) {
clk_recalculate_rate(clkp);
propagate_rate(clkp);
}
}
void clk_init(struct clk *c)
{
unsigned long flags;
clk_lock_init(c);
spin_lock_irqsave(&clock_lock, flags);
INIT_LIST_HEAD(&c->children);
INIT_LIST_HEAD(&c->sibling);
INIT_LIST_HEAD(&c->dvfs);
if (c->ops && c->ops->init)
@@ -145,71 +231,58 @@ void clk_init(struct clk *c)
c->state = ON;
}
clk_recalculate_rate(c);
mutex_lock(&clock_list_lock);
list_add(&c->node, &clocks);
if (c->parent)
list_add_tail(&c->sibling, &c->parent->children);
spin_unlock_irqrestore(&clock_lock, flags);
mutex_unlock(&clock_list_lock);
}
int clk_enable_locked(struct clk *c)
int clk_enable(struct clk *c)
{
int ret;
int ret = 0;
unsigned long flags;
clk_lock_save(c, flags);
if (clk_is_auto_dvfs(c)) {
ret = tegra_dvfs_set_rate(c, clk_get_rate_locked(c));
if (ret)
goto out;
}
if (c->refcnt == 0) {
if (c->parent) {
ret = clk_enable_locked(c->parent);
ret = clk_enable(c->parent);
if (ret)
return ret;
goto out;
}
if (c->ops && c->ops->enable) {
ret = c->ops->enable(c);
if (ret) {
if (c->parent)
clk_disable_locked(c->parent);
return ret;
clk_disable(c->parent);
goto out;
}
c->state = ON;
c->set = true;
}
}
c->refcnt++;
return 0;
}
int clk_enable(struct clk *c)
{
int ret;
unsigned long flags;
if (clk_is_auto_dvfs(c)) {
lock_dvfs();
ret = tegra_dvfs_set_rate(c, c->rate);
if (ret)
goto out;
}
spin_lock_irqsave(&clock_lock, flags);
ret = clk_enable_locked(c);
spin_unlock_irqrestore(&clock_lock, flags);
out:
if (clk_is_auto_dvfs(c))
unlock_dvfs();
clk_unlock_restore(c, flags);
return ret;
}
EXPORT_SYMBOL(clk_enable);
void clk_disable_locked(struct clk *c)
void clk_disable(struct clk *c)
{
unsigned long flags;
clk_lock_save(c, flags);
if (c->refcnt == 0) {
WARN(1, "Attempting to disable clock %s with refcnt 0", c->name);
clk_unlock_restore(c, flags);
return;
}
if (c->refcnt == 1) {
@@ -217,79 +290,53 @@ void clk_disable_locked(struct clk *c)
c->ops->disable(c);
if (c->parent)
clk_disable_locked(c->parent);
clk_disable(c->parent);
c->state = OFF;
}
c->refcnt--;
}
void clk_disable(struct clk *c)
{
unsigned long flags;
if (clk_is_auto_dvfs(c) && c->refcnt == 0)
tegra_dvfs_set_rate(c, 0);
if (clk_is_auto_dvfs(c))
lock_dvfs();
spin_lock_irqsave(&clock_lock, flags);
clk_disable_locked(c);
spin_unlock_irqrestore(&clock_lock, flags);
if (clk_is_auto_dvfs(c)) {
if (c->refcnt == 0)
tegra_dvfs_set_rate(c, 0);
unlock_dvfs();
}
clk_unlock_restore(c, flags);
}
EXPORT_SYMBOL(clk_disable);
int clk_set_parent_locked(struct clk *c, struct clk *parent)
{
int ret;
if (!c->ops || !c->ops->set_parent)
return -ENOSYS;
ret = c->ops->set_parent(c, parent);
if (ret)
return ret;
clk_recalculate_rate(c);
propagate_rate(c);
return 0;
}
int clk_set_parent(struct clk *c, struct clk *parent)
{
int ret = 0;
unsigned long flags;
unsigned long new_rate = clk_predict_rate_from_parent(c, parent);
unsigned long new_rate;
unsigned long old_rate;
clk_lock_save(c, flags);
if (clk_is_auto_dvfs(c)) {
lock_dvfs();
if (c->refcnt > 0 && (!c->parent || new_rate > c->rate))
ret = tegra_dvfs_set_rate(c, new_rate);
if (!ret)
if (!c->ops || !c->ops->set_parent) {
ret = -ENOSYS;
goto out;
}
new_rate = clk_predict_rate_from_parent(c, parent);
old_rate = clk_get_rate_locked(c);
if (clk_is_auto_dvfs(c) && c->refcnt > 0 &&
(!c->parent || new_rate > old_rate)) {
ret = tegra_dvfs_set_rate(c, new_rate);
if (ret)
goto out;
}
spin_lock_irqsave(&clock_lock, flags);
ret = clk_set_parent_locked(c, parent);
spin_unlock_irqrestore(&clock_lock, flags);
if (!ret)
ret = c->ops->set_parent(c, parent);
if (ret)
goto out;
if (clk_is_auto_dvfs(c) && c->refcnt > 0)
if (clk_is_auto_dvfs(c) && c->refcnt > 0 &&
new_rate < old_rate)
ret = tegra_dvfs_set_rate(c, new_rate);
out:
if (clk_is_auto_dvfs(c))
unlock_dvfs();
clk_unlock_restore(c, flags);
return ret;
}
EXPORT_SYMBOL(clk_set_parent);
@@ -300,86 +347,87 @@ struct clk *clk_get_parent(struct clk *c)
}
EXPORT_SYMBOL(clk_get_parent);
int clk_set_rate_locked(struct clk *c, unsigned long rate)
{
int ret;
if (rate == c->requested_rate)
return 0;
if (rate > c->max_rate)
rate = c->max_rate;
if (!c->ops || !c->ops->set_rate)
return -ENOSYS;
c->requested_rate = rate;
ret = c->ops->set_rate(c, rate);
if (ret)
return ret;
clk_recalculate_rate(c);
propagate_rate(c);
return 0;
}
int clk_set_rate(struct clk *c, unsigned long rate)
{
int ret = 0;
unsigned long flags;
unsigned long old_rate;
if (clk_is_auto_dvfs(c)) {
lock_dvfs();
if (rate > c->rate && c->refcnt > 0)
ret = tegra_dvfs_set_rate(c, rate);
if (ret)
goto out;
clk_lock_save(c, flags);
if (!c->ops || !c->ops->set_rate) {
ret = -ENOSYS;
goto out;
}
spin_lock_irqsave(&clock_lock, flags);
ret = clk_set_rate_locked(c, rate);
spin_unlock_irqrestore(&clock_lock, flags);
if (ret)
goto out;
if (clk_is_auto_dvfs(c) && c->refcnt > 0)
ret = tegra_dvfs_set_rate(c, rate);
out:
if (clk_is_auto_dvfs(c))
unlock_dvfs();
return ret;
}
EXPORT_SYMBOL(clk_set_rate);
unsigned long clk_get_rate(struct clk *c)
{
unsigned long flags;
unsigned long ret;
spin_lock_irqsave(&clock_lock, flags);
ret = c->rate;
spin_unlock_irqrestore(&clock_lock, flags);
return ret;
}
EXPORT_SYMBOL(clk_get_rate);
long clk_round_rate(struct clk *c, unsigned long rate)
{
if (!c->ops || !c->ops->round_rate)
return -ENOSYS;
old_rate = clk_get_rate_locked(c);
if (rate > c->max_rate)
rate = c->max_rate;
return c->ops->round_rate(c, rate);
if (clk_is_auto_dvfs(c) && rate > old_rate && c->refcnt > 0) {
ret = tegra_dvfs_set_rate(c, rate);
if (ret)
goto out;
}
ret = c->ops->set_rate(c, rate);
if (ret)
goto out;
if (clk_is_auto_dvfs(c) && rate < old_rate && c->refcnt > 0)
ret = tegra_dvfs_set_rate(c, rate);
out:
clk_unlock_restore(c, flags);
return ret;
}
EXPORT_SYMBOL(clk_set_rate);
/* Must be called with clocks lock and all individual clock locks held */
unsigned long clk_get_rate_all_locked(struct clk *c)
{
	u64 rate;
	int mul = 1;
	int div = 1;
	struct clk *p = c;

	/* Walk up to the root clock, accumulating the mul/div factors of
	 * every clock on the path.  The loop exits with c == root. */
	while (p) {
		c = p;
		if (c->mul != 0 && c->div != 0) {
			mul *= c->mul;
			div *= c->div;
		}
		p = c->parent;
	}

	/* 64-bit math so rate * mul cannot overflow; do_div for the
	 * 64-by-32 division. */
	rate = c->rate;
	rate *= mul;
	do_div(rate, div);

	return rate;
}
/*
 * Round a requested rate to the nearest rate the clock can actually run
 * at, without changing any hardware state.  Returns the rounded rate, or
 * -ENOSYS if the clock has no round_rate operation.
 */
long clk_round_rate(struct clk *c, unsigned long rate)
{
	unsigned long flags;
	long ret;

	clk_lock_save(c, flags);

	if (!c->ops || !c->ops->round_rate) {
		ret = -ENOSYS;
		goto out;
	}

	/* Clamp the request to the clock's maximum before asking the
	 * clock's op to round it. */
	if (rate > c->max_rate)
		rate = c->max_rate;

	ret = c->ops->round_rate(c, rate);

out:
	clk_unlock_restore(c, flags);
	return ret;
}
EXPORT_SYMBOL(clk_round_rate);
@@ -459,32 +507,51 @@ EXPORT_SYMBOL(tegra_periph_reset_assert);
void __init tegra_init_clock(void)
{
tegra2_init_clocks();
tegra2_init_dvfs();
}
/*
* Iterate through all clocks, setting the dvfs rate to the current clock
* rate on all auto dvfs clocks, and to the saved dvfs rate on all manual
* dvfs clocks. Used to enable dvfs during late init, after the regulators
* are available.
*/
void __init tegra_clk_set_dvfs_rates(void)
{
unsigned long flags;
struct clk *c;
mutex_lock(&clock_list_lock);
list_for_each_entry(c, &clocks, node) {
clk_lock_save(c, flags);
if (clk_is_auto_dvfs(c)) {
if (c->refcnt > 0)
tegra_dvfs_set_rate(c, c->rate);
tegra_dvfs_set_rate(c, clk_get_rate_locked(c));
else
tegra_dvfs_set_rate(c, 0);
} else if (clk_is_dvfs(c)) {
tegra_dvfs_set_rate(c, c->dvfs_rate);
}
clk_unlock_restore(c, flags);
}
mutex_unlock(&clock_list_lock);
}
/*
* Iterate through all clocks, disabling any for which the refcount is 0
* but the clock init detected the bootloader left the clock on.
*/
int __init tegra_disable_boot_clocks(void)
{
unsigned long flags;
struct clk *c;
lock_dvfs();
spin_lock_irqsave(&clock_lock, flags);
mutex_lock(&clock_list_lock);
list_for_each_entry(c, &clocks, node) {
clk_lock_save(c, flags);
if (c->refcnt == 0 && c->state == ON &&
c->ops && c->ops->disable) {
pr_warning("Disabling clock %s left on by bootloader\n",
@@ -492,15 +559,135 @@ int __init tegra_disable_boot_clocks(void)
c->ops->disable(c);
c->state = OFF;
}
clk_unlock_restore(c, flags);
}
spin_unlock_irqrestore(&clock_lock, flags);
unlock_dvfs();
mutex_unlock(&clock_list_lock);
return 0;
}
late_initcall(tegra_disable_boot_clocks);
int __init tegra_late_init_clock(void)
{
tegra_dvfs_late_init();
tegra_disable_boot_clocks();
tegra_clk_set_dvfs_rates();
return 0;
}
late_initcall(tegra_late_init_clock);
#ifdef CONFIG_DEBUG_FS
/*
* Attempt to lock all the clocks that are marked cansleep
* Must be called with irqs enabled
*/
/*
 * Try to take the mutex of every cansleep clock, in list order.
 * Returns 0 with all mutexes held, or -EAGAIN with none held.
 */
static int __clk_lock_all_mutexes(void)
{
	struct clk *c;

	might_sleep();

	list_for_each_entry(c, &clocks, node)
		if (clk_cansleep(c))
			if (!mutex_trylock(&c->mutex))
				goto unlock_mutexes;

	return 0;

unlock_mutexes:
	/* Trylock failed: back out every mutex taken so far, in reverse. */
	list_for_each_entry_continue_reverse(c, &clocks, node)
		if (clk_cansleep(c))
			mutex_unlock(&c->mutex);

	return -EAGAIN;
}
/*
* Attempt to lock all the clocks that are not marked cansleep
* Must be called with irqs disabled
*/
/*
 * Try to take the spinlock of every non-cansleep clock, in list order.
 * Returns 0 with all spinlocks held, or -EAGAIN with none held.
 */
static int __clk_lock_all_spinlocks(void)
{
	struct clk *c;

	list_for_each_entry(c, &clocks, node)
		if (!clk_cansleep(c))
			if (!spin_trylock(&c->spinlock))
				goto unlock_spinlocks;

	return 0;

unlock_spinlocks:
	/* Trylock failed: release every spinlock taken so far, in reverse. */
	list_for_each_entry_continue_reverse(c, &clocks, node)
		if (!clk_cansleep(c))
			spin_unlock(&c->spinlock);

	return -EAGAIN;
}
/* Unlock every cansleep clock's mutex, in reverse list order. */
static void __clk_unlock_all_mutexes(void)
{
	struct clk *c;

	list_for_each_entry_reverse(c, &clocks, node)
		if (clk_cansleep(c))
			mutex_unlock(&c->mutex);
}
/* Unlock every non-cansleep clock's spinlock, in reverse list order. */
static void __clk_unlock_all_spinlocks(void)
{
	struct clk *c;

	list_for_each_entry_reverse(c, &clocks, node)
		if (!clk_cansleep(c))
			spin_unlock(&c->spinlock);
}
/*
* This function retries until it can take all locks, and may take
* an arbitrarily long time to complete.
* Must be called with irqs enabled, returns with irqs disabled
* Must be called with clock_list_lock held
*/
static void clk_lock_all(void)
{
	int ret;
retry:
	/* Take all mutexes first, while sleeping is still allowed. */
	ret = __clk_lock_all_mutexes();
	if (ret)
		goto failed_mutexes;

	/* Spinlocks are taken with irqs disabled; on success we return
	 * to the caller with irqs still disabled. */
	local_irq_disable();

	ret = __clk_lock_all_spinlocks();
	if (ret)
		goto failed_spinlocks;

	/* All locks taken successfully, return */
	return;

failed_spinlocks:
	local_irq_enable();
	__clk_unlock_all_mutexes();
failed_mutexes:
	/* Give the current lock holders a chance to run, then retry. */
	msleep(1);
	goto retry;
}
/*
* Unlocks all clocks after a clk_lock_all
* Must be called with irqs disabled, returns with irqs enabled
* Must be called with clock_list_lock held
*/
static void clk_unlock_all(void)
{
	/* Reverse order of clk_lock_all: spinlocks, then irqs, then mutexes. */
	__clk_unlock_all_spinlocks();

	local_irq_enable();

	__clk_unlock_all_mutexes();
}
static struct dentry *clk_debugfs_root;
static void dvfs_show_one(struct seq_file *s, struct dvfs *d, int level)
@@ -515,7 +702,6 @@ static void dvfs_show_one(struct seq_file *s, struct dvfs *d, int level)
static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
{
struct clk *child;
struct clk *safe;
struct dvfs *d;
const char *state = "uninit";
char div[8] = {0};
@@ -547,12 +733,15 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
c->rate > c->max_rate ? '!' : ' ',
!c->set ? '*' : ' ',
30 - level * 3, c->name,
state, c->refcnt, div, c->rate);
state, c->refcnt, div, clk_get_rate_all_locked(c));
list_for_each_entry(d, &c->dvfs, node)
dvfs_show_one(s, d, level + 1);
list_for_each_entry_safe(child, safe, &c->children, sibling) {
list_for_each_entry(child, &clocks, node) {
if (child->parent != c)
continue;
clock_tree_show_one(s, child, level + 1);
}
}
@@ -560,14 +749,20 @@ static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
static int clock_tree_show(struct seq_file *s, void *data)
{
struct clk *c;
unsigned long flags;
seq_printf(s, " clock state ref div rate\n");
seq_printf(s, "--------------------------------------------------------------\n");
spin_lock_irqsave(&clock_lock, flags);
mutex_lock(&clock_list_lock);
clk_lock_all();
list_for_each_entry(c, &clocks, node)
if (c->parent == NULL)
clock_tree_show_one(s, c, 0);
spin_unlock_irqrestore(&clock_lock, flags);
clk_unlock_all();
mutex_unlock(&clock_list_lock);
return 0;
}

View File

@@ -21,6 +21,8 @@
#define __MACH_TEGRA_CLOCK_H
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <asm/clkdev.h>
#define DIV_BUS (1 << 0)
@@ -75,8 +77,6 @@ enum clk_state {
struct clk {
/* node for master clocks list */
struct list_head node; /* node for list of all clocks */
struct list_head children; /* list of children */
struct list_head sibling; /* node for children */
struct list_head dvfs; /* list of dvfs dependencies */
struct clk_lookup lookup;
@@ -91,11 +91,11 @@ struct clk {
unsigned long max_rate;
bool is_dvfs;
bool auto_dvfs;
bool cansleep;
u32 flags;
const char *name;
u32 refcnt;
unsigned long requested_rate;
enum clk_state state;
struct clk *parent;
u32 div;
@@ -137,8 +137,10 @@ struct clk {
unsigned long rate;
} shared_bus_user;
} u;
};
struct mutex mutex;
spinlock_t spinlock;
};
struct clk_duplicate {
const char *name;
@@ -158,12 +160,10 @@ void tegra2_periph_reset_assert(struct clk *c);
void clk_init(struct clk *clk);
struct clk *tegra_get_clock_by_name(const char *name);
unsigned long clk_measure_input_freq(void);
void clk_disable_locked(struct clk *c);
int clk_enable_locked(struct clk *c);
int clk_set_parent_locked(struct clk *c, struct clk *parent);
int clk_set_rate_locked(struct clk *c, unsigned long rate);
int clk_reparent(struct clk *c, struct clk *parent);
void tegra_clk_init_from_table(struct tegra_clk_init_table *table);
void tegra_clk_set_dvfs_rates(void);
void clk_set_cansleep(struct clk *c);
unsigned long clk_get_rate_locked(struct clk *c);
#endif

View File

@@ -59,10 +59,10 @@ static __initdata struct tegra_clk_init_table common_clk_init_table[] = {
{ "pll_p_out1", "pll_p", 28800000, true },
{ "pll_p_out2", "pll_p", 48000000, true },
{ "pll_p_out3", "pll_p", 72000000, true },
{ "pll_m_out1", "pll_m", 240000000, true },
{ "sclk", "pll_m_out1", 240000000, true },
{ "hclk", "sclk", 240000000, true },
{ "pclk", "hclk", 120000000, true },
{ "pll_m_out1", "pll_m", 120000000, true },
{ "sclk", "pll_m_out1", 120000000, true },
{ "hclk", "sclk", 120000000, true },
{ "pclk", "hclk", 60000000, true },
{ "pll_x", NULL, 0, true },
{ "cpu", NULL, 0, true },
{ "emc", NULL, 0, true },

View File

@@ -42,21 +42,11 @@ struct dvfs_reg {
int millivolts;
};
static LIST_HEAD(dvfs_list);
static LIST_HEAD(dvfs_debug_list);
static LIST_HEAD(dvfs_reg_list);
static DEFINE_MUTEX(dvfs_lock);
void lock_dvfs(void)
{
mutex_lock(&dvfs_lock);
}
void unlock_dvfs(void)
{
mutex_unlock(&dvfs_lock);
}
static DEFINE_MUTEX(dvfs_debug_list_lock);
static DEFINE_MUTEX(dvfs_reg_list_lock);
static int dvfs_reg_set_voltage(struct dvfs_reg *dvfs_reg)
{
@@ -71,46 +61,53 @@ static int dvfs_reg_set_voltage(struct dvfs_reg *dvfs_reg)
dvfs_reg->millivolts = millivolts;
if (!dvfs_reg->reg) {
pr_warn("dvfs set voltage on %s ignored\n", dvfs_reg->reg_id);
return 0;
}
return regulator_set_voltage(dvfs_reg->reg,
millivolts * 1000, dvfs_reg->max_millivolts * 1000);
}
static int dvfs_reg_get_voltage(struct dvfs_reg *dvfs_reg)
static int dvfs_reg_connect_to_regulator(struct dvfs_reg *dvfs_reg)
{
int ret = regulator_get_voltage(dvfs_reg->reg);
struct regulator *reg;
if (ret > 0)
return ret / 1000;
if (!dvfs_reg->reg) {
reg = regulator_get(NULL, dvfs_reg->reg_id);
if (IS_ERR(reg))
return -EINVAL;
}
return ret;
dvfs_reg->reg = reg;
return 0;
}
static struct dvfs_reg *get_dvfs_reg(struct dvfs *d)
{
struct dvfs_reg *dvfs_reg;
struct regulator *reg;
mutex_lock(&dvfs_reg_list_lock);
list_for_each_entry(dvfs_reg, &dvfs_reg_list, node)
if (!strcmp(d->reg_id, dvfs_reg->reg_id))
return dvfs_reg;
reg = regulator_get(NULL, d->reg_id);
if (IS_ERR(reg))
return NULL;
goto out;
dvfs_reg = kzalloc(sizeof(struct dvfs_reg), GFP_KERNEL);
if (!dvfs_reg) {
pr_err("%s: Failed to allocate dvfs_reg\n", __func__);
regulator_put(reg);
return NULL;
goto out;
}
INIT_LIST_HEAD(&dvfs_reg->dvfs);
dvfs_reg->reg = reg;
dvfs_reg->reg_id = kstrdup(d->reg_id, GFP_KERNEL);
list_add_tail(&dvfs_reg->node, &dvfs_reg_list);
out:
mutex_unlock(&dvfs_reg_list_lock);
return dvfs_reg;
}
@@ -127,7 +124,7 @@ static struct dvfs_reg *attach_dvfs_reg(struct dvfs *d)
if (d->max_millivolts > d->dvfs_reg->max_millivolts)
d->dvfs_reg->max_millivolts = d->max_millivolts;
d->cur_millivolts = dvfs_reg_get_voltage(d->dvfs_reg);
d->cur_millivolts = d->max_millivolts;
return dvfs_reg;
}
@@ -177,7 +174,7 @@ int tegra_dvfs_set_rate(struct clk *c, unsigned long rate)
c->dvfs_rate = rate;
freq_up = (c->refcnt == 0) || (rate > c->rate);
freq_up = (c->refcnt == 0) || (rate > clk_get_rate_locked(c));
list_for_each_entry(d, &c->dvfs, node) {
if (d->higher == freq_up)
@@ -197,7 +194,8 @@ int tegra_dvfs_set_rate(struct clk *c, unsigned long rate)
}
EXPORT_SYMBOL(tegra_dvfs_set_rate);
int tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d)
/* May only be called during clock init, does not take any locks on clock c. */
int __init tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d)
{
int i;
struct dvfs_reg *dvfs_reg;
@@ -221,30 +219,38 @@ int tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d)
}
d->num_freqs = i;
if (d->auto_dvfs)
if (d->auto_dvfs) {
c->auto_dvfs = true;
clk_set_cansleep(c);
}
c->is_dvfs = true;
smp_wmb();
list_add_tail(&d->node, &c->dvfs);
mutex_lock(&dvfs_debug_list_lock);
list_add_tail(&d->debug_node, &dvfs_debug_list);
mutex_unlock(&dvfs_debug_list_lock);
return 0;
}
int __init tegra_init_dvfs(void)
/*
* Iterate through all the dvfs regulators, finding the regulator exported
* by the regulator api for each one. Must be called in late init, after
* all the regulator api's regulators are initialized.
*/
int __init tegra_dvfs_late_init(void)
{
lock_dvfs();
tegra2_init_dvfs();
struct dvfs_reg *dvfs_reg;
tegra_clk_set_dvfs_rates();
unlock_dvfs();
mutex_lock(&dvfs_reg_list_lock);
list_for_each_entry(dvfs_reg, &dvfs_reg_list, node)
dvfs_reg_connect_to_regulator(dvfs_reg);
mutex_unlock(&dvfs_reg_list_lock);
return 0;
}
late_initcall(tegra_init_dvfs);
#ifdef CONFIG_DEBUG_FS
static int dvfs_tree_sort_cmp(void *p, struct list_head *a, struct list_head *b)
@@ -273,7 +279,7 @@ static int dvfs_tree_show(struct seq_file *s, void *data)
seq_printf(s, " clock rate mV\n");
seq_printf(s, "--------------------------------\n");
lock_dvfs();
mutex_lock(&dvfs_debug_list_lock);
list_sort(NULL, &dvfs_debug_list, dvfs_tree_sort_cmp);
@@ -288,7 +294,7 @@ static int dvfs_tree_show(struct seq_file *s, void *data)
d->cur_rate, d->cur_millivolts);
}
unlock_dvfs();
mutex_unlock(&dvfs_debug_list_lock);
return 0;
}

View File

@@ -49,11 +49,9 @@ struct dvfs {
struct list_head reg_node;
};
void lock_dvfs(void);
void unlock_dvfs(void);
void tegra2_init_dvfs(void);
int tegra_enable_dvfs_on_clk(struct clk *c, struct dvfs *d);
int dvfs_debugfs_init(struct dentry *clk_debugfs_root);
int tegra_dvfs_late_init(void);
#endif

View File

@@ -26,5 +26,6 @@ void tegra_periph_reset_deassert(struct clk *c);
void tegra_periph_reset_assert(struct clk *c);
int tegra_dvfs_set_rate(struct clk *c, unsigned long rate);
unsigned long clk_get_rate_all_locked(struct clk *c);
#endif

View File

@@ -46,6 +46,7 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <mach/clk.h>
#include <mach/iomap.h>
#include <mach/iovmm.h>
#include <mach/irqs.h>
@@ -358,7 +359,7 @@ unsigned int tegra_suspend_lp2(unsigned int us)
writel(virt_to_phys(tegra_lp2_startup), evp_reset);
set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer,
clk_get_rate(tegra_pclk));
clk_get_rate_all_locked(tegra_pclk));
if (us)
tegra_lp2_set_trigger(us);

View File

@@ -23,7 +23,7 @@
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/hrtimer.h>
#include <linux/clk.h>
#include <asm/clkdev.h>
@@ -337,12 +337,12 @@ static int tegra2_super_clk_set_parent(struct clk *c, struct clk *p)
val |= sel->value << shift;
if (c->refcnt)
clk_enable_locked(p);
clk_enable(p);
clk_writel(val, c->reg);
if (c->refcnt && c->parent)
clk_disable_locked(c->parent);
clk_disable(c->parent);
clk_reparent(c, p);
return 0;
@@ -351,11 +351,24 @@ static int tegra2_super_clk_set_parent(struct clk *c, struct clk *p)
return -EINVAL;
}
/*
* Super clocks have "clock skippers" instead of dividers. Dividing using
* a clock skipper does not allow the voltage to be scaled down, so instead
* adjust the rate of the parent clock. This requires that the parent of a
* super clock has no other children, otherwise the rate will change
* underneath the other children.
*/
static int tegra2_super_clk_set_rate(struct clk *c, unsigned long rate)
{
return clk_set_rate(c->parent, rate);
}
static struct clk_ops tegra_super_ops = {
.init = tegra2_super_clk_init,
.enable = tegra2_super_clk_enable,
.disable = tegra2_super_clk_disable,
.set_parent = tegra2_super_clk_set_parent,
.set_rate = tegra2_super_clk_set_rate,
};
/* virtual cpu clock functions */
@@ -389,31 +402,31 @@ static int tegra2_cpu_clk_set_rate(struct clk *c, unsigned long rate)
* Take an extra reference to the main pll so it doesn't turn
* off when we move the cpu off of it
*/
clk_enable_locked(c->u.cpu.main);
clk_enable(c->u.cpu.main);
ret = clk_set_parent_locked(c->parent, c->u.cpu.backup);
ret = clk_set_parent(c->parent, c->u.cpu.backup);
if (ret) {
pr_err("Failed to switch cpu to clock %s\n", c->u.cpu.backup->name);
goto out;
}
if (rate == c->u.cpu.backup->rate)
if (rate == clk_get_rate(c->u.cpu.backup))
goto out;
ret = clk_set_rate_locked(c->u.cpu.main, rate);
ret = clk_set_rate(c->u.cpu.main, rate);
if (ret) {
pr_err("Failed to change cpu pll to %lu\n", rate);
goto out;
}
ret = clk_set_parent_locked(c->parent, c->u.cpu.main);
ret = clk_set_parent(c->parent, c->u.cpu.main);
if (ret) {
pr_err("Failed to switch cpu to clock %s\n", c->u.cpu.main->name);
goto out;
}
out:
clk_disable_locked(c->u.cpu.main);
clk_disable(c->u.cpu.main);
return ret;
}
@@ -465,7 +478,7 @@ static void tegra2_bus_clk_disable(struct clk *c)
static int tegra2_bus_clk_set_rate(struct clk *c, unsigned long rate)
{
u32 val = clk_readl(c->reg);
unsigned long parent_rate = c->parent->rate;
unsigned long parent_rate = clk_get_rate(c->parent);
int i;
for (i = 1; i <= 4; i++) {
if (rate == parent_rate / i) {
@@ -539,14 +552,15 @@ static void tegra2_blink_clk_disable(struct clk *c)
static int tegra2_blink_clk_set_rate(struct clk *c, unsigned long rate)
{
if (rate >= c->parent->rate) {
unsigned long parent_rate = clk_get_rate(c->parent);
if (rate >= parent_rate) {
c->div = 1;
pmc_writel(0, c->reg);
} else {
unsigned int on_off;
u32 val;
on_off = DIV_ROUND_UP(c->parent->rate / 8, rate);
on_off = DIV_ROUND_UP(parent_rate / 8, rate);
c->div = on_off * 8;
val = (on_off & PMC_BLINK_TIMER_DATA_ON_MASK) <<
@@ -632,7 +646,7 @@ static int tegra2_pll_clk_set_rate(struct clk *c, unsigned long rate)
pr_debug("%s: %s %lu\n", __func__, c->name, rate);
input_rate = c->parent->rate;
input_rate = clk_get_rate(c->parent);
for (sel = c->u.pll.freq_table; sel->input_rate != 0; sel++) {
if (sel->input_rate == input_rate && sel->output_rate == rate) {
c->mul = sel->n;
@@ -772,9 +786,11 @@ static int tegra2_pll_div_clk_set_rate(struct clk *c, unsigned long rate)
u32 val;
u32 new_val;
int divider_u71;
unsigned long parent_rate = clk_get_rate(c->parent);
pr_debug("%s: %s %lu\n", __func__, c->name, rate);
if (c->flags & DIV_U71) {
divider_u71 = clk_div71_get_divider(c->parent->rate, rate);
divider_u71 = clk_div71_get_divider(parent_rate, rate);
if (divider_u71 >= 0) {
val = clk_readl(c->reg);
new_val = val >> c->reg_shift;
@@ -792,7 +808,7 @@ static int tegra2_pll_div_clk_set_rate(struct clk *c, unsigned long rate)
return 0;
}
} else if (c->flags & DIV_2) {
if (c->parent->rate == rate * 2)
if (parent_rate == rate * 2)
return 0;
}
return -EINVAL;
@@ -801,15 +817,16 @@ static int tegra2_pll_div_clk_set_rate(struct clk *c, unsigned long rate)
static long tegra2_pll_div_clk_round_rate(struct clk *c, unsigned long rate)
{
int divider;
unsigned long parent_rate = clk_get_rate(c->parent);
pr_debug("%s: %s %lu\n", __func__, c->name, rate);
if (c->flags & DIV_U71) {
divider = clk_div71_get_divider(c->parent->rate, rate);
divider = clk_div71_get_divider(parent_rate, rate);
if (divider < 0)
return divider;
return c->parent->rate * 2 / (divider + 2);
return parent_rate * 2 / (divider + 2);
} else if (c->flags & DIV_2) {
return c->parent->rate / 2;
return parent_rate / 2;
}
return -EINVAL;
}
@@ -923,12 +940,12 @@ static int tegra2_periph_clk_set_parent(struct clk *c, struct clk *p)
val |= (sel->value) << PERIPH_CLK_SOURCE_SHIFT;
if (c->refcnt)
clk_enable_locked(p);
clk_enable(p);
clk_writel(val, c->reg);
if (c->refcnt && c->parent)
clk_disable_locked(c->parent);
clk_disable(c->parent);
clk_reparent(c, p);
return 0;
@@ -942,9 +959,10 @@ static int tegra2_periph_clk_set_rate(struct clk *c, unsigned long rate)
{
u32 val;
int divider;
pr_debug("%s: %lu\n", __func__, rate);
unsigned long parent_rate = clk_get_rate(c->parent);
if (c->flags & DIV_U71) {
divider = clk_div71_get_divider(c->parent->rate, rate);
divider = clk_div71_get_divider(parent_rate, rate);
if (divider >= 0) {
val = clk_readl(c->reg);
val &= ~PERIPH_CLK_SOURCE_DIVU71_MASK;
@@ -955,7 +973,7 @@ static int tegra2_periph_clk_set_rate(struct clk *c, unsigned long rate)
return 0;
}
} else if (c->flags & DIV_U16) {
divider = clk_div16_get_divider(c->parent->rate, rate);
divider = clk_div16_get_divider(parent_rate, rate);
if (divider >= 0) {
val = clk_readl(c->reg);
val &= ~PERIPH_CLK_SOURCE_DIVU16_MASK;
@@ -965,7 +983,7 @@ static int tegra2_periph_clk_set_rate(struct clk *c, unsigned long rate)
c->mul = 1;
return 0;
}
} else if (c->parent->rate <= rate) {
} else if (parent_rate <= rate) {
c->div = 1;
c->mul = 1;
return 0;
@@ -977,19 +995,20 @@ static long tegra2_periph_clk_round_rate(struct clk *c,
unsigned long rate)
{
	int divider;
	unsigned long parent_rate = clk_get_rate(c->parent);

	pr_debug("%s: %s %lu\n", __func__, c->name, rate);

	if (c->flags & DIV_U71) {
		/* U71 divider: out = parent * 2 / (n + 2) */
		divider = clk_div71_get_divider(parent_rate, rate);
		if (divider < 0)
			return divider; /* propagate negative error code */
		return parent_rate * 2 / (divider + 2);
	} else if (c->flags & DIV_U16) {
		/* plain 16-bit divider: out = parent / (n + 1) */
		divider = clk_div16_get_divider(parent_rate, rate);
		if (divider < 0)
			return divider;
		return parent_rate / (divider + 1);
	}
	return -EINVAL;
}
@@ -1017,7 +1036,7 @@ static void tegra2_clk_double_init(struct clk *c)
static int tegra2_clk_double_set_rate(struct clk *c, unsigned long rate)
{
if (rate != 2 * c->parent->rate)
if (rate != 2 * clk_get_rate(c->parent))
return -EINVAL;
c->mul = 2;
c->div = 1;
@@ -1068,12 +1087,12 @@ static int tegra2_audio_sync_clk_set_parent(struct clk *c, struct clk *p)
val |= sel->value;
if (c->refcnt)
clk_enable_locked(p);
clk_enable(p);
clk_writel(val, c->reg);
if (c->refcnt && c->parent)
clk_disable_locked(c->parent);
clk_disable(c->parent);
clk_reparent(c, p);
return 0;
@@ -1083,30 +1102,10 @@ static int tegra2_audio_sync_clk_set_parent(struct clk *c, struct clk *p)
return -EINVAL;
}
/*
 * The audio sync clock has no divider: it can only run at exactly its
 * parent's rate.  Accept the request only when it matches the parent,
 * and cache the rate on success.
 *
 * Returns 0 on success, -EINVAL when the clock has no parent or the
 * requested rate differs from the parent rate.
 */
static int tegra2_audio_sync_clk_set_rate(struct clk *c, unsigned long rate)
{
	unsigned long parent_rate;
	if (!c->parent) {
		pr_err("%s: clock has no parent\n", __func__);
		return -EINVAL;
	}
	parent_rate = c->parent->rate;
	if (rate != parent_rate) {
		/* %lu: rate and parent_rate are unsigned long (was %ld) */
		pr_err("%s: %s/%lu differs from parent %s/%lu\n",
			__func__,
			c->name, rate,
			c->parent->name, parent_rate);
		return -EINVAL;
	}
	c->rate = parent_rate;
	return 0;
}
/*
 * Clock operations for the audio sync clock.  No .round_rate: the clock
 * has no divider, so set_rate only accepts the parent's exact rate.
 */
static struct clk_ops tegra_audio_sync_clk_ops = {
.init = tegra2_audio_sync_clk_init,
.enable = tegra2_audio_sync_clk_enable,
.disable = tegra2_audio_sync_clk_disable,
.set_rate = tegra2_audio_sync_clk_set_rate,
.set_parent = tegra2_audio_sync_clk_set_parent,
};
@@ -1154,12 +1153,13 @@ static void tegra_clk_shared_bus_update(struct clk *bus)
struct clk *c;
unsigned long rate = bus->u.shared_bus.min_rate;
list_for_each_entry(c, &bus->u.shared_bus.list, u.shared_bus_user.node)
list_for_each_entry(c, &bus->u.shared_bus.list,
u.shared_bus_user.node) {
if (c->u.shared_bus_user.enabled)
rate = max(c->u.shared_bus_user.rate, rate);
}
if (rate != bus->rate)
clk_set_rate_locked(bus, rate);
clk_set_rate(bus, rate);
};
static void tegra_clk_shared_bus_init(struct clk *c)

View File

@@ -18,6 +18,7 @@
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include "clock.h"
@@ -81,10 +82,20 @@ static struct dvfs dvfs_init[] = {
CPU_DVFS("cpu", 3, MHZ, 730, 760, 845, 845, 1000),
/* Core voltages (mV): 950, 1000, 1100, 1200, 1275 */
#if 0
/*
* The sdhci core calls the clock ops with a spinlock held, which
* conflicts with the sleeping dvfs api.
* For now, boards must ensure that the core voltage does not drop
* below 1V, or that the sdmmc busses are set to 44 MHz or less.
*/
CORE_DVFS("sdmmc1", 1, KHZ, 44000, 52000, 52000, 52000, 52000),
CORE_DVFS("sdmmc2", 1, KHZ, 44000, 52000, 52000, 52000, 52000),
CORE_DVFS("sdmmc3", 1, KHZ, 44000, 52000, 52000, 52000, 52000),
CORE_DVFS("sdmmc4", 1, KHZ, 44000, 52000, 52000, 52000, 52000),
#endif
CORE_DVFS("ndflash", 1, KHZ, 130000, 150000, 158000, 164000, 164000),
CORE_DVFS("nor", 1, KHZ, 0, 92000, 92000, 92000, 92000),
CORE_DVFS("ide", 1, KHZ, 0, 0, 100000, 100000, 100000),
@@ -122,7 +133,7 @@ static struct dvfs dvfs_init[] = {
CORE_DVFS("NVRM_DEVID_CLK_SRC", 1, MHZ, 480, 600, 800, 1067, 1067),
};
void tegra2_init_dvfs(void)
void __init tegra2_init_dvfs(void)
{
int i;
struct clk *c;

View File

@@ -416,13 +416,9 @@ static void tegra_tx_dma_complete_work(struct work_struct *work)
struct tegra_uart_port *t =
container_of(work, struct tegra_uart_port, tx_work);
struct tegra_dma_req *req = &t->tx_dma_req;
struct circ_buf *xmit = &t->uport.state->xmit;
int count = req->bytes_transferred;
unsigned long flags;
int timeout = 20;
dev_vdbg(t->uport.dev, "%s: %d\n", __func__, count);
while ((uart_readb(t, UART_LSR) & TX_EMPTY_STATUS) != TX_EMPTY_STATUS) {
timeout--;
if (timeout == 0) {
@@ -434,11 +430,8 @@ static void tegra_tx_dma_complete_work(struct work_struct *work)
}
spin_lock_irqsave(&t->uport.lock, flags);
xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
t->tx_in_progress = 0;
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&t->uport);
t->tx_in_progress = 0;
if (req->status != -TEGRA_DMA_REQ_ERROR_ABORTED)
tegra_start_next_tx(t);
@@ -449,7 +442,21 @@ static void tegra_tx_dma_complete_work(struct work_struct *work)
/*
 * DMA completion callback for a UART TX transfer.
 *
 * Under the port spinlock: advance the circular buffer tail by the number
 * of bytes the DMA actually moved, wake any writer waiting for buffer
 * space, then defer the remaining TX handling to tx_work (which may
 * sleep, e.g. waiting for the FIFO to drain).
 * NOTE(review): presumably runs in a context where sleeping is not
 * allowed, hence the schedule_work() deferral -- confirm against the
 * tegra_dma callback contract.
 */
static void tegra_tx_dma_complete_callback(struct tegra_dma_req *req)
{
struct tegra_uart_port *t = req->dev;
struct circ_buf *xmit = &t->uport.state->xmit;
/* bytes the DMA engine reports as actually transferred */
int count = req->bytes_transferred;
unsigned long flags;
dev_vdbg(t->uport.dev, "%s: %d\n", __func__, count);
spin_lock_irqsave(&t->uport.lock, flags);
/* consume 'count' bytes; UART_XMIT_SIZE is a power of two */
xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&t->uport);
/* finish TX (FIFO drain, next request) in process context */
schedule_work(&t->tx_work);
spin_unlock_irqrestore(&t->uport.lock, flags);
}
static irqreturn_t tegra_uart_isr(int irq, void *data)
@@ -552,6 +559,9 @@ static void tegra_stop_rx(struct uart_port *u)
static void tegra_uart_hw_deinit(struct tegra_uart_port *t)
{
unsigned char fcr;
unsigned long flags;
flush_work(&t->tx_work);
/* Disable interrupts */
uart_writeb(t, 0, UART_IER);
@@ -559,6 +569,8 @@ static void tegra_uart_hw_deinit(struct tegra_uart_port *t)
while ((uart_readb(t, UART_LSR) & UART_LSR_TEMT) != UART_LSR_TEMT);
udelay(200);
spin_lock_irqsave(&t->uport.lock, flags);
/* Reset the Rx and Tx FIFOs */
fcr = t->fcr_shadow;
fcr |= UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR;
@@ -568,6 +580,8 @@ static void tegra_uart_hw_deinit(struct tegra_uart_port *t)
clk_disable(t->clk);
t->baud = 0;
spin_unlock_irqrestore(&t->uport.lock, flags);
}
static void tegra_uart_free_rx_dma(struct tegra_uart_port *t)
@@ -805,14 +819,11 @@ fail:
static void tegra_shutdown(struct uart_port *u)
{
struct tegra_uart_port *t;
unsigned long flags;
spin_lock_irqsave(&u->lock, flags);
t = container_of(u, struct tegra_uart_port, uport);
dev_vdbg(u->dev, "+tegra_shutdown\n");
tegra_uart_hw_deinit(t);
spin_unlock_irqrestore(&u->lock, flags);
t->rx_in_progress = 0;
t->tx_in_progress = 0;
@@ -947,11 +958,9 @@ static void tegra_stop_tx(struct uart_port *u)
t = container_of(u, struct tegra_uart_port, uport);
if (t->use_tx_dma) {
if (t->use_tx_dma)
tegra_dma_dequeue_req(t->tx_dma, &t->tx_dma_req);
flush_work(&t->tx_work);
}
t->tx_in_progress = 0;
return;
}
@@ -1170,6 +1179,8 @@ static int tegra_uart_suspend(struct platform_device *pdev, pm_message_t state)
u = &t->uport;
uart_suspend_port(&tegra_uart_driver, u);
flush_work(&t->tx_work);
return 0;
}