Diffstat (limited to 'drivers/clk')
-rw-r--r--  drivers/clk/Kconfig            |  12
-rw-r--r--  drivers/clk/Makefile           |   2
-rw-r--r--  drivers/clk/clk-divider.c      |  68
-rw-r--r--  drivers/clk/clk-fixed-factor.c |  95
-rw-r--r--  drivers/clk/clk-fixed-rate.c   |  49
-rw-r--r--  drivers/clk/clk-gate.c         | 104
-rw-r--r--  drivers/clk/clk-mux.c          |  27
-rw-r--r--  drivers/clk/clk.c              | 270
-rw-r--r--  drivers/clk/clkdev.c           | 142
9 files changed, 531 insertions(+), 238 deletions(-)
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 165e1febae53..4864407e3fc4 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -12,6 +12,7 @@ config HAVE_MACH_CLKDEV config COMMON_CLK bool select HAVE_CLK_PREPARE + select CLKDEV_LOOKUP ---help--- The common clock framework is a single definition of struct clk, useful across many platforms, as well as an @@ -22,17 +23,6 @@ config COMMON_CLK menu "Common Clock Framework" depends on COMMON_CLK -config COMMON_CLK_DISABLE_UNUSED - bool "Disabled unused clocks at boot" - depends on COMMON_CLK - ---help--- - Traverses the entire clock tree and disables any clocks that are - enabled in hardware but have not been enabled by any device drivers. - This saves power and keeps the software model of the clock in line - with reality. - - If in doubt, say "N". - config COMMON_CLK_DEBUG bool "DebugFS representation of clock tree" depends on COMMON_CLK diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index a576f5447d38..4a5bdbc30389 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -1,6 +1,6 @@ obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o obj-$(CONFIG_COMMON_CLK) += clk.o clk-fixed-rate.o clk-gate.o \ - clk-mux.o clk-divider.o + clk-mux.o clk-divider.o clk-fixed-factor.o obj-$(CONFIG_ARCH_MXS) += mxs/ diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index d5ac6a75ea57..8ea11b444528 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -45,7 +45,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, return parent_rate / div; } -EXPORT_SYMBOL_GPL(clk_divider_recalc_rate); /* * The reverse of DIV_ROUND_UP: The maximum number which @@ -68,8 +67,8 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, if (divider->flags & CLK_DIVIDER_ONE_BASED) maxdiv--; - if (!best_parent_rate) { - parent_rate = __clk_get_rate(__clk_get_parent(hw->clk)); + if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { + parent_rate = *best_parent_rate; bestdiv = DIV_ROUND_UP(parent_rate, rate); bestdiv = bestdiv == 0 ? 1 : bestdiv; bestdiv = bestdiv > maxdiv ? 
maxdiv : bestdiv; @@ -109,24 +108,18 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, int div; div = clk_divider_bestdiv(hw, rate, prate); - if (prate) - return *prate / div; - else { - unsigned long r; - r = __clk_get_rate(__clk_get_parent(hw->clk)); - return r / div; - } + return *prate / div; } -EXPORT_SYMBOL_GPL(clk_divider_round_rate); -static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate) +static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) { struct clk_divider *divider = to_clk_divider(hw); unsigned int div; unsigned long flags = 0; u32 val; - div = __clk_get_rate(__clk_get_parent(hw->clk)) / rate; + div = parent_rate / rate; if (!(divider->flags & CLK_DIVIDER_ONE_BASED)) div--; @@ -147,15 +140,26 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate) return 0; } -EXPORT_SYMBOL_GPL(clk_divider_set_rate); -struct clk_ops clk_divider_ops = { +const struct clk_ops clk_divider_ops = { .recalc_rate = clk_divider_recalc_rate, .round_rate = clk_divider_round_rate, .set_rate = clk_divider_set_rate, }; EXPORT_SYMBOL_GPL(clk_divider_ops); +/** + * clk_register_divider - register a divider clock with the clock framework + * @dev: device registering this clock + * @name: name of this clock + * @parent_name: name of clock's parent + * @flags: framework-specific flags + * @reg: register address to adjust divider + * @shift: number of bits to shift the bitfield + * @width: width of the bitfield + * @clk_divider_flags: divider-specific flags for this clock + * @lock: shared register lock for this clock + */ struct clk *clk_register_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, @@ -163,38 +167,34 @@ struct clk *clk_register_divider(struct device *dev, const char *name, { struct clk_divider *div; struct clk *clk; + struct clk_init_data init; + /* allocate the divider */ div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL); - if (!div) { pr_err("%s: could not allocate divider clk\n", __func__); - return NULL; + return ERR_PTR(-ENOMEM); } + init.name = name; + init.ops = &clk_divider_ops; + init.flags = flags; + init.parent_names = (parent_name ? &parent_name: NULL); + init.num_parents = (parent_name ? 1 : 0); + /* struct clk_divider assignments */ div->reg = reg; div->shift = shift; div->width = width; div->flags = clk_divider_flags; div->lock = lock; + div->hw.init = &init; - if (parent_name) { - div->parent[0] = kstrdup(parent_name, GFP_KERNEL); - if (!div->parent[0]) - goto out; - } - - clk = clk_register(dev, name, - &clk_divider_ops, &div->hw, - div->parent, - (parent_name ? 1 : 0), - flags); - if (clk) - return clk; + /* register the clock */ + clk = clk_register(dev, &div->hw); -out: - kfree(div->parent[0]); - kfree(div); + if (IS_ERR(clk)) + kfree(div); - return NULL; + return clk; } diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c new file mode 100644 index 000000000000..c8c003e217ad --- /dev/null +++ b/drivers/clk/clk-fixed-factor.c @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Standard functionality for the common clock API. 
+ */ +#include <linux/module.h> +#include <linux/clk-provider.h> +#include <linux/slab.h> +#include <linux/err.h> + +/* + * DOC: basic fixed multiplier and divider clock that cannot gate + * + * Traits of this clock: + * prepare - clk_prepare only ensures that parents are prepared + * enable - clk_enable only ensures that parents are enabled + * rate - rate is fixed. clk->rate = parent->rate / div * mult + * parent - fixed parent. No clk_set_parent support + */ + +#define to_clk_fixed_factor(_hw) container_of(_hw, struct clk_fixed_factor, hw) + +static unsigned long clk_factor_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_fixed_factor *fix = to_clk_fixed_factor(hw); + + return parent_rate * fix->mult / fix->div; +} + +static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clk_fixed_factor *fix = to_clk_fixed_factor(hw); + + if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) { + unsigned long best_parent; + + best_parent = (rate / fix->mult) * fix->div; + *prate = __clk_round_rate(__clk_get_parent(hw->clk), + best_parent); + } + + return (*prate / fix->div) * fix->mult; +} + +static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + return 0; +} + +struct clk_ops clk_fixed_factor_ops = { + .round_rate = clk_factor_round_rate, + .set_rate = clk_factor_set_rate, + .recalc_rate = clk_factor_recalc_rate, +}; +EXPORT_SYMBOL_GPL(clk_fixed_factor_ops); + +struct clk *clk_register_fixed_factor(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + unsigned int mult, unsigned int div) +{ + struct clk_fixed_factor *fix; + struct clk_init_data init; + struct clk *clk; + + fix = kmalloc(sizeof(*fix), GFP_KERNEL); + if (!fix) { + pr_err("%s: could not allocate fixed factor clk\n", __func__); + return ERR_PTR(-ENOMEM); + } + + /* struct clk_fixed_factor assignments */ + fix->mult = mult; + fix->div = div; + fix->hw.init = &init; + + init.name = name; + init.ops = &clk_fixed_factor_ops; + init.flags = flags; + init.parent_names = &parent_name; + init.num_parents = 1; + + clk = clk_register(dev, &fix->hw); + + if (IS_ERR(clk)) + kfree(fix); + + return clk; +} diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c index 90c79fb5d1bd..cbd246229786 100644 --- a/drivers/clk/clk-fixed-rate.c +++ b/drivers/clk/clk-fixed-rate.c @@ -32,51 +32,50 @@ static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw, { return to_clk_fixed_rate(hw)->fixed_rate; } -EXPORT_SYMBOL_GPL(clk_fixed_rate_recalc_rate); -struct clk_ops clk_fixed_rate_ops = { +const struct clk_ops clk_fixed_rate_ops = { .recalc_rate = clk_fixed_rate_recalc_rate, }; EXPORT_SYMBOL_GPL(clk_fixed_rate_ops); +/** + * clk_register_fixed_rate - register fixed-rate clock with the clock framework + * @dev: device that is registering this clock + * @name: name of this clock + * @parent_name: name of clock's parent + * @flags: framework-specific flags + * @fixed_rate: non-adjustable clock rate + */ struct clk *clk_register_fixed_rate(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned long fixed_rate) { struct clk_fixed_rate *fixed; - char **parent_names = NULL; - u8 len; + struct clk *clk; + struct clk_init_data init; + /* allocate fixed-rate clock */ fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL); - if (!fixed) { pr_err("%s: could not allocate fixed clk\n", __func__); return ERR_PTR(-ENOMEM); } + init.name = name; + 
init.ops = &clk_fixed_rate_ops; + init.flags = flags; + init.parent_names = (parent_name ? &parent_name: NULL); + init.num_parents = (parent_name ? 1 : 0); + /* struct clk_fixed_rate assignments */ fixed->fixed_rate = fixed_rate; + fixed->hw.init = &init; - if (parent_name) { - parent_names = kmalloc(sizeof(char *), GFP_KERNEL); - - if (! parent_names) - goto out; + /* register the clock */ + clk = clk_register(dev, &fixed->hw); - len = sizeof(char) * strlen(parent_name); - - parent_names[0] = kmalloc(len, GFP_KERNEL); - - if (!parent_names[0]) - goto out; - - strncpy(parent_names[0], parent_name, len); - } + if (IS_ERR(clk)) + kfree(fixed); -out: - return clk_register(dev, name, - &clk_fixed_rate_ops, &fixed->hw, - parent_names, - (parent_name ? 1 : 0), - flags); + return clk; } diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c index b5902e2ef2fd..578465e04be6 100644 --- a/drivers/clk/clk-gate.c +++ b/drivers/clk/clk-gate.c @@ -28,32 +28,38 @@ #define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw) -static void clk_gate_set_bit(struct clk_gate *gate) +/* + * It works on following logic: + * + * For enabling clock, enable = 1 + * set2dis = 1 -> clear bit -> set = 0 + * set2dis = 0 -> set bit -> set = 1 + * + * For disabling clock, enable = 0 + * set2dis = 1 -> set bit -> set = 1 + * set2dis = 0 -> clear bit -> set = 0 + * + * So, result is always: enable xor set2dis. + */ +static void clk_gate_endisable(struct clk_hw *hw, int enable) { - u32 reg; + struct clk_gate *gate = to_clk_gate(hw); + int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0; unsigned long flags = 0; + u32 reg; + + set ^= enable; if (gate->lock) spin_lock_irqsave(gate->lock, flags); reg = readl(gate->reg); - reg |= BIT(gate->bit_idx); - writel(reg, gate->reg); - - if (gate->lock) - spin_unlock_irqrestore(gate->lock, flags); -} - -static void clk_gate_clear_bit(struct clk_gate *gate) -{ - u32 reg; - unsigned long flags = 0; - if (gate->lock) - spin_lock_irqsave(gate->lock, flags); + if (set) + reg |= BIT(gate->bit_idx); + else + reg &= ~BIT(gate->bit_idx); - reg = readl(gate->reg); - reg &= ~BIT(gate->bit_idx); writel(reg, gate->reg); if (gate->lock) @@ -62,27 +68,15 @@ static void clk_gate_clear_bit(struct clk_gate *gate) static int clk_gate_enable(struct clk_hw *hw) { - struct clk_gate *gate = to_clk_gate(hw); - - if (gate->flags & CLK_GATE_SET_TO_DISABLE) - clk_gate_clear_bit(gate); - else - clk_gate_set_bit(gate); + clk_gate_endisable(hw, 1); return 0; } -EXPORT_SYMBOL_GPL(clk_gate_enable); static void clk_gate_disable(struct clk_hw *hw) { - struct clk_gate *gate = to_clk_gate(hw); - - if (gate->flags & CLK_GATE_SET_TO_DISABLE) - clk_gate_set_bit(gate); - else - clk_gate_clear_bit(gate); + clk_gate_endisable(hw, 0); } -EXPORT_SYMBOL_GPL(clk_gate_disable); static int clk_gate_is_enabled(struct clk_hw *hw) { @@ -99,15 +93,25 @@ static int clk_gate_is_enabled(struct clk_hw *hw) return reg ? 
1 : 0; } -EXPORT_SYMBOL_GPL(clk_gate_is_enabled); -struct clk_ops clk_gate_ops = { +const struct clk_ops clk_gate_ops = { .enable = clk_gate_enable, .disable = clk_gate_disable, .is_enabled = clk_gate_is_enabled, }; EXPORT_SYMBOL_GPL(clk_gate_ops); +/** + * clk_register_gate - register a gate clock with the clock framework + * @dev: device that is registering this clock + * @name: name of this clock + * @parent_name: name of this clock's parent + * @flags: framework-specific flags for this clock + * @reg: register address to control gating of this clock + * @bit_idx: which bit in the register controls gating of this clock + * @clk_gate_flags: gate-specific flags for this clock + * @lock: shared register lock for this clock + */ struct clk *clk_register_gate(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 bit_idx, @@ -115,36 +119,32 @@ struct clk *clk_register_gate(struct device *dev, const char *name, { struct clk_gate *gate; struct clk *clk; + struct clk_init_data init; + /* allocate the gate */ gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL); - if (!gate) { pr_err("%s: could not allocate gated clk\n", __func__); - return NULL; + return ERR_PTR(-ENOMEM); } + init.name = name; + init.ops = &clk_gate_ops; + init.flags = flags; + init.parent_names = (parent_name ? &parent_name: NULL); + init.num_parents = (parent_name ? 1 : 0); + /* struct clk_gate assignments */ gate->reg = reg; gate->bit_idx = bit_idx; gate->flags = clk_gate_flags; gate->lock = lock; + gate->hw.init = &init; - if (parent_name) { - gate->parent[0] = kstrdup(parent_name, GFP_KERNEL); - if (!gate->parent[0]) - goto out; - } + clk = clk_register(dev, &gate->hw); + + if (IS_ERR(clk)) + kfree(gate); - clk = clk_register(dev, name, - &clk_gate_ops, &gate->hw, - gate->parent, - (parent_name ? 
1 : 0), - flags); - if (clk) - return clk; -out: - kfree(gate->parent[0]); - kfree(gate); - - return NULL; + return clk; } diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c index c71ad1f41a97..fd36a8ea73d9 100644 --- a/drivers/clk/clk-mux.c +++ b/drivers/clk/clk-mux.c @@ -55,7 +55,6 @@ static u8 clk_mux_get_parent(struct clk_hw *hw) return val; } -EXPORT_SYMBOL_GPL(clk_mux_get_parent); static int clk_mux_set_parent(struct clk_hw *hw, u8 index) { @@ -82,35 +81,47 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index) return 0; } -EXPORT_SYMBOL_GPL(clk_mux_set_parent); -struct clk_ops clk_mux_ops = { +const struct clk_ops clk_mux_ops = { .get_parent = clk_mux_get_parent, .set_parent = clk_mux_set_parent, }; EXPORT_SYMBOL_GPL(clk_mux_ops); struct clk *clk_register_mux(struct device *dev, const char *name, - char **parent_names, u8 num_parents, unsigned long flags, + const char **parent_names, u8 num_parents, unsigned long flags, void __iomem *reg, u8 shift, u8 width, u8 clk_mux_flags, spinlock_t *lock) { struct clk_mux *mux; + struct clk *clk; + struct clk_init_data init; - mux = kmalloc(sizeof(struct clk_mux), GFP_KERNEL); - + /* allocate the mux */ + mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL); if (!mux) { pr_err("%s: could not allocate mux clk\n", __func__); return ERR_PTR(-ENOMEM); } + init.name = name; + init.ops = &clk_mux_ops; + init.flags = flags; + init.parent_names = parent_names; + init.num_parents = num_parents; + /* struct clk_mux assignments */ mux->reg = reg; mux->shift = shift; mux->width = width; mux->flags = clk_mux_flags; mux->lock = lock; + mux->hw.init = &init; + + clk = clk_register(dev, &mux->hw); + + if (IS_ERR(clk)) + kfree(mux); - return clk_register(dev, name, &clk_mux_ops, &mux->hw, - parent_names, num_parents, flags); + return clk; } diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 9cf6f59e3e19..e5d5dc13bcfd 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -194,9 +194,8 @@ static int __init clk_debug_init(void) late_initcall(clk_debug_init); #else static inline int clk_debug_register(struct clk *clk) { return 0; } -#endif /* CONFIG_COMMON_CLK_DEBUG */ +#endif -#ifdef CONFIG_COMMON_CLK_DISABLE_UNUSED /* caller must hold prepare_lock */ static void clk_disable_unused_subtree(struct clk *clk) { @@ -246,9 +245,6 @@ static int clk_disable_unused(void) return 0; } late_initcall(clk_disable_unused); -#else -static inline int clk_disable_unused(struct clk *clk) { return 0; } -#endif /* CONFIG_COMMON_CLK_DISABLE_UNUSED */ /*** helper functions ***/ @@ -287,7 +283,7 @@ unsigned long __clk_get_rate(struct clk *clk) unsigned long ret; if (!clk) { - ret = -EINVAL; + ret = 0; goto out; } @@ -297,7 +293,7 @@ unsigned long __clk_get_rate(struct clk *clk) goto out; if (!clk->parent) - ret = -ENODEV; + ret = 0; out: return ret; @@ -562,7 +558,7 @@ EXPORT_SYMBOL_GPL(clk_enable); * @clk: the clk whose rate is being returned * * Simply returns the cached rate of the clk. Does not query the hardware. If - * clk is NULL then returns -EINVAL. + * clk is NULL then returns 0. 
*/ unsigned long clk_get_rate(struct clk *clk) { @@ -584,18 +580,22 @@ EXPORT_SYMBOL_GPL(clk_get_rate); */ unsigned long __clk_round_rate(struct clk *clk, unsigned long rate) { - unsigned long unused; + unsigned long parent_rate = 0; if (!clk) return -EINVAL; - if (!clk->ops->round_rate) - return clk->rate; + if (!clk->ops->round_rate) { + if (clk->flags & CLK_SET_RATE_PARENT) + return __clk_round_rate(clk->parent, rate); + else + return clk->rate; + } - if (clk->flags & CLK_SET_RATE_PARENT) - return clk->ops->round_rate(clk->hw, rate, &unused); - else - return clk->ops->round_rate(clk->hw, rate, NULL); + if (clk->parent) + parent_rate = clk->parent->rate; + + return clk->ops->round_rate(clk->hw, rate, &parent_rate); } /** @@ -765,25 +765,41 @@ static void clk_calc_subtree(struct clk *clk, unsigned long new_rate) static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate) { struct clk *top = clk; - unsigned long best_parent_rate = clk->parent->rate; + unsigned long best_parent_rate = 0; unsigned long new_rate; - if (!clk->ops->round_rate && !(clk->flags & CLK_SET_RATE_PARENT)) { - clk->new_rate = clk->rate; + /* sanity */ + if (IS_ERR_OR_NULL(clk)) return NULL; + + /* save parent rate, if it exists */ + if (clk->parent) + best_parent_rate = clk->parent->rate; + + /* never propagate up to the parent */ + if (!(clk->flags & CLK_SET_RATE_PARENT)) { + if (!clk->ops->round_rate) { + clk->new_rate = clk->rate; + return NULL; + } + new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate); + goto out; } - if (!clk->ops->round_rate && (clk->flags & CLK_SET_RATE_PARENT)) { + /* need clk->parent from here on out */ + if (!clk->parent) { + pr_debug("%s: %s has NULL parent\n", __func__, clk->name); + return NULL; + } + + if (!clk->ops->round_rate) { top = clk_calc_new_rates(clk->parent, rate); - new_rate = clk->new_rate = clk->parent->new_rate; + new_rate = clk->parent->new_rate; goto out; } - if (clk->flags & CLK_SET_RATE_PARENT) - new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate); - else - new_rate = clk->ops->round_rate(clk->hw, rate, NULL); + new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate); if (best_parent_rate != clk->parent->rate) { top = clk_calc_new_rates(clk->parent, best_parent_rate); @@ -839,7 +855,7 @@ static void clk_change_rate(struct clk *clk) old_rate = clk->rate; if (clk->ops->set_rate) - clk->ops->set_rate(clk->hw, clk->new_rate); + clk->ops->set_rate(clk->hw, clk->new_rate, clk->parent->rate); if (clk->ops->recalc_rate) clk->rate = clk->ops->recalc_rate(clk->hw, @@ -859,38 +875,19 @@ static void clk_change_rate(struct clk *clk) * @clk: the clk whose rate is being changed * @rate: the new rate for clk * - * In the simplest case clk_set_rate will only change the rate of clk. + * In the simplest case clk_set_rate will only adjust the rate of clk. * - * If clk has the CLK_SET_RATE_GATE flag set and it is enabled this call - * will fail; only when the clk is disabled will it be able to change - * its rate. + * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to + * propagate up to clk's parent; whether or not this happens depends on the + * outcome of clk's .round_rate implementation. If *parent_rate is unchanged + * after calling .round_rate then upstream parent propagation is ignored. If + * *parent_rate comes back with a new rate for clk's parent then we propagate + * up to clk's parent and set it's rate. 
Upward propagation will continue + * until either a clk does not support the CLK_SET_RATE_PARENT flag or + * .round_rate stops requesting changes to clk's parent_rate. * - * Setting the CLK_SET_RATE_PARENT flag allows clk_set_rate to - * recursively propagate up to clk's parent; whether or not this happens - * depends on the outcome of clk's .round_rate implementation. If - * *parent_rate is 0 after calling .round_rate then upstream parent - * propagation is ignored. If *parent_rate comes back with a new rate - * for clk's parent then we propagate up to clk's parent and set it's - * rate. Upward propagation will continue until either a clk does not - * support the CLK_SET_RATE_PARENT flag or .round_rate stops requesting - * changes to clk's parent_rate. If there is a failure during upstream - * propagation then clk_set_rate will unwind and restore each clk's rate - * that had been successfully changed. Afterwards a rate change abort - * notification will be propagated downstream, starting from the clk - * that failed. - * - * At the end of all of the rate setting, clk_set_rate internally calls - * __clk_recalc_rates and propagates the rate changes downstream, - * starting from the highest clk whose rate was changed. This has the - * added benefit of propagating post-rate change notifiers. - * - * Note that while post-rate change and rate change abort notifications - * are guaranteed to be sent to a clk only once per call to - * clk_set_rate, pre-change notifications will be sent for every clk - * whose rate is changed. Stacking pre-change notifications is noisy - * for the drivers subscribed to them, but this allows drivers to react - * to intermediate clk rate changes up until the point where the final - * rate is achieved at the end of upstream propagation. + * Rate changes are accomplished via tree traversal that also recalculates the + * rates for the clocks and fires off POST_RATE_CHANGE notifiers. * * Returns 0 on success, -EERROR otherwise. */ @@ -906,6 +903,11 @@ int clk_set_rate(struct clk *clk, unsigned long rate) if (rate == clk->rate) goto out; + if ((clk->flags & CLK_SET_RATE_GATE) && __clk_is_enabled(clk)) { + ret = -EBUSY; + goto out; + } + /* calculate new rates and get the topmost changed clock */ top = clk_calc_new_rates(clk, rate); if (!top) { @@ -1175,40 +1177,41 @@ EXPORT_SYMBOL_GPL(clk_set_parent); * * Initializes the lists in struct clk, queries the hardware for the * parent and rate and sets them both. - * - * Any struct clk passed into __clk_init must have the following members - * populated: - * .name - * .ops - * .hw - * .parent_names - * .num_parents - * .flags - * - * Essentially, everything that would normally be passed into clk_register is - * assumed to be initialized already in __clk_init. The other members may be - * populated, but are optional. - * - * __clk_init is only exposed via clk-private.h and is intended for use with - * very large numbers of clocks that need to be statically initialized. It is - * a layering violation to include clk-private.h from any code which implements - * a clock's .ops; as such any statically initialized clock data MUST be in a - * separate C file from the logic that implements it's operations. 
*/ -void __clk_init(struct device *dev, struct clk *clk) +int __clk_init(struct device *dev, struct clk *clk) { - int i; + int i, ret = 0; struct clk *orphan; struct hlist_node *tmp, *tmp2; if (!clk) - return; + return -EINVAL; mutex_lock(&prepare_lock); /* check to see if a clock with this name is already registered */ - if (__clk_lookup(clk->name)) + if (__clk_lookup(clk->name)) { + pr_debug("%s: clk %s already initialized\n", + __func__, clk->name); + ret = -EEXIST; + goto out; + } + + /* check that clk_ops are sane. See Documentation/clk.txt */ + if (clk->ops->set_rate && + !(clk->ops->round_rate && clk->ops->recalc_rate)) { + pr_warning("%s: %s must implement .round_rate & .recalc_rate\n", + __func__, clk->name); + ret = -EINVAL; goto out; + } + + if (clk->ops->set_parent && !clk->ops->get_parent) { + pr_warning("%s: %s must implement .get_parent & .set_parent\n", + __func__, clk->name); + ret = -EINVAL; + goto out; + } /* throw a WARN if any entries in parent_names are NULL */ for (i = 0; i < clk->num_parents; i++) @@ -1302,45 +1305,118 @@ void __clk_init(struct device *dev, struct clk *clk) out: mutex_unlock(&prepare_lock); - return; + return ret; } /** + * __clk_register - register a clock and return a cookie. + * + * Same as clk_register, except that the .clk field inside hw shall point to a + * preallocated (generally statically allocated) struct clk. None of the fields + * of the struct clk need to be initialized. + * + * The data pointed to by .init and .clk field shall NOT be marked as init + * data. + * + * __clk_register is only exposed via clk-private.h and is intended for use with + * very large numbers of clocks that need to be statically initialized. It is + * a layering violation to include clk-private.h from any code which implements + * a clock's .ops; as such any statically initialized clock data MUST be in a + * separate C file from the logic that implements it's operations. Returns 0 + * on success, otherwise an error code. + */ +struct clk *__clk_register(struct device *dev, struct clk_hw *hw) +{ + int ret; + struct clk *clk; + + clk = hw->clk; + clk->name = hw->init->name; + clk->ops = hw->init->ops; + clk->hw = hw; + clk->flags = hw->init->flags; + clk->parent_names = hw->init->parent_names; + clk->num_parents = hw->init->num_parents; + + ret = __clk_init(dev, clk); + if (ret) + return ERR_PTR(ret); + + return clk; +} +EXPORT_SYMBOL_GPL(__clk_register); + +/** * clk_register - allocate a new clock, register it and return an opaque cookie * @dev: device that is registering this clock - * @name: clock name - * @ops: operations this clock supports * @hw: link to hardware-specific clock data - * @parent_names: array of string names for all possible parents - * @num_parents: number of possible parents - * @flags: framework-level hints and quirks * * clk_register is the primary interface for populating the clock tree with new * clock nodes. It returns a pointer to the newly allocated struct clk which * cannot be dereferenced by driver code but may be used in conjuction with the - * rest of the clock API. + * rest of the clock API. In the event of an error clk_register will return an + * error code; drivers must test for an error code after calling clk_register. 
*/ -struct clk *clk_register(struct device *dev, const char *name, - const struct clk_ops *ops, struct clk_hw *hw, - char **parent_names, u8 num_parents, unsigned long flags) +struct clk *clk_register(struct device *dev, struct clk_hw *hw) { + int i, ret; struct clk *clk; clk = kzalloc(sizeof(*clk), GFP_KERNEL); - if (!clk) - return NULL; + if (!clk) { + pr_err("%s: could not allocate clk\n", __func__); + ret = -ENOMEM; + goto fail_out; + } - clk->name = name; - clk->ops = ops; + clk->name = kstrdup(hw->init->name, GFP_KERNEL); + if (!clk->name) { + pr_err("%s: could not allocate clk->name\n", __func__); + ret = -ENOMEM; + goto fail_name; + } + clk->ops = hw->init->ops; clk->hw = hw; - clk->flags = flags; - clk->parent_names = parent_names; - clk->num_parents = num_parents; + clk->flags = hw->init->flags; + clk->num_parents = hw->init->num_parents; hw->clk = clk; - __clk_init(dev, clk); + /* allocate local copy in case parent_names is __initdata */ + clk->parent_names = kzalloc((sizeof(char*) * clk->num_parents), + GFP_KERNEL); - return clk; + if (!clk->parent_names) { + pr_err("%s: could not allocate clk->parent_names\n", __func__); + ret = -ENOMEM; + goto fail_parent_names; + } + + + /* copy each string name in case parent_names is __initdata */ + for (i = 0; i < clk->num_parents; i++) { + clk->parent_names[i] = kstrdup(hw->init->parent_names[i], + GFP_KERNEL); + if (!clk->parent_names[i]) { + pr_err("%s: could not copy parent_names\n", __func__); + ret = -ENOMEM; + goto fail_parent_names_copy; + } + } + + ret = __clk_init(dev, clk); + if (!ret) + return clk; + +fail_parent_names_copy: + while (--i >= 0) + kfree(clk->parent_names[i]); + kfree(clk->parent_names); +fail_parent_names: + kfree(clk->name); +fail_name: + kfree(clk); +fail_out: + return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(clk_register); diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index 6db161f64ae0..c535cf8c5770 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c @@ -35,7 +35,12 @@ static DEFINE_MUTEX(clocks_mutex); static struct clk_lookup *clk_find(const char *dev_id, const char *con_id) { struct clk_lookup *p, *cl = NULL; - int match, best = 0; + int match, best_found = 0, best_possible = 0; + + if (dev_id) + best_possible += 2; + if (con_id) + best_possible += 1; list_for_each_entry(p, &clocks, node) { match = 0; @@ -50,10 +55,10 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id) match += 1; } - if (match > best) { + if (match > best_found) { cl = p; - if (match != 3) - best = match; + if (match != best_possible) + best_found = match; else break; } @@ -89,6 +94,51 @@ void clk_put(struct clk *clk) } EXPORT_SYMBOL(clk_put); +static void devm_clk_release(struct device *dev, void *res) +{ + clk_put(*(struct clk **)res); +} + +struct clk *devm_clk_get(struct device *dev, const char *id) +{ + struct clk **ptr, *clk; + + ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + clk = clk_get(dev, id); + if (!IS_ERR(clk)) { + *ptr = clk; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return clk; +} +EXPORT_SYMBOL(devm_clk_get); + +static int devm_clk_match(struct device *dev, void *res, void *data) +{ + struct clk **c = res; + if (!c || !*c) { + WARN_ON(!c || !*c); + return 0; + } + return *c == data; +} + +void devm_clk_put(struct device *dev, struct clk *clk) +{ + int ret; + + ret = devres_destroy(dev, devm_clk_release, devm_clk_match, clk); + + WARN_ON(ret); +} +EXPORT_SYMBOL(devm_clk_put); + void clkdev_add(struct 
clk_lookup *cl) { mutex_lock(&clocks_mutex); @@ -116,8 +166,9 @@ struct clk_lookup_alloc { char con_id[MAX_CON_ID]; }; -struct clk_lookup * __init_refok -clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...) +static struct clk_lookup * __init_refok +vclkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, + va_list ap) { struct clk_lookup_alloc *cla; @@ -132,16 +183,25 @@ clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...) } if (dev_fmt) { - va_list ap; - - va_start(ap, dev_fmt); vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap); cla->cl.dev_id = cla->dev_id; - va_end(ap); } return &cla->cl; } + +struct clk_lookup * __init_refok +clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...) +{ + struct clk_lookup *cl; + va_list ap; + + va_start(ap, dev_fmt); + cl = vclkdev_alloc(clk, con_id, dev_fmt, ap); + va_end(ap); + + return cl; +} EXPORT_SYMBOL(clkdev_alloc); int clk_add_alias(const char *alias, const char *alias_dev_name, char *id, @@ -173,3 +233,65 @@ void clkdev_drop(struct clk_lookup *cl) kfree(cl); } EXPORT_SYMBOL(clkdev_drop); + +/** + * clk_register_clkdev - register one clock lookup for a struct clk + * @clk: struct clk to associate with all clk_lookups + * @con_id: connection ID string on device + * @dev_id: format string describing device name + * + * con_id or dev_id may be NULL as a wildcard, just as in the rest of + * clkdev. + * + * To make things easier for mass registration, we detect error clks + * from a previous clk_register() call, and return the error code for + * those. This is to permit this function to be called immediately + * after clk_register(). + */ +int clk_register_clkdev(struct clk *clk, const char *con_id, + const char *dev_fmt, ...) +{ + struct clk_lookup *cl; + va_list ap; + + if (IS_ERR(clk)) + return PTR_ERR(clk); + + va_start(ap, dev_fmt); + cl = vclkdev_alloc(clk, con_id, dev_fmt, ap); + va_end(ap); + + if (!cl) + return -ENOMEM; + + clkdev_add(cl); + + return 0; +} + +/** + * clk_register_clkdevs - register a set of clk_lookup for a struct clk + * @clk: struct clk to associate with all clk_lookups + * @cl: array of clk_lookup structures with con_id and dev_id pre-initialized + * @num: number of clk_lookup structures to register + * + * To make things easier for mass registration, we detect error clks + * from a previous clk_register() call, and return the error code for + * those. This is to permit this function to be called immediately + * after clk_register(). + */ +int clk_register_clkdevs(struct clk *clk, struct clk_lookup *cl, size_t num) +{ + unsigned i; + + if (IS_ERR(clk)) + return PTR_ERR(clk); + + for (i = 0; i < num; i++, cl++) { + cl->clk = clk; + clkdev_add(cl); + } + + return 0; +} +EXPORT_SYMBOL(clk_register_clkdevs); |
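The clk-fixed-rate.c, clk-gate.c and clk-divider.c hunks above convert the basic clock types to the new struct clk_init_data registration path, make each helper free its own allocation on failure and return ERR_PTR() codes instead of NULL. A minimal sketch of platform setup code chaining the three helpers into a small tree; the "osc", "uart_gate" and "uart_div" names, the register offsets, bit positions and the 24 MHz rate are all invented for illustration:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>

/* one lock shared by every clock that lives in the same register */
static DEFINE_SPINLOCK(foo_ccm_lock);

static int __init foo_clk_init(void __iomem *ccm_base)
{
        struct clk *osc, *gate, *div;

        /* root clock: a fixed 24 MHz oscillator with no parent */
        osc = clk_register_fixed_rate(NULL, "osc", NULL, 0, 24000000);
        if (IS_ERR(osc))
                return PTR_ERR(osc);

        /* gate controlled by bit 3 of a made-up register at ccm_base + 0x08 */
        gate = clk_register_gate(NULL, "uart_gate", "osc", 0,
                                 ccm_base + 0x08, 3,
                                 0 /* or CLK_GATE_SET_TO_DISABLE if the bit is active-low */,
                                 &foo_ccm_lock);
        if (IS_ERR(gate))
                return PTR_ERR(gate);

        /* 4-bit divider in bits [7:4]; register value 0 means divide by 1 */
        div = clk_register_divider(NULL, "uart_div", "uart_gate", 0,
                                   ccm_base + 0x08, 4, 4,
                                   0 /* or CLK_DIVIDER_ONE_BASED if the raw field value is the divisor */,
                                   &foo_ccm_lock);
        if (IS_ERR(div))
                return PTR_ERR(div);

        return 0;
}

Since each helper now frees its private structure and passes the clk_register() error back, callers only need the IS_ERR() check.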
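The clk-mux.c hunk above changes the parent list to const char ** and zero-allocates the mux before handing it to clk_register(). A sketch of registering a two-input mux on top of the tree from the previous example; the parent names, the select bit and the register offset are hypothetical:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_ccm_lock);

/* possible parents of the (made-up) "uart_sel" mux */
static const char *uart_sel_parents[] = { "osc", "pll1" };

static struct clk *foo_register_uart_mux(void __iomem *ccm_base)
{
        /* 1-bit select field at bit 12 of a made-up mux register */
        return clk_register_mux(NULL, "uart_sel",
                                uart_sel_parents, ARRAY_SIZE(uart_sel_parents),
                                0, ccm_base + 0x0c, 12, 1,
                                0, &foo_ccm_lock);
}

Because clk_register() now duplicates every parent name string, the array only has to stay valid for the duration of the call and may live in __initdata.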
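clk-fixed-factor.c adds a clock whose rate is always parent_rate * mult / div, and its .round_rate is a compact illustration of the CLK_SET_RATE_PARENT behaviour described in the rewritten clk_set_rate() comment: with the flag set, a rate request is converted into a parent rate and propagated upward; without it, the request is only rounded against the parent's current rate. A sketch, with the "pll1" parent and the 1:2 ratio invented for the example:

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>

static int foo_register_ahb(void)
{
        struct clk *ahb;

        /* example relationship: "ahb" always runs at half of "pll1" */
        ahb = clk_register_fixed_factor(NULL, "ahb", "pll1",
                                        CLK_SET_RATE_PARENT, 1, 2);
        if (IS_ERR(ahb))
                return PTR_ERR(ahb);

        /*
         * With CLK_SET_RATE_PARENT this 200 MHz request becomes a
         * 400 MHz request on "pll1"; without the flag the result is
         * simply pll1's current rate divided by two.
         */
        return clk_set_rate(ahb, 200000000);
}

The reworked clk_set_rate() also honours CLK_SET_RATE_GATE now and returns -EBUSY if such a clock is asked to change rate while it is in use.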
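clk.c replaces the long clk_register() argument list with a struct clk_hw whose .init member points at a struct clk_init_data, copies the name and parent names so they may live in init memory, and has __clk_init() sanity-check the ops (.set_rate requires .round_rate and .recalc_rate; .set_parent requires .get_parent). A hedged sketch of a hardware-specific provider written against that interface; everything named "foo" is made up:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/slab.h>

struct clk_foo {
        struct clk_hw hw;
        void __iomem *reg;
};

#define to_clk_foo(_hw) container_of(_hw, struct clk_foo, hw)

static unsigned long clk_foo_recalc_rate(struct clk_hw *hw,
                                         unsigned long parent_rate)
{
        /* hypothetical hardware: the clock simply follows its parent */
        return parent_rate;
}

static const struct clk_ops clk_foo_ops = {
        .recalc_rate = clk_foo_recalc_rate,
};

static struct clk *clk_register_foo(struct device *dev, const char *name,
                                    const char *parent_name, void __iomem *reg)
{
        struct clk_foo *foo;
        struct clk_init_data init;
        struct clk *clk;

        foo = kzalloc(sizeof(*foo), GFP_KERNEL);
        if (!foo)
                return ERR_PTR(-ENOMEM);

        /* init may live on the stack: clk_register() copies what it needs */
        init.name = name;
        init.ops = &clk_foo_ops;
        init.flags = 0;
        init.parent_names = parent_name ? &parent_name : NULL;
        init.num_parents = parent_name ? 1 : 0;

        foo->reg = reg;
        foo->hw.init = &init;

        clk = clk_register(dev, &foo->hw);
        if (IS_ERR(clk))
                kfree(foo);

        return clk;
}

For statically allocated clocks the patch keeps a lighter path, __clk_register(), which reuses a preallocated struct clk supplied through hw->clk; it is only exposed via clk-private.h and is meant for platforms that initialize very large numbers of clocks.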
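clkdev.c adds clk_register_clkdev() and clk_register_clkdevs(), both of which accept an ERR_PTR() left over from a failed clk_register() and simply hand the error code back, so lookups can be wired up immediately after registration. A sketch with a made-up device name and connection id:

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/spinlock.h>

static int foo_register_uart_clk(void __iomem *ccm_base, spinlock_t *lock)
{
        struct clk *clk;

        /* hypothetical gate feeding the first UART's bus clock */
        clk = clk_register_gate(NULL, "uart_ipg", "ipg", 0,
                                ccm_base + 0x10, 0, 0, lock);

        /* an error pointer from clk_register_gate() is passed straight through */
        return clk_register_clkdev(clk, "ipg", "foo-uart.%d", 0);
}

For a clock that needs several lookups, clk_register_clkdevs() takes an array of struct clk_lookup entries with con_id/dev_id already filled in and points each one at the clock.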
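The new devm_clk_get()/devm_clk_put() pair in clkdev.c ties a clk_get() reference to the device's managed resources, so the matching clk_put() happens automatically on probe failure or driver removal. A sketch of a consumer probe; the driver and the "ipg" connection id are hypothetical:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_uart_probe(struct platform_device *pdev)
{
        struct clk *clk;
        int ret;

        /* look up this (made-up) device's "ipg" clock; devres tracks it */
        clk = devm_clk_get(&pdev->dev, "ipg");
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_prepare(clk);
        if (ret)
                return ret;

        ret = clk_enable(clk);
        if (ret) {
                clk_unprepare(clk);
                return ret;
        }

        /* no explicit clk_put(): the reference is dropped when the device goes away */
        return 0;
}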