author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-29 03:44:53 +0100
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-29 03:44:53 +0100
commit | d30492adea3a82e7120bcf60893aaaab711f90a6 (patch)
tree | 082d1dff4d71ccbd722b5edd47411acad110b636 /drivers/clk/ti/divider.c
parent | Merge tag 'xfs-for-linus-v3.14-rc1-2' of git://oss.sgi.com/xfs/xfs (diff)
parent | clk: sort Makefile (diff)
Merge tag 'clk-for-linus-3.14-part2' of git://git.linaro.org/people/mike.turquette/linux
Pull more clock framework changes from Mike Turquette:
"The second half of the clock framework pull requeust for 3.14 is
dominated by platform support for Qualcomm's MSM SoCs, DT binding
updates for TI's OMAP-ish processors and additional support for
Samsung chips.
Additionally there are other smaller clock driver changes and several
last minute fixes. This pull request also includes the HiSilicon
support that depends on the already-merged arm-soc pull request"
[ Fix up stupid compile error in the source tree with evil merge - Grumpy Linus ]
* tag 'clk-for-linus-3.14-part2' of git://git.linaro.org/people/mike.turquette/linux: (49 commits)
clk: sort Makefile
clk: sunxi: fix overflow when setting up divided factors
clk: Export more clk-provider functions
dt-bindings: qcom: Fix warning with duplicate dt define
clk: si5351: remove variant from platform_data
clk: samsung: Remove unneeded semicolon
clk: qcom: Fix modular build
ARM: OMAP3: use DT clock init if DT data is available
ARM: AM33xx: remove old clock data and link in new clock init code
ARM: AM43xx: Enable clock init
ARM: OMAP: DRA7: Enable clock init
ARM: OMAP4: remove old clock data and link in new clock init code
ARM: OMAP2+: io: use new clock init API
ARM: OMAP2+: PRM: add support for initializing PRCM clock modules from DT
ARM: OMAP3: hwmod: initialize clkdm from clkdm_name
ARM: OMAP: hwmod: fix an incorrect clk type cast with _get_clkdm
ARM: OMAP2+: clock: use driver API instead of direct memory read/write
ARM: OMAP2+: clock: add support for indexed memmaps
ARM: dts: am43xx clock data
ARM: dts: AM35xx: use DT clock data
...
Diffstat (limited to 'drivers/clk/ti/divider.c')
-rw-r--r-- | drivers/clk/ti/divider.c | 487
1 file changed, 487 insertions, 0 deletions
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
new file mode 100644
index 000000000000..a15e445570b2
--- /dev/null
+++ b/drivers/clk/ti/divider.c
@@ -0,0 +1,487 @@
+/*
+ * TI Divider Clock
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
+
+#define div_mask(d)	((1 << ((d)->width)) - 1)
+
+static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
+{
+	unsigned int maxdiv = 0;
+	const struct clk_div_table *clkt;
+
+	for (clkt = table; clkt->div; clkt++)
+		if (clkt->div > maxdiv)
+			maxdiv = clkt->div;
+	return maxdiv;
+}
+
+static unsigned int _get_maxdiv(struct clk_divider *divider)
+{
+	if (divider->flags & CLK_DIVIDER_ONE_BASED)
+		return div_mask(divider);
+	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+		return 1 << div_mask(divider);
+	if (divider->table)
+		return _get_table_maxdiv(divider->table);
+	return div_mask(divider) + 1;
+}
+
+static unsigned int _get_table_div(const struct clk_div_table *table,
+				   unsigned int val)
+{
+	const struct clk_div_table *clkt;
+
+	for (clkt = table; clkt->div; clkt++)
+		if (clkt->val == val)
+			return clkt->div;
+	return 0;
+}
+
+static unsigned int _get_div(struct clk_divider *divider, unsigned int val)
+{
+	if (divider->flags & CLK_DIVIDER_ONE_BASED)
+		return val;
+	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+		return 1 << val;
+	if (divider->table)
+		return _get_table_div(divider->table, val);
+	return val + 1;
+}
+
+static unsigned int _get_table_val(const struct clk_div_table *table,
+				   unsigned int div)
+{
+	const struct clk_div_table *clkt;
+
+	for (clkt = table; clkt->div; clkt++)
+		if (clkt->div == div)
+			return clkt->val;
+	return 0;
+}
+
+static unsigned int _get_val(struct clk_divider *divider, u8 div)
+{
+	if (divider->flags & CLK_DIVIDER_ONE_BASED)
+		return div;
+	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+		return __ffs(div);
+	if (divider->table)
+		return _get_table_val(divider->table, div);
+	return div - 1;
+}
+
+static unsigned long ti_clk_divider_recalc_rate(struct clk_hw *hw,
+						unsigned long parent_rate)
+{
+	struct clk_divider *divider = to_clk_divider(hw);
+	unsigned int div, val;
+
+	val = ti_clk_ll_ops->clk_readl(divider->reg) >> divider->shift;
+	val &= div_mask(divider);
+
+	div = _get_div(divider, val);
+	if (!div) {
+		WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
+		     "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
+		     __clk_get_name(hw->clk));
+		return parent_rate;
+	}
+
+	return parent_rate / div;
+}
+
+/*
+ * The reverse of DIV_ROUND_UP: The maximum number which
+ * divided by m is r
+ */
+#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1)
+
+static bool _is_valid_table_div(const struct clk_div_table *table,
+				unsigned int div)
+{
+	const struct clk_div_table *clkt;
+
+	for (clkt = table; clkt->div; clkt++)
+		if (clkt->div == div)
+			return true;
+	return false;
+}
+
+static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
+{
+	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+		return is_power_of_2(div);
+	if (divider->table)
+		return _is_valid_table_div(divider->table, div);
+	return true;
+}
+
+static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
+				  unsigned long *best_parent_rate)
+{
+	struct clk_divider *divider = to_clk_divider(hw);
+	int i, bestdiv = 0;
+	unsigned long parent_rate, best = 0, now, maxdiv;
+	unsigned long parent_rate_saved = *best_parent_rate;
+
+	if (!rate)
+		rate = 1;
+
+	maxdiv = _get_maxdiv(divider);
+
+	if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
+		parent_rate = *best_parent_rate;
+		bestdiv = DIV_ROUND_UP(parent_rate, rate);
+		bestdiv = bestdiv == 0 ? 1 : bestdiv;
+		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
+		return bestdiv;
+	}
+
+	/*
+	 * The maximum divider we can use without overflowing
+	 * unsigned long in rate * i below
+	 */
+	maxdiv = min(ULONG_MAX / rate, maxdiv);
+
+	for (i = 1; i <= maxdiv; i++) {
+		if (!_is_valid_div(divider, i))
+			continue;
+		if (rate * i == parent_rate_saved) {
+			/*
+			 * It's the most ideal case if the requested rate can be
+			 * divided from parent clock without needing to change
+			 * parent rate, so return the divider immediately.
+			 */
+			*best_parent_rate = parent_rate_saved;
+			return i;
+		}
+		parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
+					       MULT_ROUND_UP(rate, i));
+		now = parent_rate / i;
+		if (now <= rate && now > best) {
+			bestdiv = i;
+			best = now;
+			*best_parent_rate = parent_rate;
+		}
+	}
+
+	if (!bestdiv) {
+		bestdiv = _get_maxdiv(divider);
+		*best_parent_rate =
+			__clk_round_rate(__clk_get_parent(hw->clk), 1);
+	}
+
+	return bestdiv;
+}
+
+static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+				      unsigned long *prate)
+{
+	int div;
+	div = ti_clk_divider_bestdiv(hw, rate, prate);
+
+	return *prate / div;
+}
+
+static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+				   unsigned long parent_rate)
+{
+	struct clk_divider *divider = to_clk_divider(hw);
+	unsigned int div, value;
+	unsigned long flags = 0;
+	u32 val;
+
+	div = parent_rate / rate;
+	value = _get_val(divider, div);
+
+	if (value > div_mask(divider))
+		value = div_mask(divider);
+
+	if (divider->lock)
+		spin_lock_irqsave(divider->lock, flags);
+
+	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
+		val = div_mask(divider) << (divider->shift + 16);
+	} else {
+		val = ti_clk_ll_ops->clk_readl(divider->reg);
+		val &= ~(div_mask(divider) << divider->shift);
+	}
+	val |= value << divider->shift;
+	ti_clk_ll_ops->clk_writel(val, divider->reg);
+
+	if (divider->lock)
+		spin_unlock_irqrestore(divider->lock, flags);
+
+	return 0;
+}
+
+const struct clk_ops ti_clk_divider_ops = {
+	.recalc_rate = ti_clk_divider_recalc_rate,
+	.round_rate = ti_clk_divider_round_rate,
+	.set_rate = ti_clk_divider_set_rate,
+};
+
+static struct clk *_register_divider(struct device *dev, const char *name,
+				     const char *parent_name,
+				     unsigned long flags, void __iomem *reg,
+				     u8 shift, u8 width, u8 clk_divider_flags,
+				     const struct clk_div_table *table,
+				     spinlock_t *lock)
+{
+	struct clk_divider *div;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
+		if (width + shift > 16) {
+			pr_warn("divider value exceeds LOWORD field\n");
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	/* allocate the divider */
+	div = kzalloc(sizeof(*div), GFP_KERNEL);
+	if (!div) {
+		pr_err("%s: could not allocate divider clk\n", __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	init.name = name;
+	init.ops = &ti_clk_divider_ops;
+	init.flags = flags | CLK_IS_BASIC;
+	init.parent_names = (parent_name ? &parent_name : NULL);
+	init.num_parents = (parent_name ? 1 : 0);
+
+	/* struct clk_divider assignments */
+	div->reg = reg;
+	div->shift = shift;
+	div->width = width;
+	div->flags = clk_divider_flags;
+	div->lock = lock;
+	div->hw.init = &init;
+	div->table = table;
+
+	/* register the clock */
+	clk = clk_register(dev, &div->hw);
+
+	if (IS_ERR(clk))
+		kfree(div);
+
+	return clk;
+}
+
+static struct clk_div_table
+__init *ti_clk_get_div_table(struct device_node *node)
+{
+	struct clk_div_table *table;
+	const __be32 *divspec;
+	u32 val;
+	u32 num_div;
+	u32 valid_div;
+	int i;
+
+	divspec = of_get_property(node, "ti,dividers", &num_div);
+
+	if (!divspec)
+		return NULL;
+
+	num_div /= 4;
+
+	valid_div = 0;
+
+	/* Determine required size for divider table */
+	for (i = 0; i < num_div; i++) {
+		of_property_read_u32_index(node, "ti,dividers", i, &val);
+		if (val)
+			valid_div++;
+	}
+
+	if (!valid_div) {
+		pr_err("no valid dividers for %s table\n", node->name);
+		return ERR_PTR(-EINVAL);
+	}
+
+	table = kzalloc(sizeof(*table) * (valid_div + 1), GFP_KERNEL);
+
+	if (!table)
+		return ERR_PTR(-ENOMEM);
+
+	valid_div = 0;
+
+	for (i = 0; i < num_div; i++) {
+		of_property_read_u32_index(node, "ti,dividers", i, &val);
+		if (val) {
+			table[valid_div].div = val;
+			table[valid_div].val = i;
+			valid_div++;
+		}
+	}
+
+	return table;
+}
+
+static int _get_divider_width(struct device_node *node,
+			      const struct clk_div_table *table,
+			      u8 flags)
+{
+	u32 min_div;
+	u32 max_div;
+	u32 val = 0;
+	u32 div;
+
+	if (!table) {
+		/* Clk divider table not provided, determine min/max divs */
+		if (of_property_read_u32(node, "ti,min-div", &min_div))
+			min_div = 1;
+
+		if (of_property_read_u32(node, "ti,max-div", &max_div)) {
+			pr_err("no max-div for %s!\n", node->name);
+			return -EINVAL;
+		}
+
+		/* Determine bit width for the field */
+		if (flags & CLK_DIVIDER_ONE_BASED)
+			val = 1;
+
+		div = min_div;
+
+		while (div < max_div) {
+			if (flags & CLK_DIVIDER_POWER_OF_TWO)
+				div <<= 1;
+			else
+				div++;
+			val++;
+		}
+	} else {
+		div = 0;
+
+		while (table[div].div) {
+			val = table[div].val;
+			div++;
+		}
+	}
+
+	return fls(val);
+}
+
+static int __init ti_clk_divider_populate(struct device_node *node,
+	void __iomem **reg, const struct clk_div_table **table,
+	u32 *flags, u8 *div_flags, u8 *width, u8 *shift)
+{
+	u32 val;
+
+	*reg = ti_clk_get_reg_addr(node, 0);
+	if (!*reg)
+		return -EINVAL;
+
+	if (!of_property_read_u32(node, "ti,bit-shift", &val))
+		*shift = val;
+	else
+		*shift = 0;
+
+	*flags = 0;
+	*div_flags = 0;
+
+	if (of_property_read_bool(node, "ti,index-starts-at-one"))
+		*div_flags |= CLK_DIVIDER_ONE_BASED;
+
+	if (of_property_read_bool(node, "ti,index-power-of-two"))
+		*div_flags |= CLK_DIVIDER_POWER_OF_TWO;
+
+	if (of_property_read_bool(node, "ti,set-rate-parent"))
+		*flags |= CLK_SET_RATE_PARENT;
+
+	*table = ti_clk_get_div_table(node);
+
+	if (IS_ERR(*table))
+		return PTR_ERR(*table);
+
+	*width = _get_divider_width(node, *table, *div_flags);
+
+	return 0;
+}
+
+/**
+ * of_ti_divider_clk_setup - Setup function for simple div rate clock
+ * @node: device node for this clock
+ *
+ * Sets up a basic divider clock.
+ */
+static void __init of_ti_divider_clk_setup(struct device_node *node)
+{
+	struct clk *clk;
+	const char *parent_name;
+	void __iomem *reg;
+	u8 clk_divider_flags = 0;
+	u8 width = 0;
+	u8 shift = 0;
+	const struct clk_div_table *table = NULL;
+	u32 flags = 0;
+
+	parent_name = of_clk_get_parent_name(node, 0);
+
+	if (ti_clk_divider_populate(node, &reg, &table, &flags,
+				    &clk_divider_flags, &width, &shift))
+		goto cleanup;
+
+	clk = _register_divider(NULL, node->name, parent_name, flags, reg,
+				shift, width, clk_divider_flags, table, NULL);
+
+	if (!IS_ERR(clk)) {
+		of_clk_add_provider(node, of_clk_src_simple_get, clk);
+		of_ti_clk_autoidle_setup(node);
+		return;
+	}
+
+cleanup:
+	kfree(table);
+}
+CLK_OF_DECLARE(divider_clk, "ti,divider-clock", of_ti_divider_clk_setup);
+
+static void __init of_ti_composite_divider_clk_setup(struct device_node *node)
+{
+	struct clk_divider *div;
+	u32 val;
+
+	div = kzalloc(sizeof(*div), GFP_KERNEL);
+	if (!div)
+		return;
+
+	if (ti_clk_divider_populate(node, &div->reg, &div->table, &val,
+				    &div->flags, &div->width, &div->shift) < 0)
+		goto cleanup;
+
+	if (!ti_clk_add_component(node, &div->hw, CLK_COMPONENT_TYPE_DIVIDER))
+		return;
+
+cleanup:
+	kfree(div->table);
+	kfree(div);
+}
+CLK_OF_DECLARE(ti_composite_divider_clk, "ti,composite-divider-clock",
+	       of_ti_composite_divider_clk_setup);
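
For readers skimming the new driver, the easiest part to misread is the register-field encoding handled by _get_div()/_get_val() above, which also depends on the "ti,index-starts-at-one" and "ti,index-power-of-two" properties parsed by ti_clk_divider_populate(). The standalone C sketch below (not part of the patch; the flag macros and function names here are illustrative only) reproduces that mapping for the three cases the driver supports: zero-based fields (the default), one-based fields, and power-of-two fields.

/*
 * Userspace sketch of the divider field encoding used by the TI divider
 * clock above.  ONE_BASED / POWER_OF_TWO stand in for the kernel's
 * CLK_DIVIDER_ONE_BASED / CLK_DIVIDER_POWER_OF_TWO flags.
 */
#include <stdio.h>

#define ONE_BASED    (1u << 0)  /* field value is the divider itself */
#define POWER_OF_TWO (1u << 1)  /* field value is log2 of the divider */

/* register field value -> divider (cf. _get_div) */
static unsigned int decode_div(unsigned int flags, unsigned int val)
{
        if (flags & ONE_BASED)
                return val;
        if (flags & POWER_OF_TWO)
                return 1u << val;
        return val + 1;         /* default: zero-based field */
}

/* divider -> register field value (cf. _get_val) */
static unsigned int encode_div(unsigned int flags, unsigned int div)
{
        if (flags & ONE_BASED)
                return div;
        if (flags & POWER_OF_TWO) {
                unsigned int val = 0;

                while (div > 1) {       /* stand-in for __ffs() on a power of two */
                        div >>= 1;
                        val++;
                }
                return val;
        }
        return div - 1;
}

int main(void)
{
        /* a divide-by-4 is stored as 3 (zero-based), 4 (one-based) or 2 (log2) */
        printf("zero-based:  val=%u\n", encode_div(0, 4));
        printf("one-based:   val=%u\n", encode_div(ONE_BASED, 4));
        printf("power-of-2:  val=%u\n", encode_div(POWER_OF_TWO, 4));
        printf("decode val=3 (zero-based) -> div=%u\n", decode_div(0, 3));
        return 0;
}

Built as an ordinary userspace program, this prints field values 3, 4 and 2 for a divide-by-4, which is what ti_clk_divider_set_rate() would write into the divider field under the corresponding CLK_DIVIDER_* flags.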