Diffstat (limited to 'drivers/clk')
-rw-r--r--   drivers/clk/clk-aspeed.c               |  59
-rw-r--r--   drivers/clk/clk.c                      | 202
-rw-r--r--   drivers/clk/meson/clk-audio-divider.c  |   2
-rw-r--r--   drivers/clk/meson/gxbb.c               |   1
-rw-r--r--   drivers/clk/mvebu/armada-37xx-periph.c |  38
-rw-r--r--   drivers/clk/qcom/gcc-msm8996.c         |   1
-rw-r--r--   drivers/clk/qcom/mmcc-msm8996.c        |   1
7 files changed, 281 insertions, 23 deletions
diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
index 38b366b00c57..7b70a074095d 100644
--- a/drivers/clk/clk-aspeed.c
+++ b/drivers/clk/clk-aspeed.c
@@ -24,7 +24,7 @@
 #define ASPEED_MPLL_PARAM 0x20
 #define ASPEED_HPLL_PARAM 0x24
 #define AST2500_HPLL_BYPASS_EN BIT(20)
-#define AST2400_HPLL_STRAPPED BIT(18)
+#define AST2400_HPLL_PROGRAMMED BIT(18)
 #define AST2400_HPLL_BYPASS_EN BIT(17)
 #define ASPEED_MISC_CTRL 0x2c
 #define UART_DIV13_EN BIT(12)
@@ -91,8 +91,8 @@ static const struct aspeed_gate_data aspeed_gates[] = {
         [ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
         [ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
         [ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */
-        [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */
-        [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, 0 }, /* DAC */
+        [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
+        [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */
         [ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL },
         [ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */
         [ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, 0 }, /* LPC */
@@ -212,9 +212,22 @@ static int aspeed_clk_is_enabled(struct clk_hw *hw)
 {
         struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
         u32 clk = BIT(gate->clock_idx);
+        u32 rst = BIT(gate->reset_idx);
         u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
         u32 reg;
 
+        /*
+         * If the IP is in reset, treat the clock as not enabled,
+         * this happens with some clocks such as the USB one when
+         * coming from cold reset. Without this, aspeed_clk_enable()
+         * will fail to lift the reset.
+         */
+        if (gate->reset_idx >= 0) {
+                regmap_read(gate->map, ASPEED_RESET_CTRL, &reg);
+                if (reg & rst)
+                        return 0;
+        }
+
         regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
 
         return ((reg & clk) == enval) ? 1 : 0;
@@ -565,29 +578,45 @@ builtin_platform_driver(aspeed_clk_driver);
 static void __init aspeed_ast2400_cc(struct regmap *map)
 {
         struct clk_hw *hw;
-        u32 val, freq, div;
+        u32 val, div, clkin, hpll;
+        const u16 hpll_rates[][4] = {
+                {384, 360, 336, 408},
+                {400, 375, 350, 425},
+        };
+        int rate;
 
         /*
          * CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by
          * strapping
          */
         regmap_read(map, ASPEED_STRAP, &val);
-        if (val & CLKIN_25MHZ_EN)
-                freq = 25000000;
-        else if (val & AST2400_CLK_SOURCE_SEL)
-                freq = 48000000;
-        else
-                freq = 24000000;
-        hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, freq);
-        pr_debug("clkin @%u MHz\n", freq / 1000000);
+        rate = (val >> 8) & 3;
+        if (val & CLKIN_25MHZ_EN) {
+                clkin = 25000000;
+                hpll = hpll_rates[1][rate];
+        } else if (val & AST2400_CLK_SOURCE_SEL) {
+                clkin = 48000000;
+                hpll = hpll_rates[0][rate];
+        } else {
+                clkin = 24000000;
+                hpll = hpll_rates[0][rate];
+        }
+        hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, clkin);
+        pr_debug("clkin @%u MHz\n", clkin / 1000000);
 
         /*
          * High-speed PLL clock derived from the crystal. This the CPU clock,
-         * and we assume that it is enabled
+         * and we assume that it is enabled. It can be configured through the
+         * HPLL_PARAM register, or set to a specified frequency by strapping.
          */
         regmap_read(map, ASPEED_HPLL_PARAM, &val);
-        WARN(val & AST2400_HPLL_STRAPPED, "hpll is strapped not configured");
-        aspeed_clk_data->hws[ASPEED_CLK_HPLL] = aspeed_ast2400_calc_pll("hpll", val);
+        if (val & AST2400_HPLL_PROGRAMMED)
+                hw = aspeed_ast2400_calc_pll("hpll", val);
+        else
+                hw = clk_hw_register_fixed_rate(NULL, "hpll", "clkin", 0,
+                                hpll * 1000000);
+
+        aspeed_clk_data->hws[ASPEED_CLK_HPLL] = hw;
 
         /*
          * Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 9760b526ca31..976f59e11f9a 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -24,7 +24,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/sched.h>
 #include <linux/clkdev.h>
-#include <linux/stringify.h>
 
 #include "clk.h"
 
@@ -68,6 +67,7 @@ struct clk_core {
         unsigned long max_rate;
         unsigned long accuracy;
         int phase;
+        struct clk_duty duty;
         struct hlist_head children;
         struct hlist_node child_node;
         struct hlist_head clks;
@@ -2402,6 +2402,172 @@ int clk_get_phase(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(clk_get_phase);
 
+static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
+{
+        /* Assume a default value of 50% */
+        core->duty.num = 1;
+        core->duty.den = 2;
+}
+
+static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
+
+static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
+{
+        struct clk_duty *duty = &core->duty;
+        int ret = 0;
+
+        if (!core->ops->get_duty_cycle)
+                return clk_core_update_duty_cycle_parent_nolock(core);
+
+        ret = core->ops->get_duty_cycle(core->hw, duty);
+        if (ret)
+                goto reset;
+
+        /* Don't trust the clock provider too much */
+        if (duty->den == 0 || duty->num > duty->den) {
+                ret = -EINVAL;
+                goto reset;
+        }
+
+        return 0;
+
+reset:
+        clk_core_reset_duty_cycle_nolock(core);
+        return ret;
+}
+
+static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
+{
+        int ret = 0;
+
+        if (core->parent &&
+            core->flags & CLK_DUTY_CYCLE_PARENT) {
+                ret = clk_core_update_duty_cycle_nolock(core->parent);
+                memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
+        } else {
+                clk_core_reset_duty_cycle_nolock(core);
+        }
+
+        return ret;
+}
+
+static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
+                                                 struct clk_duty *duty);
+
+static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
+                                          struct clk_duty *duty)
+{
+        int ret;
+
+        lockdep_assert_held(&prepare_lock);
+
+        if (clk_core_rate_is_protected(core))
+                return -EBUSY;
+
+        trace_clk_set_duty_cycle(core, duty);
+
+        if (!core->ops->set_duty_cycle)
+                return clk_core_set_duty_cycle_parent_nolock(core, duty);
+
+        ret = core->ops->set_duty_cycle(core->hw, duty);
+        if (!ret)
+                memcpy(&core->duty, duty, sizeof(*duty));
+
+        trace_clk_set_duty_cycle_complete(core, duty);
+
+        return ret;
+}
+
+static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
+                                                 struct clk_duty *duty)
+{
+        int ret = 0;
+
+        if (core->parent &&
+            core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
+                ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
+                memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
+        }
+
+        return ret;
+}
+
+/**
+ * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @num: numerator of the duty cycle ratio to be applied
+ * @den: denominator of the duty cycle ratio to be applied
+ *
+ * Apply the duty cycle ratio if the ratio is valid and the clock can
+ * perform this operation
+ *
+ * Returns (0) on success, a negative errno otherwise.
+ */
+int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
+{
+        int ret;
+        struct clk_duty duty;
+
+        if (!clk)
+                return 0;
+
+        /* sanity check the ratio */
+        if (den == 0 || num > den)
+                return -EINVAL;
+
+        duty.num = num;
+        duty.den = den;
+
+        clk_prepare_lock();
+
+        if (clk->exclusive_count)
+                clk_core_rate_unprotect(clk->core);
+
+        ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
+
+        if (clk->exclusive_count)
+                clk_core_rate_protect(clk->core);
+
+        clk_prepare_unlock();
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
+
+static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
+                                          unsigned int scale)
+{
+        struct clk_duty *duty = &core->duty;
+        int ret;
+
+        clk_prepare_lock();
+
+        ret = clk_core_update_duty_cycle_nolock(core);
+        if (!ret)
+                ret = mult_frac(scale, duty->num, duty->den);
+
+        clk_prepare_unlock();
+
+        return ret;
+}
+
+/**
+ * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @scale: scaling factor to be applied to represent the ratio as an integer
+ *
+ * Returns the duty cycle ratio of a clock node multiplied by the provided
+ * scaling factor, or negative errno on error.
+ */
+int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
+{
+        if (!clk)
+                return 0;
+
+        return clk_core_get_scaled_duty_cycle(clk->core, scale);
+}
+EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
+
 /**
  * clk_is_match - check if two clk's point to the same hardware clock
  * @p: clk compared against q
@@ -2455,12 +2621,13 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
         if (!c)
                 return;
 
-        seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %-3d\n",
+        seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
                    level * 3 + 1, "",
                    30 - level * 3, c->name,
                    c->enable_count, c->prepare_count, c->protect_count,
                    clk_core_get_rate(c), clk_core_get_accuracy(c),
-                   clk_core_get_phase(c));
+                   clk_core_get_phase(c),
+                   clk_core_get_scaled_duty_cycle(c, 100000));
 }
 
 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
@@ -2482,9 +2649,9 @@ static int clk_summary_show(struct seq_file *s, void *data)
         struct clk_core *c;
         struct hlist_head **lists = (struct hlist_head **)s->private;
 
-        seq_puts(s, " enable prepare protect \n");
-        seq_puts(s, " clock count count count rate accuracy phase\n");
-        seq_puts(s, "----------------------------------------------------------------------------------------\n");
+        seq_puts(s, " enable prepare protect duty\n");
+        seq_puts(s, " clock count count count rate accuracy phase cycle\n");
+        seq_puts(s, "---------------------------------------------------------------------------------------------\n");
 
         clk_prepare_lock();
 
@@ -2511,6 +2678,8 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
         seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
         seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
         seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
+        seq_printf(s, "\"duty_cycle\": %u",
+                   clk_core_get_scaled_duty_cycle(c, 100000));
 }
 
 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
@@ -2559,7 +2728,7 @@ static const struct {
         unsigned long flag;
         const char *name;
 } clk_flags[] = {
-#define ENTRY(f) { f, __stringify(f) }
+#define ENTRY(f) { f, #f }
         ENTRY(CLK_SET_RATE_GATE),
         ENTRY(CLK_SET_PARENT_GATE),
         ENTRY(CLK_SET_RATE_PARENT),
@@ -2572,6 +2741,7 @@ static const struct {
         ENTRY(CLK_SET_RATE_UNGATE),
         ENTRY(CLK_IS_CRITICAL),
         ENTRY(CLK_OPS_PARENT_ENABLE),
+        ENTRY(CLK_DUTY_CYCLE_PARENT),
 #undef ENTRY
 };
 
@@ -2610,6 +2780,17 @@ static int possible_parents_show(struct seq_file *s, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(possible_parents);
 
+static int clk_duty_cycle_show(struct seq_file *s, void *data)
+{
+        struct clk_core *core = s->private;
+        struct clk_duty *duty = &core->duty;
+
+        seq_printf(s, "%u/%u\n", duty->num, duty->den);
+
+        return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
+
 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
 {
         struct dentry *root;
@@ -2628,6 +2809,8 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
         debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
         debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
         debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
+        debugfs_create_file("clk_duty_cycle", 0444, root, core,
+                            &clk_duty_cycle_fops);
 
         if (core->num_parents > 1)
                 debugfs_create_file("clk_possible_parents", 0444, root, core,
@@ -2846,6 +3029,11 @@ static int __clk_core_init(struct clk_core *core)
                 core->phase = 0;
 
         /*
+         * Set clk's duty cycle.
+         */
+        clk_core_update_duty_cycle_nolock(core);
+
+        /*
          * Set clk's rate. The preferred method is to use .recalc_rate. For
          * simple clocks and lazy developers the default fallback is to use the
          * parent's rate. If a clock doesn't have a parent (or is orphaned)
diff --git a/drivers/clk/meson/clk-audio-divider.c b/drivers/clk/meson/clk-audio-divider.c
index 58f546e04807..e4cf96ba704e 100644
--- a/drivers/clk/meson/clk-audio-divider.c
+++ b/drivers/clk/meson/clk-audio-divider.c
@@ -51,7 +51,7 @@ static unsigned long audio_divider_recalc_rate(struct clk_hw *hw,
         struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
         unsigned long divider;
 
-        divider = meson_parm_read(clk->map, &adiv->div);
+        divider = meson_parm_read(clk->map, &adiv->div) + 1;
 
         return DIV_ROUND_UP_ULL((u64)parent_rate, divider);
 }
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 240658404367..177fffb9ebef 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -498,6 +498,7 @@ static struct clk_regmap gxbb_fclk_div2 = {
                 .ops = &clk_regmap_gate_ops,
                 .parent_names = (const char *[]){ "fclk_div2_div" },
                 .num_parents = 1,
+                .flags = CLK_IS_CRITICAL,
         },
 };
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 6860bd5a37c5..44e4e27eddad 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -35,6 +35,7 @@
 #define CLK_SEL 0x10
 #define CLK_DIS 0x14
 
+#define ARMADA_37XX_DVFS_LOAD_1 1
 #define LOAD_LEVEL_NR 4
 
 #define ARMADA_37XX_NB_L0L1 0x18
@@ -507,6 +508,40 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
         return -EINVAL;
 }
 
+/*
+ * Switching the CPU from the L2 or L3 frequencies (300 and 200 Mhz
+ * respectively) to L0 frequency (1.2 Ghz) requires a significant
+ * amount of time to let VDD stabilize to the appropriate
+ * voltage. This amount of time is large enough that it cannot be
+ * covered by the hardware countdown register. Due to this, the CPU
+ * might start operating at L0 before the voltage is stabilized,
+ * leading to CPU stalls.
+ *
+ * To work around this problem, we prevent switching directly from the
+ * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
+ * frequency in-between. The sequence therefore becomes:
+ * 1. First switch from L2/L3(200/300MHz) to L1(600MHZ)
+ * 2. Sleep 20ms for stabling VDD voltage
+ * 3. Then switch from L1(600MHZ) to L0(1200Mhz).
+ */
+static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
+{
+        unsigned int cur_level;
+
+        if (rate != 1200 * 1000 * 1000)
+                return;
+
+        regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
+        cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
+        if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
+                return;
+
+        regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
+                           ARMADA_37XX_NB_CPU_LOAD_MASK,
+                           ARMADA_37XX_DVFS_LOAD_1);
+        msleep(20);
+}
+
 static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
                                unsigned long parent_rate)
 {
@@ -537,6 +572,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
                  */
                 reg = ARMADA_37XX_NB_CPU_LOAD;
                 mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
+
+                clk_pm_cpu_set_rate_wa(rate, base);
+
                 regmap_update_bits(base, reg, mask, load_level);
 
                 return rate;
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 9f35b3fe1d97..ff8d66fd94e6 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -2781,6 +2781,7 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {
 
 static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
         .halt_reg = 0x75018,
+        .halt_check = BRANCH_HALT_SKIP,
         .clkr = {
                 .enable_reg = 0x75018,
                 .enable_mask = BIT(0),
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 1a25ee4f3658..4b20d1b67a1b 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -2910,6 +2910,7 @@ static struct gdsc mmagic_bimc_gdsc = {
                 .name = "mmagic_bimc",
         },
         .pwrsts = PWRSTS_OFF_ON,
+        .flags = ALWAYS_ON,
 };
 
 static struct gdsc mmagic_video_gdsc = {
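
For context, a minimal consumer-side sketch of the duty-cycle API added by the clk.c hunk above. The driver, device, and clock names here are hypothetical and only illustrative; clk_set_duty_cycle() and clk_get_scaled_duty_cycle() are the calls this diff introduces, and devm_clk_get()/dev_warn()/dev_info() are existing kernel helpers:

/* Hypothetical consumer: request a 25% duty cycle on its clock. */
#include <linux/clk.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        struct clk *clk;
        int ret;

        /* "foo" is an illustrative clock name, not one defined by this diff */
        clk = devm_clk_get(&pdev->dev, "foo");
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        /*
         * Ask for a 1/4 (25%) ratio; a provider without .set_duty_cycle can
         * forward the request to its parent when CLK_DUTY_CYCLE_PARENT or
         * CLK_SET_RATE_PARENT is set, otherwise the call is a no-op/error.
         */
        ret = clk_set_duty_cycle(clk, 1, 4);
        if (ret)
                dev_warn(&pdev->dev, "duty cycle not adjustable: %d\n", ret);

        /* Read the ratio back scaled to an integer, e.g. 25000 for 25%. */
        dev_info(&pdev->dev, "duty cycle: %d/100000\n",
                 clk_get_scaled_duty_cycle(clk, 100000));

        return 0;
}

A matching provider would implement the new .get_duty_cycle/.set_duty_cycle callbacks in its clk_ops; the defaults in the diff fall back to the parent clock or to an assumed 50% ratio.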