// SPDX-License-Identifier: GPL-2.0
/*
* Zynq UltraScale+ MPSoC Divider support
*
* Copyright (C) 2016-2018 Xilinx
*
* Adjustable divider clock implementation
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include "clk-zynqmp.h"
/*
* DOC: basic adjustable divider clock that cannot gate
*
* Traits of this clock:
* prepare - clk_prepare only ensures that parents are prepared
* enable - clk_enable only ensures that parents are enabled
* rate - rate is adjustable. clk->rate = ceiling(parent->rate / divisor)
* parent - fixed parent. No clk_set_parent support
*/
#define to_zynqmp_clk_divider(_hw) \
container_of(_hw, struct zynqmp_clk_divider, hw)
#define CLK_FRAC BIT(13) /* has a fractional parent */
/**
* struct zynqmp_clk_divider - adjustable divider clock
* @hw: handle between common and hardware-specific interfaces
* @flags: Hardware specific flags
* @is_frac: The divider is a fractional divider
* @clk_id: Id of clock
* @div_type: divisor type (TYPE_DIV1 or TYPE_DIV2)
* @max_div: maximum supported divisor (queried from firmware)
*/
struct zynqmp_clk_divider {
struct clk_hw hw;
u8 flags;
bool is_frac;
u32 clk_id;
u32 div_type;
u16 max_div;
};
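/**
* zynqmp_divider_get_val() - Compute the divisor closest to a target rate
* @parent_rate: rate of the parent clock
* @rate: requested rate
*
* Return: parent_rate / rate, rounded to the nearest integer
*/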
static inline int zynqmp_divider_get_val(unsigned long parent_rate,
unsigned long rate)
{
return DIV_ROUND_CLOSEST(parent_rate, rate);
}
/**
* zynqmp_clk_divider_recalc_rate() - Recalc rate of divider clock
* @hw: handle between common and hardware-specific interfaces
* @parent_rate: rate of parent clock
*
* Return: Current rate of the divider clock
*/
static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct zynqmp_clk_divider *divider = to_zynqmp_clk_divider(hw);
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = divider->clk_id;
u32 div_type = divider->div_type;
u32 div, value;
int ret;
const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
ret = eemi_ops->clock_getdivider(clk_id, &div);
if (ret)
pr_warn_once("%s() get divider failed for %s, ret = %d\n",
__func__, clk_name, ret);
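/*
* The firmware returns both divisors packed into one word:
* DIV1 in bits [15:0], DIV2 in bits [31:16].
*/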
if (div_type == TYPE_DIV1)
value = div & 0xFFFF;
else
value = div >> 16;
if (!value) {
WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
"%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
clk_name);
return parent_rate;
}
return DIV_ROUND_UP_ULL(parent_rate, value);
}
/**
* zynqmp_clk_divider_round_rate() - Round rate of divider clock
* @hw: handle between common and hardware-specific interfaces
* @rate: rate of clock to be set
* @prate: rate of parent clock
*
* Return: Closest achievable rate to the requested rate
*/
static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
unsigned long rate,
unsigned long *prate)
{
struct zynqmp_clk_divider *divider = to_zynqmp_clk_divider(hw);
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = divider->clk_id;
u32 div_type = divider->div_type;
u32 bestdiv;
int ret;
const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
/* if read only, just return current value */
if (divider->flags & CLK_DIVIDER_READ_ONLY) {
ret = eemi_ops->clock_getdivider(clk_id, &bestdiv);
if (ret)
pr_warn_once("%s() get divider failed for %s, ret = %d\n",
__func__, clk_name, ret);
if (div_type == TYPE_DIV1)
bestdiv = bestdiv & 0xFFFF;
else
bestdiv = bestdiv >> 16;
return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
}
bestdiv = zynqmp_divider_get_val(*prate, rate);
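/*
* For a fractional parent with CLK_SET_RATE_PARENT, fall back to a
* divisor of 1 and let the parent provide the rate, unless the
* requested rate is an exact multiple of the current parent rate.
*/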
if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
bestdiv = rate % *prate ? 1 : bestdiv;
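/* Report the parent rate that yields the requested rate with the chosen divisor */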
*prate = rate * bestdiv;
return rate;
}
/**
* zynqmp_clk_divider_set_rate() - Set rate of divider clock
* @hw: handle between common and hardware-specific interfaces
* @rate: rate of clock to be set
* @parent_rate: rate of parent clock
*
* Return: 0 on success else error+reason
*/
static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct zynqmp_clk_divider *divider = to_zynqmp_clk_divider(hw);
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = divider->clk_id;
u32 div_type = divider->div_type;
u32 value, div;
int ret;
const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
value = zynqmp_divider_get_val(parent_rate, rate);
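/*
* Pack the divisor into the 32-bit value expected by the firmware:
* DIV1 occupies bits [15:0], DIV2 occupies bits [31:16]. The half
* that is not being programmed is left as 0xffff.
*/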
if (div_type == TYPE_DIV1) {
div = value & 0xFFFF;
div |= 0xffff << 16;
} else {
div = 0xffff;
div |= value << 16;
}
ret = eemi_ops->clock_setdivider(clk_id, div);
if (ret)
pr_warn_once("%s() set divider failed for %s, ret = %d\n",
__func__, clk_name, ret);
return ret;
}
static const struct clk_ops zynqmp_clk_divider_ops = {
.recalc_rate = zynqmp_clk_divider_recalc_rate,
.round_rate = zynqmp_clk_divider_round_rate,
.set_rate = zynqmp_clk_divider_set_rate,
};
/**
* zynqmp_clk_get_max_divisor() - Get maximum supported divisor from firmware.
* @clk_id: Id of clock
* @type: Divider type
*
* Return: Maximum divisor of the clock if the query succeeds,
* U16_MAX otherwise
*/
u32 zynqmp_clk_get_max_divisor(u32 clk_id, u32 type)
{
const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
struct zynqmp_pm_query_data qdata = {0};
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
qdata.qid = PM_QID_CLOCK_GET_MAX_DIVISOR;
qdata.arg1 = clk_id;
qdata.arg2 = type;
ret = eemi_ops->query_data(qdata, ret_payload);
/*
* To maintain backward compatibility, return the maximum possible value
* (0xFFFF) if the query for the maximum divisor fails.
*/
if (ret)
return U16_MAX;
return ret_payload[1];
}
/**
* zynqmp_clk_register_divider() - Register a divider clock
* @name: Name of this clock
* @clk_id: Id of clock
* @parents: Names of this clock's parents
* @num_parents: Number of parents
* @nodes: Clock topology node
*
* Return: clock hardware of the registered clock divider
*/
struct clk_hw *zynqmp_clk_register_divider(const char *name,
u32 clk_id,
const char * const *parents,
u8 num_parents,
const struct clock_topology *nodes)
{
struct zynqmp_clk_divider *div;
struct clk_hw *hw;
struct clk_init_data init;
int ret;
/* allocate the divider */
div = kzalloc(sizeof(*div), GFP_KERNEL);
if (!div)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &zynqmp_clk_divider_ops;
/* CLK_FRAC is not defined in the common clk framework */
init.flags = nodes->flag & ~CLK_FRAC;
init.parent_names = parents;
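/* A divider has a single, fixed parent; only the first parent name is used */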
init.num_parents = 1;
/* struct clk_divider assignments */
div->is_frac = !!(nodes->flag & CLK_FRAC);
div->flags = nodes->type_flag;
div->hw.init = &init;
div->clk_id = clk_id;
div->div_type = nodes->type;
/*
* The maximum divider limit is needed to compute the best achievable
* rate.
*/
div->max_div = zynqmp_clk_get_max_divisor(clk_id, nodes->type);
hw = &div->hw;
ret = clk_hw_register(NULL, hw);
if (ret) {
kfree(div);
hw = ERR_PTR(ret);
}
return hw;
}