// SPDX-License-Identifier: GPL-2.0-only
/*
* Analog Devices Generic AXI ADC IP core
* Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
*
* Copyright 2012-2020 Analog Devices Inc.
*/
#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>
#include <linux/iio/backend.h>
#include <linux/iio/buffer-dmaengine.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>

/*
 * Register definitions:
 * https://wiki.analog.com/resources/fpga/docs/axi_adc_ip#register_map
 */

/* ADC controls */
#define ADI_AXI_REG_RSTN 0x0040
#define ADI_AXI_REG_RSTN_CE_N BIT(2)
#define ADI_AXI_REG_RSTN_MMCM_RSTN BIT(1)
#define ADI_AXI_REG_RSTN_RSTN BIT(0)
#define ADI_AXI_ADC_REG_CTRL 0x0044
#define ADI_AXI_ADC_CTRL_DDR_EDGESEL_MASK BIT(1)
#define ADI_AXI_ADC_REG_DRP_STATUS 0x0074
#define ADI_AXI_ADC_DRP_LOCKED BIT(17)
/* ADC Channel controls */
#define ADI_AXI_REG_CHAN_CTRL(c) (0x0400 + (c) * 0x40)
#define ADI_AXI_REG_CHAN_CTRL_LB_OWR BIT(11)
#define ADI_AXI_REG_CHAN_CTRL_PN_SEL_OWR BIT(10)
#define ADI_AXI_REG_CHAN_CTRL_IQCOR_EN BIT(9)
#define ADI_AXI_REG_CHAN_CTRL_DCFILT_EN BIT(8)
#define ADI_AXI_REG_CHAN_CTRL_FMT_MASK GENMASK(6, 4)
#define ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT BIT(6)
#define ADI_AXI_REG_CHAN_CTRL_FMT_TYPE BIT(5)
#define ADI_AXI_REG_CHAN_CTRL_FMT_EN BIT(4)
#define ADI_AXI_REG_CHAN_CTRL_PN_TYPE_OWR BIT(1)
#define ADI_AXI_REG_CHAN_CTRL_ENABLE BIT(0)
#define ADI_AXI_ADC_REG_CHAN_STATUS(c) (0x0404 + (c) * 0x40)
#define ADI_AXI_ADC_CHAN_STAT_PN_MASK GENMASK(2, 1)
#define ADI_AXI_ADC_REG_CHAN_CTRL_3(c) (0x0418 + (c) * 0x40)
#define ADI_AXI_ADC_CHAN_PN_SEL_MASK GENMASK(19, 16)
/* IO Delays */
#define ADI_AXI_ADC_REG_DELAY(l) (0x0800 + (l) * 0x4)
#define AXI_ADC_DELAY_CTRL_MASK GENMASK(4, 0)
#define ADI_AXI_ADC_MAX_IO_NUM_LANES 15
#define ADI_AXI_REG_CHAN_CTRL_DEFAULTS \
(ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT | \
ADI_AXI_REG_CHAN_CTRL_FMT_EN | \
ADI_AXI_REG_CHAN_CTRL_ENABLE)
struct adi_axi_adc_state {
struct regmap *regmap;
struct device *dev;
/* lock to protect multiple accesses to the device registers */
struct mutex lock;
};
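
/*
 * Bring the core out of reset: release the MMCM reset first, wait for the
 * DRP to report a lock and only then deassert the core reset itself.
 */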
static int axi_adc_enable(struct iio_backend *back)
{
struct adi_axi_adc_state *st = iio_backend_get_priv(back);
unsigned int __val;
int ret;
guard(mutex)(&st->lock);
ret = regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN,
ADI_AXI_REG_RSTN_MMCM_RSTN);
if (ret)
return ret;
/*
 * Make sure the DRP (Dynamic Reconfiguration Port) is locked. Not all
 * designs actually use it, but those that don't still report the lock
 * bit as set, so we poll it unconditionally and keep this code generic.
 */
ret = regmap_read_poll_timeout(st->regmap, ADI_AXI_ADC_REG_DRP_STATUS,
__val, __val & ADI_AXI_ADC_DRP_LOCKED,
100, 1000);
if (ret)
return ret;
return regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN,
ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
}
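
/* Put the core back into reset. Register access still works in this state. */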
static void axi_adc_disable(struct iio_backend *back)
{
struct adi_axi_adc_state *st = iio_backend_get_priv(back);
guard(mutex)(&st->lock);
regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
}
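
/*
 * Configure the per channel output data format: enable the formatting
 * stage, optionally sign extend the samples and select offset binary
 * encoding when the frontend asks for it.
 */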
static int axi_adc_data_format_set(struct iio_backend *back, unsigned int chan,
const struct iio_backend_data_fmt *data)
{
struct adi_axi_adc_state *st = iio_backend_get_priv(back);
u32 val;
if (!data->enable)
return regmap_clear_bits(st->regmap,
ADI_AXI_REG_CHAN_CTRL(chan),
ADI_AXI_REG_CHAN_CTRL_FMT_EN);
val = FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_EN, true);
if (data->sign_extend)
val |= FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT, true);
if (data->type == IIO_BACKEND_OFFSET_BINARY)
val |= FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_TYPE, true);
return regmap_update_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
ADI_AXI_REG_CHAN_CTRL_FMT_MASK, val);
}
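
/* Select which clock edge (rising or falling) samples are captured on. */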
static int axi_adc_data_sample_trigger(struct iio_backend *back,
enum iio_backend_sample_trigger trigger)
{
struct adi_axi_adc_state *st = iio_backend_get_priv(back);
switch (trigger) {
case IIO_BACKEND_SAMPLE_TRIGGER_EDGE_RISING:
return regmap_clear_bits(st->regmap, ADI_AXI_ADC_REG_CTRL,
ADI_AXI_ADC_CTRL_DDR_EDGESEL_MASK);
case IIO_BACKEND_SAMPLE_TRIGGER_EDGE_FALLING:
return regmap_set_bits(st->regmap, ADI_AXI_ADC_REG_CTRL,
ADI_AXI_ADC_CTRL_DDR_EDGESEL_MASK);
default:
return -EINVAL;
}
}
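
/*
 * Program the IO delay taps for one data lane. A readback of all ones
 * means the delay clock is not running, so report -EIO in that case.
 */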
static int axi_adc_iodelays_set(struct iio_backend *back, unsigned int lane,
unsigned int tap)
{
struct adi_axi_adc_state *st = iio_backend_get_priv(back);
int ret;
u32 val;
if (tap > FIELD_MAX(AXI_ADC_DELAY_CTRL_MASK))
return -EINVAL;
if (lane > ADI_AXI_ADC_MAX_IO_NUM_LANES)
return -EINVAL;
guard(mutex)(&st->lock);
ret = regmap_write(st->regmap, ADI_AXI_ADC_REG_DELAY(lane), tap);
if (ret)
return ret;
/*
* If readback is ~0, that means there are issues with the
* delay_clk.
*/
ret = regmap_read(st->regmap, ADI_AXI_ADC_REG_DELAY(lane), &val);
if (ret)
return ret;
if (val == U32_MAX)
return -EIO;
return 0;
}
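
/*
 * Select the test pattern fed to the channel's pattern monitor. Only the
 * ADI PRBS 9a sequence is handled here; anything else is rejected.
 */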
static int axi_adc_test_pattern_set(struct iio_backend *back,
unsigned int chan,
enum iio_backend_test_pattern pattern)
{
struct adi_axi_adc_state *st = iio_backend_get_priv(back);
switch (pattern) {
case IIO_BACKEND_NO_TEST_PATTERN:
/* nothing to do */
return 0;
case IIO_BACKEND_ADI_PRBS_9A:
return regmap_update_bits(st->regmap, ADI_AXI_ADC_REG_CHAN_CTRL_3(chan),
ADI_AXI_ADC_CHAN_PN_SEL_MASK,
FIELD_PREP(ADI_AXI_ADC_CHAN_PN_SEL_MASK, 0));
default:
return -EINVAL;
}
}
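
/*
 * Check the channel pattern monitor: the sticky error bits are cleared by
 * writing them back, then the core is given some time to compare the
 * incoming data against the selected pattern before the status is read.
 */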
static int axi_adc_chan_status(struct iio_backend *back, unsigned int chan,
bool *error)
{
struct adi_axi_adc_state *st = iio_backend_get_priv(back);
int ret;
u32 val;
guard(mutex)(&st->lock);
/* reset test bits by setting them */
ret = regmap_write(st->regmap, ADI_AXI_ADC_REG_CHAN_STATUS(chan),
ADI_AXI_ADC_CHAN_STAT_PN_MASK);
if (ret)
return ret;
/* give the core enough time to validate or flag errors in the incoming pattern */
fsleep(1000);
ret = regmap_read(st->regmap, ADI_AXI_ADC_REG_CHAN_STATUS(chan), &val);
if (ret)
return ret;
if (ADI_AXI_ADC_CHAN_STAT_PN_MASK & val)
*error = true;
else
*error = false;
return 0;
}
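
/* Enable data capture for the given channel. */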
static int axi_adc_chan_enable(struct iio_backend *back, unsigned int chan)
{
struct adi_axi_adc_state *st = iio_backend_get_priv(back);
return regmap_set_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
ADI_AXI_REG_CHAN_CTRL_ENABLE);
}
static int axi_adc_chan_disable(struct iio_backend *back, unsigned int chan)
{
struct adi_axi_adc_state *st = iio_backend_get_priv(back);
return regmap_clear_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
ADI_AXI_REG_CHAN_CTRL_ENABLE);
}
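
/*
 * Set up the DMA buffer for the frontend device. The DMA channel name is
 * taken from the "dma-names" property, with "rx" as the fallback.
 */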
static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back,
struct iio_dev *indio_dev)
{
struct adi_axi_adc_state *st = iio_backend_get_priv(back);
const char *dma_name;
if (device_property_read_string(st->dev, "dma-names", &dma_name))
dma_name = "rx";
return iio_dmaengine_buffer_setup(st->dev, indio_dev, dma_name);
}
static void axi_adc_free_buffer(struct iio_backend *back,
struct iio_buffer *buffer)
{
iio_dmaengine_buffer_free(buffer);
}
static const struct regmap_config axi_adc_regmap_config = {
.val_bits = 32,
.reg_bits = 32,
.reg_stride = 4,
};
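
/* Backend ops handed to IIO ADC frontends via the IIO backend framework. */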
static const struct iio_backend_ops adi_axi_adc_generic = {
.enable = axi_adc_enable,
.disable = axi_adc_disable,
.data_format_set = axi_adc_data_format_set,
.chan_enable = axi_adc_chan_enable,
.chan_disable = axi_adc_chan_disable,
.request_buffer = axi_adc_request_buffer,
.free_buffer = axi_adc_free_buffer,
.data_sample_trigger = axi_adc_data_sample_trigger,
.iodelay_set = axi_adc_iodelays_set,
.test_pattern_set = axi_adc_test_pattern_set,
.chan_status = axi_adc_chan_status,
};
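
/*
 * Map the core, force it into reset (the frontend decides when to enable
 * it) and verify that the reported major IP version matches the version
 * this driver was written against before registering the backend.
 */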
static int adi_axi_adc_probe(struct platform_device *pdev)
{
const unsigned int *expected_ver;
struct adi_axi_adc_state *st;
void __iomem *base;
unsigned int ver;
struct clk *clk;
int ret;
st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
st->dev = &pdev->dev;
st->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&axi_adc_regmap_config);
if (IS_ERR(st->regmap))
return PTR_ERR(st->regmap);
expected_ver = device_get_match_data(&pdev->dev);
if (!expected_ver)
return -ENODEV;
clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
/*
 * Force disable the core. It is up to the frontend to enable us again,
 * and register access still works while the core is held in reset.
 */
ret = regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
if (ret)
return ret;
ret = regmap_read(st->regmap, ADI_AXI_REG_VERSION, &ver);
if (ret)
return ret;
if (ADI_AXI_PCORE_VER_MAJOR(ver) != ADI_AXI_PCORE_VER_MAJOR(*expected_ver)) {
dev_err(&pdev->dev,
"Major version mismatch. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
ADI_AXI_PCORE_VER_MAJOR(*expected_ver),
ADI_AXI_PCORE_VER_MINOR(*expected_ver),
ADI_AXI_PCORE_VER_PATCH(*expected_ver),
ADI_AXI_PCORE_VER_MAJOR(ver),
ADI_AXI_PCORE_VER_MINOR(ver),
ADI_AXI_PCORE_VER_PATCH(ver));
return -ENODEV;
}
ret = devm_iio_backend_register(&pdev->dev, &adi_axi_adc_generic, st);
if (ret)
return ret;
dev_info(&pdev->dev, "AXI ADC IP core (%d.%.2d.%c) probed\n",
ADI_AXI_PCORE_VER_MAJOR(ver),
ADI_AXI_PCORE_VER_MINOR(ver),
ADI_AXI_PCORE_VER_PATCH(ver));
return 0;
}
static unsigned int adi_axi_adc_10_0_a_info = ADI_AXI_PCORE_VER(10, 0, 'a');
/* Match table for of_platform binding */
static const struct of_device_id adi_axi_adc_of_match[] = {
{ .compatible = "adi,axi-adc-10.0.a", .data = &adi_axi_adc_10_0_a_info },
{ /* end of list */ }
};
MODULE_DEVICE_TABLE(of, adi_axi_adc_of_match);
static struct platform_driver adi_axi_adc_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = adi_axi_adc_of_match,
},
.probe = adi_axi_adc_probe,
};
module_platform_driver(adi_axi_adc_driver);
MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices Generic AXI ADC IP core driver");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(IIO_DMAENGINE_BUFFER);
MODULE_IMPORT_NS(IIO_BACKEND);