// SPDX-License-Identifier: GPL-2.0-only
/*
* cnl-sst.c - DSP library functions for CNL platform
*
* Copyright (C) 2016-17, Intel Corporation.
*
* Author: Guneshwor Singh <guneshwor.o.singh@intel.com>
*
* Modified from:
* HDA DSP library functions for SKL platform
* Copyright (C) 2014-15, Intel Corporation.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "../common/sst-ipc.h"
#include "cnl-sst-dsp.h"
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"

#define CNL_FW_ROM_INIT		0x1
#define CNL_FW_INIT		0x5
#define CNL_IPC_PURGE		0x01004000
#define CNL_INIT_TIMEOUT	300
#define CNL_BASEFW_TIMEOUT	3000

#define CNL_ADSP_SRAM0_BASE	0x80000

/* Firmware status window */
#define CNL_ADSP_FW_STATUS	CNL_ADSP_SRAM0_BASE
#define CNL_ADSP_ERROR_CODE	(CNL_ADSP_FW_STATUS + 0x4)

#define CNL_INSTANCE_ID		0
#define CNL_BASE_FW_MODULE_ID	0
#define CNL_ADSP_FW_HDR_OFFSET	0x2000
#define CNL_ROM_CTRL_DMA_ID	0x9
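
/*
 * Prepare the base firmware download: allocate a host DMA stream and copy
 * the image into its buffer, send the FW purge request over IPC, power up
 * DSP core 0 and poll the status window until the ROM reports init done.
 */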
static int cnl_prepare_fw(struct sst_dsp *ctx, const void *fwdata, u32 fwsize)
{
int ret, stream_tag;
stream_tag = ctx->dsp_ops.prepare(ctx->dev, 0x40, fwsize, &ctx->dmab);
if (stream_tag <= 0) {
dev_err(ctx->dev, "dma prepare failed: 0%#x\n", stream_tag);
return stream_tag;
}
ctx->dsp_ops.stream_tag = stream_tag;
memcpy(ctx->dmab.area, fwdata, fwsize);
/* purge FW request */
sst_dsp_shim_write(ctx, CNL_ADSP_REG_HIPCIDR,
CNL_ADSP_REG_HIPCIDR_BUSY | (CNL_IPC_PURGE |
((stream_tag - 1) << CNL_ROM_CTRL_DMA_ID)));
ret = cnl_dsp_enable_core(ctx, SKL_DSP_CORE0_MASK);
if (ret < 0) {
dev_err(ctx->dev, "dsp boot core failed ret: %d\n", ret);
ret = -EIO;
goto base_fw_load_failed;
}
/* enable interrupt */
cnl_ipc_int_enable(ctx);
cnl_ipc_op_int_enable(ctx);
ret = sst_dsp_register_poll(ctx, CNL_ADSP_FW_STATUS, CNL_FW_STS_MASK,
CNL_FW_ROM_INIT, CNL_INIT_TIMEOUT,
"rom load");
if (ret < 0) {
dev_err(ctx->dev, "rom init timeout, ret: %d\n", ret);
goto base_fw_load_failed;
}
return 0;
base_fw_load_failed:
ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, stream_tag);
cnl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
return ret;
}
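
/*
 * Start the host DMA stream that carries the firmware image, poll the
 * status window until the base firmware reports it is running, then stop
 * the stream and release the DMA buffer.
 */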
static int sst_transfer_fw_host_dma(struct sst_dsp *ctx)
{
int ret;
ctx->dsp_ops.trigger(ctx->dev, true, ctx->dsp_ops.stream_tag);
ret = sst_dsp_register_poll(ctx, CNL_ADSP_FW_STATUS, CNL_FW_STS_MASK,
CNL_FW_INIT, CNL_BASEFW_TIMEOUT,
"firmware boot");
ctx->dsp_ops.trigger(ctx->dev, false, ctx->dsp_ops.stream_tag);
ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, ctx->dsp_ops.stream_tag);
return ret;
}
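
/*
 * Load the base firmware: request the image if not yet cached, parse the
 * module UUIDs on first boot, strip the extended manifest, download the
 * image via host DMA and wait for the FW ready notification from the DSP.
 */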
static int cnl_load_base_firmware(struct sst_dsp *ctx)
{
struct firmware stripped_fw;
struct skl_sst *cnl = ctx->thread_context;
int ret;
if (!ctx->fw) {
ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
if (ret < 0) {
dev_err(ctx->dev, "request firmware failed: %d\n", ret);
goto cnl_load_base_firmware_failed;
}
}
/* parse uuids if first boot */
if (cnl->is_first_boot) {
ret = snd_skl_parse_uuids(ctx, ctx->fw,
CNL_ADSP_FW_HDR_OFFSET, 0);
if (ret < 0)
goto cnl_load_base_firmware_failed;
}
stripped_fw.data = ctx->fw->data;
stripped_fw.size = ctx->fw->size;
skl_dsp_strip_extended_manifest(&stripped_fw);
ret = cnl_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
if (ret < 0) {
dev_err(ctx->dev, "prepare firmware failed: %d\n", ret);
goto cnl_load_base_firmware_failed;
}
ret = sst_transfer_fw_host_dma(ctx);
if (ret < 0) {
dev_err(ctx->dev, "transfer firmware failed: %d\n", ret);
cnl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
goto cnl_load_base_firmware_failed;
}
ret = wait_event_timeout(cnl->boot_wait, cnl->boot_complete,
msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
if (ret == 0) {
dev_err(ctx->dev, "FW ready timed-out\n");
cnl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
ret = -EIO;
goto cnl_load_base_firmware_failed;
}
cnl->fw_loaded = true;
return 0;
cnl_load_base_firmware_failed:
release_firmware(ctx->fw);
ctx->fw = NULL;
return ret;
}
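
/*
 * Bring a DSP core to D0. If the base firmware is not loaded, reload it
 * (which also boots core 0); otherwise power up the core and either wait
 * for the boot notification (core 0) or move the core to D0 via a set_dx
 * IPC.
 */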
static int cnl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
struct skl_sst *cnl = ctx->thread_context;
unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
struct skl_ipc_dxstate_info dx;
int ret;
if (!cnl->fw_loaded) {
cnl->boot_complete = false;
ret = cnl_load_base_firmware(ctx);
if (ret < 0) {
dev_err(ctx->dev, "fw reload failed: %d\n", ret);
return ret;
}
cnl->cores.state[core_id] = SKL_DSP_RUNNING;
return ret;
}
ret = cnl_dsp_enable_core(ctx, core_mask);
if (ret < 0) {
dev_err(ctx->dev, "enable dsp core %d failed: %d\n",
core_id, ret);
goto err;
}
if (core_id == SKL_DSP_CORE0_ID) {
/* enable interrupt */
cnl_ipc_int_enable(ctx);
cnl_ipc_op_int_enable(ctx);
cnl->boot_complete = false;
ret = wait_event_timeout(cnl->boot_wait, cnl->boot_complete,
msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
if (ret == 0) {
dev_err(ctx->dev,
"dsp boot timeout, status=%#x error=%#x\n",
sst_dsp_shim_read(ctx, CNL_ADSP_FW_STATUS),
sst_dsp_shim_read(ctx, CNL_ADSP_ERROR_CODE));
			ret = -ETIMEDOUT;
			goto err;
}
} else {
dx.core_mask = core_mask;
dx.dx_mask = core_mask;
ret = skl_ipc_set_dx(&cnl->ipc, CNL_INSTANCE_ID,
CNL_BASE_FW_MODULE_ID, &dx);
if (ret < 0) {
dev_err(ctx->dev, "set_dx failed, core: %d ret: %d\n",
core_id, ret);
goto err;
}
}
cnl->cores.state[core_id] = SKL_DSP_RUNNING;
return 0;
err:
cnl_dsp_disable_core(ctx, core_mask);
return ret;
}
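
/*
 * Put a DSP core into D3: ask the firmware to enter D3 via a set_dx IPC,
 * disable the IPC interrupts when core 0 goes down, then power off the
 * core.
 */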
static int cnl_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
{
struct skl_sst *cnl = ctx->thread_context;
unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
struct skl_ipc_dxstate_info dx;
int ret;
dx.core_mask = core_mask;
dx.dx_mask = SKL_IPC_D3_MASK;
ret = skl_ipc_set_dx(&cnl->ipc, CNL_INSTANCE_ID,
CNL_BASE_FW_MODULE_ID, &dx);
if (ret < 0) {
dev_err(ctx->dev,
"dsp core %d to d3 failed; continue reset\n",
core_id);
cnl->fw_loaded = false;
}
/* disable interrupts if core 0 */
if (core_id == SKL_DSP_CORE0_ID) {
skl_ipc_op_int_disable(ctx);
skl_ipc_int_disable(ctx);
}
ret = cnl_dsp_disable_core(ctx, core_mask);
if (ret < 0) {
dev_err(ctx->dev, "disable dsp core %d failed: %d\n",
core_id, ret);
return ret;
}
cnl->cores.state[core_id] = SKL_DSP_RESET;
return ret;
}
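
/* Read the error code the firmware reports in the SRAM0 status window */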
static unsigned int cnl_get_errno(struct sst_dsp *ctx)
{
return sst_dsp_shim_read(ctx, CNL_ADSP_ERROR_CODE);
}
static const struct skl_dsp_fw_ops cnl_fw_ops = {
.set_state_D0 = cnl_set_dsp_D0,
.set_state_D3 = cnl_set_dsp_D3,
.load_fw = cnl_load_base_firmware,
.get_fw_errcode = cnl_get_errno,
};
static struct sst_ops cnl_ops = {
.irq_handler = cnl_dsp_sst_interrupt,
.write = sst_shim32_write,
.read = sst_shim32_read,
.ram_read = sst_memcpy_fromio_32,
.ram_write = sst_memcpy_toio_32,
.free = cnl_dsp_free,
};
#define CNL_IPC_GLB_NOTIFY_RSP_SHIFT 29
#define CNL_IPC_GLB_NOTIFY_RSP_MASK 0x1
#define CNL_IPC_GLB_NOTIFY_RSP_TYPE(x) (((x) >> CNL_IPC_GLB_NOTIFY_RSP_SHIFT) \
& CNL_IPC_GLB_NOTIFY_RSP_MASK)
static irqreturn_t cnl_dsp_irq_thread_handler(int irq, void *context)
{
struct sst_dsp *dsp = context;
struct skl_sst *cnl = sst_dsp_get_thread_context(dsp);
struct sst_generic_ipc *ipc = &cnl->ipc;
struct skl_ipc_header header = {0};
u32 hipcida, hipctdr, hipctdd;
int ipc_irq = 0;
/* here we handle ipc interrupts only */
if (!(dsp->intr_status & CNL_ADSPIS_IPC))
return IRQ_NONE;
hipcida = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCIDA);
hipctdr = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCTDR);
/* reply message from dsp */
if (hipcida & CNL_ADSP_REG_HIPCIDA_DONE) {
sst_dsp_shim_update_bits(dsp, CNL_ADSP_REG_HIPCCTL,
CNL_ADSP_REG_HIPCCTL_DONE, 0);
/* clear done bit - tell dsp operation is complete */
sst_dsp_shim_update_bits_forced(dsp, CNL_ADSP_REG_HIPCIDA,
CNL_ADSP_REG_HIPCIDA_DONE, CNL_ADSP_REG_HIPCIDA_DONE);
ipc_irq = 1;
/* unmask done interrupt */
sst_dsp_shim_update_bits(dsp, CNL_ADSP_REG_HIPCCTL,
CNL_ADSP_REG_HIPCCTL_DONE, CNL_ADSP_REG_HIPCCTL_DONE);
}
/* new message from dsp */
if (hipctdr & CNL_ADSP_REG_HIPCTDR_BUSY) {
hipctdd = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCTDD);
header.primary = hipctdr;
header.extension = hipctdd;
dev_dbg(dsp->dev, "IPC irq: Firmware respond primary:%x",
header.primary);
dev_dbg(dsp->dev, "IPC irq: Firmware respond extension:%x",
header.extension);
if (CNL_IPC_GLB_NOTIFY_RSP_TYPE(header.primary)) {
/* Handle Immediate reply from DSP Core */
skl_ipc_process_reply(ipc, header);
} else {
dev_dbg(dsp->dev, "IPC irq: Notification from firmware\n");
skl_ipc_process_notification(ipc, header);
}
/* clear busy interrupt */
sst_dsp_shim_update_bits_forced(dsp, CNL_ADSP_REG_HIPCTDR,
CNL_ADSP_REG_HIPCTDR_BUSY, CNL_ADSP_REG_HIPCTDR_BUSY);
/* set done bit to ack dsp */
sst_dsp_shim_update_bits_forced(dsp, CNL_ADSP_REG_HIPCTDA,
CNL_ADSP_REG_HIPCTDA_DONE, CNL_ADSP_REG_HIPCTDA_DONE);
ipc_irq = 1;
}
if (ipc_irq == 0)
return IRQ_NONE;
cnl_ipc_int_enable(dsp);
/* continue to send any remaining messages */
schedule_work(&ipc->kwork);
return IRQ_HANDLED;
}
static struct sst_dsp_device cnl_dev = {
.thread = cnl_dsp_irq_thread_handler,
.ops = &cnl_ops,
};
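
/*
 * Send an IPC message: copy any payload to the outbox, write the extension
 * word, then write the primary word with the BUSY bit set to raise the
 * request on the DSP side.
 */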
static void cnl_ipc_tx_msg(struct sst_generic_ipc *ipc, struct ipc_message *msg)
{
struct skl_ipc_header *header = (struct skl_ipc_header *)(&msg->header);
if (msg->tx_size)
sst_dsp_outbox_write(ipc->dsp, msg->tx_data, msg->tx_size);
sst_dsp_shim_write_unlocked(ipc->dsp, CNL_ADSP_REG_HIPCIDD,
header->extension);
sst_dsp_shim_write_unlocked(ipc->dsp, CNL_ADSP_REG_HIPCIDR,
header->primary | CNL_ADSP_REG_HIPCIDR_BUSY);
}
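
/* The DSP is busy as long as it has not cleared the BUSY bit in HIPCIDR */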
static bool cnl_ipc_is_dsp_busy(struct sst_dsp *dsp)
{
u32 hipcidr;
hipcidr = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCIDR);
return (hipcidr & CNL_ADSP_REG_HIPCIDR_BUSY);
}
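
/*
 * Set up the generic IPC layer and plug in the CNL-specific mailbox sizes
 * and register accessors.
 */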
static int cnl_ipc_init(struct device *dev, struct skl_sst *cnl)
{
struct sst_generic_ipc *ipc;
int err;
ipc = &cnl->ipc;
ipc->dsp = cnl->dsp;
ipc->dev = dev;
ipc->tx_data_max_size = CNL_ADSP_W1_SZ;
ipc->rx_data_max_size = CNL_ADSP_W0_UP_SZ;
err = sst_ipc_init(ipc);
if (err)
return err;
/*
* overriding tx_msg and is_dsp_busy since
* ipc registers are different for cnl
*/
ipc->ops.tx_msg = cnl_ipc_tx_msg;
ipc->ops.tx_data_copy = skl_ipc_tx_data_copy;
ipc->ops.is_dsp_busy = cnl_ipc_is_dsp_busy;
return 0;
}
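
/*
 * Create and initialize the CNL DSP context: common SST context setup,
 * CNL firmware ops, SRAM window and mailbox mapping, IPC init and IRQ
 * acquisition.
 */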
int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
struct skl_sst **dsp)
{
struct skl_sst *cnl;
struct sst_dsp *sst;
int ret;
ret = skl_sst_ctx_init(dev, irq, fw_name, dsp_ops, dsp, &cnl_dev);
if (ret < 0) {
dev_err(dev, "%s: no device\n", __func__);
return ret;
}
cnl = *dsp;
sst = cnl->dsp;
sst->fw_ops = cnl_fw_ops;
sst->addr.lpe = mmio_base;
sst->addr.shim = mmio_base;
sst->addr.sram0_base = CNL_ADSP_SRAM0_BASE;
sst->addr.sram1_base = CNL_ADSP_SRAM1_BASE;
sst->addr.w0_stat_sz = CNL_ADSP_W0_STAT_SZ;
sst->addr.w0_up_sz = CNL_ADSP_W0_UP_SZ;
sst_dsp_mailbox_init(sst, (CNL_ADSP_SRAM0_BASE + CNL_ADSP_W0_STAT_SZ),
CNL_ADSP_W0_UP_SZ, CNL_ADSP_SRAM1_BASE,
CNL_ADSP_W1_SZ);
ret = cnl_ipc_init(dev, cnl);
if (ret) {
skl_dsp_free(sst);
return ret;
}
cnl->boot_complete = false;
init_waitqueue_head(&cnl->boot_wait);
return skl_dsp_acquire_irq(sst);
}
EXPORT_SYMBOL_GPL(cnl_sst_dsp_init);
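
/*
 * First boot: download the base firmware and initialize the per-core
 * power-state bookkeeping.
 */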
int cnl_sst_init_fw(struct device *dev, struct skl_sst *ctx)
{
int ret;
struct sst_dsp *sst = ctx->dsp;
ret = ctx->dsp->fw_ops.load_fw(sst);
if (ret < 0) {
dev_err(dev, "load base fw failed: %d", ret);
return ret;
}
skl_dsp_init_core_state(sst);
ctx->is_first_boot = false;
return 0;
}
EXPORT_SYMBOL_GPL(cnl_sst_init_fw);
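
/*
 * Release the cached firmware image, the module UUID list, the IPC
 * resources and the DSP context.
 */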
void cnl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
{
if (ctx->dsp->fw)
release_firmware(ctx->dsp->fw);
skl_freeup_uuid_list(ctx);
cnl_ipc_free(&ctx->ipc);
ctx->dsp->ops->free(ctx->dsp);
}
EXPORT_SYMBOL_GPL(cnl_sst_dsp_cleanup);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Cannonlake IPC driver");