1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 *  sst_ipc.c - Intel SST Driver for audio engine
4 *
5 *  Copyright (C) 2008-14 Intel Corporation
6 *  Authors:	Vinod Koul <vinod.koul@intel.com>
7 *		Harsha Priya <priya.harsha@intel.com>
8 *		Dharageswari R <dharageswari.r@intel.com>
9 *		KP Jeeja <jeeja.kp@intel.com>
10 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11 *
12 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
13 */
14#include <linux/pci.h>
15#include <linux/firmware.h>
16#include <linux/sched.h>
17#include <linux/delay.h>
18#include <sound/core.h>
19#include <sound/pcm.h>
20#include <sound/soc.h>
21#include <sound/compress_driver.h>
22#include <asm/intel-mid.h>
23#include <asm/platform_sst_audio.h>
24#include "../sst-mfld-platform.h"
25#include "sst.h"
26
27struct sst_block *sst_create_block(struct intel_sst_drv *ctx,
28					u32 msg_id, u32 drv_id)
29{
30	struct sst_block *msg;
31
32	dev_dbg(ctx->dev, "Enter\n");
33	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
34	if (!msg)
35		return NULL;
36	msg->condition = false;
37	msg->on = true;
38	msg->msg_id = msg_id;
39	msg->drv_id = drv_id;
40	spin_lock_bh(&ctx->block_lock);
41	list_add_tail(&msg->node, &ctx->block_list);
42	spin_unlock_bh(&ctx->block_lock);
43
44	return msg;
45}
46
47/*
48 * while handling the interrupts, we need to check for message status and
49 * then if we are blocking for a message
50 *
51 * here we are unblocking the blocked ones, this is based on id we have
52 * passed and search that for block threads.
53 * We will not find block in two cases
54 *  a) when its small message and block in not there, so silently ignore
55 *  them
56 *  b) when we are actually not able to find the block (bug perhaps)
57 *
58 *  Since we have bit of small messages we can spam kernel log with err
59 *  print on above so need to keep as debug prints which should be enabled
60 *  via dynamic debug while debugging IPC issues
61 */
62int sst_wake_up_block(struct intel_sst_drv *ctx, int result,
63		u32 drv_id, u32 ipc, void *data, u32 size)
64{
65	struct sst_block *block;
66
67	dev_dbg(ctx->dev, "Enter\n");
68
69	spin_lock_bh(&ctx->block_lock);
70	list_for_each_entry(block, &ctx->block_list, node) {
71		dev_dbg(ctx->dev, "Block ipc %d, drv_id %d\n", block->msg_id,
72							block->drv_id);
73		if (block->msg_id == ipc && block->drv_id == drv_id) {
74			dev_dbg(ctx->dev, "free up the block\n");
75			block->ret_code = result;
76			block->data = data;
77			block->size = size;
78			block->condition = true;
79			spin_unlock_bh(&ctx->block_lock);
80			wake_up(&ctx->wait_queue);
81			return 0;
82		}
83	}
84	spin_unlock_bh(&ctx->block_lock);
85	dev_dbg(ctx->dev,
86		"Block not found or a response received for a short msg for ipc %d, drv_id %d\n",
87		ipc, drv_id);
88	return -EINVAL;
89}
90
91int sst_free_block(struct intel_sst_drv *ctx, struct sst_block *freed)
92{
93	struct sst_block *block, *__block;
94
95	dev_dbg(ctx->dev, "Enter\n");
96	spin_lock_bh(&ctx->block_lock);
97	list_for_each_entry_safe(block, __block, &ctx->block_list, node) {
98		if (block == freed) {
99			pr_debug("pvt_id freed --> %d\n", freed->drv_id);
100			/* toggle the index position of pvt_id */
101			list_del(&freed->node);
102			spin_unlock_bh(&ctx->block_lock);
103			kfree(freed->data);
104			freed->data = NULL;
105			kfree(freed);
106			return 0;
107		}
108	}
109	spin_unlock_bh(&ctx->block_lock);
110	dev_err(ctx->dev, "block is already freed!!!\n");
111	return -EINVAL;
112}
113
/*
 * sst_post_message_mrfld - post an IPC command to the DSP via the IPCX shim
 * @sst_drv_ctx: SST driver context
 * @ipc_msg: message to send when @sync is true; ignored in the async path,
 *	     where the head of ipc_dispatch_list is sent instead
 * @sync: true = busy-wait (bounded) for the IPCX busy bit and send @ipc_msg
 *	  now; false = opportunistically send the next queued message only
 *	  if the mailbox is currently free
 *
 * Return: 0 on success (including the async "nothing to do" cases), or
 * -EBUSY if the sync busy-wait gives up. Whenever a message reaches the
 * send/out path its mailbox_data and the message itself are freed here -
 * even on the -EBUSY error - so the caller must not reuse @ipc_msg after
 * a sync call.
 */
int sst_post_message_mrfld(struct intel_sst_drv *sst_drv_ctx,
		struct ipc_post *ipc_msg, bool sync)
{
	struct ipc_post *msg = ipc_msg;
	union ipc_header_mrfld header;
	unsigned int loop_count = 0;
	int retval = 0;
	unsigned long irq_flags;

	dev_dbg(sst_drv_ctx->dev, "Enter: sync: %d\n", sync);
	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
	if (sync) {
		/*
		 * Bounded spin (<= 26 iterations) for the DSP to release the
		 * busy bit; done with the spinlock held and irqs off, so it
		 * must stay short.
		 */
		while (header.p.header_high.part.busy) {
			if (loop_count > 25) {
				dev_err(sst_drv_ctx->dev,
					"sst: Busy wait failed, can't send this msg\n");
				retval = -EBUSY;
				goto out;
			}
			cpu_relax();
			loop_count++;
			header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
		}
	} else {
		if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
			/* queue is empty, nothing to send */
			spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
			dev_dbg(sst_drv_ctx->dev,
					"Empty msg queue... NO Action\n");
			return 0;
		}

		if (header.p.header_high.part.busy) {
			/* DSP still owns IPCX; leave the message queued for later */
			spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
			dev_dbg(sst_drv_ctx->dev, "Busy not free... post later\n");
			return 0;
		}

		/* copy msg from list */
		msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
				struct ipc_post, node);
		list_del(&msg->node);
	}
	dev_dbg(sst_drv_ctx->dev, "sst: Post message: header = %x\n",
				msg->mrfld_header.p.header_high.full);
	dev_dbg(sst_drv_ctx->dev, "sst: size = 0x%x\n",
			msg->mrfld_header.p.header_low_payload);

	/* large messages carry a payload; stage it in the send mailbox first */
	if (msg->mrfld_header.p.header_high.part.large)
		memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
			msg->mailbox_data,
			msg->mrfld_header.p.header_low_payload);

	/* writing the header (with busy set) hands the message to the DSP */
	sst_shim_write64(sst_drv_ctx->shim, SST_IPCX, msg->mrfld_header.full);

out:
	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	/* the message is consumed (or abandoned on -EBUSY): free it here */
	kfree(msg->mailbox_data);
	kfree(msg);
	return retval;
}
176
/*
 * intel_sst_clear_intr_mrfld - acknowledge a "busy" IPC interrupt from the DSP
 * @sst_drv_ctx: SST driver context
 *
 * Runs with ipc_spin_lock held (irqs off) and performs, in order:
 *  1. clear the busy bit in ISRX (write-1-to-clear),
 *  2. ack the inbound message via IPCD (busy off, done on, ACK payload),
 *  3. unmask the busy interrupt again in IMRX.
 * NOTE(review): the register write order appears intentional - ack before
 * unmask - do not reorder.
 */
void intel_sst_clear_intr_mrfld(struct intel_sst_drv *sst_drv_ctx)
{
	union interrupt_reg_mrfld isr;
	union interrupt_reg_mrfld imr;
	union ipc_header_mrfld clear_ipc;
	unsigned long irq_flags;

	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	imr.full = sst_shim_read64(sst_drv_ctx->shim, SST_IMRX);
	isr.full = sst_shim_read64(sst_drv_ctx->shim, SST_ISRX);

	/* write 1 to clear*/
	isr.part.busy_interrupt = 1;
	sst_shim_write64(sst_drv_ctx->shim, SST_ISRX, isr.full);

	/* Set IA done bit */
	clear_ipc.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCD);

	clear_ipc.p.header_high.part.busy = 0;
	clear_ipc.p.header_high.part.done = 1;
	clear_ipc.p.header_low_payload = IPC_ACK_SUCCESS;
	sst_shim_write64(sst_drv_ctx->shim, SST_IPCD, clear_ipc.full);
	/* un mask busy interrupt */
	imr.part.busy_interrupt = 0;
	sst_shim_write64(sst_drv_ctx->shim, SST_IMRX, imr.full);
	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
}
204
205
206/*
207 * process_fw_init - process the FW init msg
208 *
209 * @msg: IPC message mailbox data from FW
210 *
211 * This function processes the FW init msg from FW
212 * marks FW state and prints debug info of loaded FW
213 */
214static void process_fw_init(struct intel_sst_drv *sst_drv_ctx,
215			void *msg)
216{
217	struct ipc_header_fw_init *init =
218		(struct ipc_header_fw_init *)msg;
219	int retval = 0;
220
221	dev_dbg(sst_drv_ctx->dev, "*** FW Init msg came***\n");
222	if (init->result) {
223		sst_set_fw_state_locked(sst_drv_ctx, SST_RESET);
224		dev_err(sst_drv_ctx->dev, "FW Init failed, Error %x\n",
225				init->result);
226		retval = init->result;
227		goto ret;
228	}
229	if (memcmp(&sst_drv_ctx->fw_version, &init->fw_version,
230		   sizeof(init->fw_version)))
231		dev_info(sst_drv_ctx->dev, "FW Version %02x.%02x.%02x.%02x\n",
232			init->fw_version.type, init->fw_version.major,
233			init->fw_version.minor, init->fw_version.build);
234	dev_dbg(sst_drv_ctx->dev, "Build date %s Time %s\n",
235			init->build_info.date, init->build_info.time);
236
237	/* Save FW version */
238	sst_drv_ctx->fw_version.type = init->fw_version.type;
239	sst_drv_ctx->fw_version.major = init->fw_version.major;
240	sst_drv_ctx->fw_version.minor = init->fw_version.minor;
241	sst_drv_ctx->fw_version.build = init->fw_version.build;
242
243ret:
244	sst_wake_up_block(sst_drv_ctx, retval, FW_DWNL_ID, 0 , NULL, 0);
245}
246
247static void process_fw_async_msg(struct intel_sst_drv *sst_drv_ctx,
248			struct ipc_post *msg)
249{
250	u32 msg_id;
251	int str_id;
252	u32 data_size, i;
253	void *data_offset;
254	struct stream_info *stream;
255	u32 msg_low, pipe_id;
256
257	msg_low = msg->mrfld_header.p.header_low_payload;
258	msg_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->cmd_id;
259	data_offset = (msg->mailbox_data + sizeof(struct ipc_dsp_hdr));
260	data_size =  msg_low - (sizeof(struct ipc_dsp_hdr));
261
262	switch (msg_id) {
263	case IPC_SST_PERIOD_ELAPSED_MRFLD:
264		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
265		str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id);
266		if (str_id > 0) {
267			dev_dbg(sst_drv_ctx->dev,
268				"Period elapsed rcvd for pipe id 0x%x\n",
269				pipe_id);
270			stream = &sst_drv_ctx->streams[str_id];
271			/* If stream is dropped, skip processing this message*/
272			if (stream->status == STREAM_INIT)
273				break;
274			if (stream->period_elapsed)
275				stream->period_elapsed(stream->pcm_substream);
276			if (stream->compr_cb)
277				stream->compr_cb(stream->compr_cb_param);
278		}
279		break;
280
281	case IPC_IA_DRAIN_STREAM_MRFLD:
282		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
283		str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id);
284		if (str_id > 0) {
285			stream = &sst_drv_ctx->streams[str_id];
286			if (stream->drain_notify)
287				stream->drain_notify(stream->drain_cb_param);
288		}
289		break;
290
291	case IPC_IA_FW_ASYNC_ERR_MRFLD:
292		dev_err(sst_drv_ctx->dev, "FW sent async error msg:\n");
293		for (i = 0; i < (data_size/4); i++)
294			print_hex_dump(KERN_DEBUG, NULL, DUMP_PREFIX_NONE,
295					16, 4, data_offset, data_size, false);
296		break;
297
298	case IPC_IA_FW_INIT_CMPLT_MRFLD:
299		process_fw_init(sst_drv_ctx, data_offset);
300		break;
301
302	case IPC_IA_BUF_UNDER_RUN_MRFLD:
303		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
304		str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id);
305		if (str_id > 0)
306			dev_err(sst_drv_ctx->dev,
307				"Buffer under-run for pipe:%#x str_id:%d\n",
308				pipe_id, str_id);
309		break;
310
311	default:
312		dev_err(sst_drv_ctx->dev,
313			"Unrecognized async msg from FW msg_id %#x\n", msg_id);
314	}
315}
316
317void sst_process_reply_mrfld(struct intel_sst_drv *sst_drv_ctx,
318		struct ipc_post *msg)
319{
320	unsigned int drv_id;
321	void *data;
322	union ipc_header_high msg_high;
323	u32 msg_low;
324	struct ipc_dsp_hdr *dsp_hdr;
325
326	msg_high = msg->mrfld_header.p.header_high;
327	msg_low = msg->mrfld_header.p.header_low_payload;
328
329	dev_dbg(sst_drv_ctx->dev, "IPC process message header %x payload %x\n",
330			msg->mrfld_header.p.header_high.full,
331			msg->mrfld_header.p.header_low_payload);
332
333	drv_id = msg_high.part.drv_id;
334
335	/* Check for async messages first */
336	if (drv_id == SST_ASYNC_DRV_ID) {
337		/*FW sent async large message*/
338		process_fw_async_msg(sst_drv_ctx, msg);
339		return;
340	}
341
342	/* FW sent short error response for an IPC */
343	if (msg_high.part.result && !msg_high.part.large) {
344		/* 32-bit FW error code in msg_low */
345		dev_err(sst_drv_ctx->dev, "FW sent error response 0x%x", msg_low);
346		sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
347			msg_high.part.drv_id,
348			msg_high.part.msg_id, NULL, 0);
349		return;
350	}
351
352	/*
353	 * Process all valid responses
354	 * if it is a large message, the payload contains the size to
355	 * copy from mailbox
356	 **/
357	if (msg_high.part.large) {
358		data = kmemdup((void *)msg->mailbox_data, msg_low, GFP_KERNEL);
359		if (!data)
360			return;
361		/* Copy command id so that we can use to put sst to reset */
362		dsp_hdr = (struct ipc_dsp_hdr *)data;
363		dev_dbg(sst_drv_ctx->dev, "cmd_id %d\n", dsp_hdr->cmd_id);
364		if (sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
365				msg_high.part.drv_id,
366				msg_high.part.msg_id, data, msg_low))
367			kfree(data);
368	} else {
369		sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
370				msg_high.part.drv_id,
371				msg_high.part.msg_id, NULL, 0);
372	}
373
374}
375