// SPDX-License-Identifier: GPL-2.0-only
/*
 * ssi_protocol.c
 *
 * Implementation of the SSI McSAAB improved protocol.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2013 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 */

#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/if_phonet.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/hsi/hsi.h>
#include <linux/hsi/ssi_protocol.h>

#define SSIP_TXQUEUE_LEN	100
#define SSIP_MAX_MTU		65535
#define SSIP_DEFAULT_MTU	4000
#define PN_MEDIA_SOS		21
#define SSIP_MIN_PN_HDR		6	/* FIXME: Revisit */
#define SSIP_WDTOUT		2000	/* FIXME: has to be 500 msecs */
#define SSIP_KATOUT		15	/* 15 msecs */
#define SSIP_MAX_CMDS		5 /* Number of pre-allocated command buffers */
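/*
 * Round a byte count up to the number of 32-bit SSI frames needed to
 * carry it, e.g. 1..4 bytes -> 1 frame, 5..8 bytes -> 2 frames.
 */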
#define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1)
#define SSIP_CMT_LOADER_SYNC	0x11223344
/*
 * SSI protocol command definitions
 */
#define SSIP_COMMAND(data)	((data) >> 28)
#define SSIP_PAYLOAD(data)	((data) & 0xfffffff)
/* Commands */
#define SSIP_SW_BREAK		0
#define SSIP_BOOTINFO_REQ	1
#define SSIP_BOOTINFO_RESP	2
#define SSIP_WAKETEST_RESULT	3
#define SSIP_START_TRANS	4
#define SSIP_READY		5
/* Payloads */
#define SSIP_DATA_VERSION(data)	((data) & 0xff)
#define SSIP_LOCAL_VERID	1
#define SSIP_WAKETEST_OK	0
#define SSIP_WAKETEST_FAILED	1
#define SSIP_PDU_LENGTH(data)	(((data) >> 8) & 0xffff)
#define SSIP_MSG_ID(data)	((data) & 0xff)
/* Generic Command */
#define SSIP_CMD(cmd, payload)	(((cmd) << 28) | ((payload) & 0xfffffff))
/* Commands for the control channel */
#define SSIP_BOOTINFO_REQ_CMD(ver) \
		SSIP_CMD(SSIP_BOOTINFO_REQ, SSIP_DATA_VERSION(ver))
#define SSIP_BOOTINFO_RESP_CMD(ver) \
		SSIP_CMD(SSIP_BOOTINFO_RESP, SSIP_DATA_VERSION(ver))
#define SSIP_START_TRANS_CMD(pdulen, id) \
		SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id)))
#define SSIP_READY_CMD		SSIP_CMD(SSIP_READY, 0)
#define SSIP_SWBREAK_CMD	SSIP_CMD(SSIP_SW_BREAK, 0)
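/*
 * Control commands are single 32-bit words: bits 31..28 carry the command
 * id and bits 27..0 the payload. For START_TRANS the payload is the PDU
 * length in frames (bits 23..8) plus an 8-bit message id; for the BOOTINFO
 * commands it is the 8-bit protocol version.
 */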

#define SSIP_WAKETEST_FLAG 0

/* Main state machine states */
enum {
	INIT,
	HANDSHAKE,
	ACTIVE,
};
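/*
 * The main state goes INIT -> HANDSHAKE on the BOOTINFO exchange and
 * HANDSHAKE -> ACTIVE once the wake line test result is received
 * (see ssip_rx_bootinforeq() and ssip_rx_waketest() below).
 */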

/* Send state machine states */
enum {
	SEND_IDLE,
	WAIT4READY,
	SEND_READY,
	SENDING,
	SENDING_SWBREAK,
};

/* Receive state machine states */
enum {
	RECV_IDLE,
	RECV_READY,
	RECEIVING,
};

/**
 * struct ssi_protocol - SSI protocol (McSAAB) data
 * @main_state: Main state machine
 * @send_state: TX state machine
 * @recv_state: RX state machine
 * @flags: Flags, currently only used to track the wake line test
 * @rxid: RX data id
 * @txid: TX data id
 * @txqueue_len: TX queue length
 * @tx_wd: TX watchdog
 * @rx_wd: RX watchdog
 * @keep_alive: Workaround for SSI HW bug
 * @lock: To serialize access to this struct
 * @netdev: Phonet network device
 * @txqueue: TX data queue
 * @cmdqueue: Queue of free commands
 * @work: Work for deferred transmission (cmt-speech workaround)
 * @cl: HSI client own reference
 * @link: Link for ssip_list
 * @tx_usecnt: Refcount to keep track of the slaves that use the wake line
 * @channel_id_cmd: HSI channel id for command stream
 * @channel_id_data: HSI channel id for data stream
 */
struct ssi_protocol {
	unsigned int		main_state;
	unsigned int		send_state;
	unsigned int		recv_state;
	unsigned long		flags;
	u8			rxid;
	u8			txid;
	unsigned int		txqueue_len;
	struct timer_list	tx_wd;
	struct timer_list	rx_wd;
	struct timer_list	keep_alive; /* wake-up workaround */
	spinlock_t		lock;
	struct net_device	*netdev;
	struct list_head	txqueue;
	struct list_head	cmdqueue;
	struct work_struct	work;
	struct hsi_client	*cl;
	struct list_head	link;
	atomic_t		tx_usecnt;
	int			channel_id_cmd;
	int			channel_id_data;
};

/* List of ssi protocol instances */
static LIST_HEAD(ssip_list);

static void ssip_rxcmd_complete(struct hsi_msg *msg);

static inline void ssip_set_cmd(struct hsi_msg *msg, u32 cmd)
{
	u32 *data;

	data = sg_virt(msg->sgt.sgl);
	*data = cmd;
}

static inline u32 ssip_get_cmd(struct hsi_msg *msg)
{
	u32 *data;

	data = sg_virt(msg->sgt.sgl);

	return *data;
}

static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
{
	skb_frag_t *frag;
	struct scatterlist *sg;
	int i;

	BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1));

	sg = msg->sgt.sgl;
	sg_set_buf(sg, skb->data, skb_headlen(skb));
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		sg = sg_next(sg);
		BUG_ON(!sg);
		frag = &skb_shinfo(skb)->frags[i];
		sg_set_page(sg, skb_frag_page(frag), skb_frag_size(frag),
				skb_frag_off(frag));
	}
}

static void ssip_free_data(struct hsi_msg *msg)
{
	struct sk_buff *skb;

	skb = msg->context;
	pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context,
								skb);
	msg->destructor = NULL;
	dev_kfree_skb(skb);
	hsi_free_msg(msg);
}

static struct hsi_msg *ssip_alloc_data(struct ssi_protocol *ssi,
					struct sk_buff *skb, gfp_t flags)
{
	struct hsi_msg *msg;

	msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags);
	if (!msg)
		return NULL;
	ssip_skb_to_msg(skb, msg);
	msg->destructor = ssip_free_data;
	msg->channel = ssi->channel_id_data;
	msg->context = skb;

	return msg;
}

static inline void ssip_release_cmd(struct hsi_msg *msg)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl);

	dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg));
	spin_lock_bh(&ssi->lock);
	list_add_tail(&msg->link, &ssi->cmdqueue);
	spin_unlock_bh(&ssi->lock);
}

static struct hsi_msg *ssip_claim_cmd(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg;

	BUG_ON(list_empty(&ssi->cmdqueue));

	spin_lock_bh(&ssi->lock);
	msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
	list_del(&msg->link);
	spin_unlock_bh(&ssi->lock);
	msg->destructor = ssip_release_cmd;

	return msg;
}

static void ssip_free_cmds(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
		list_del(&msg->link);
		msg->destructor = NULL;
		kfree(sg_virt(msg->sgt.sgl));
		hsi_free_msg(msg);
	}
}

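/*
 * Pre-allocate SSIP_MAX_CMDS single-word command messages on the control
 * channel. They are recycled through cmdqueue by ssip_claim_cmd() and
 * ssip_release_cmd() instead of being allocated per command.
 */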
static int ssip_alloc_cmds(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg;
	u32 *buf;
	unsigned int i;

	for (i = 0; i < SSIP_MAX_CMDS; i++) {
		msg = hsi_alloc_msg(1, GFP_KERNEL);
		if (!msg)
			goto out;
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			hsi_free_msg(msg);
			goto out;
		}
		sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
		msg->channel = ssi->channel_id_cmd;
		list_add_tail(&msg->link, &ssi->cmdqueue);
	}

	return 0;
out:
	ssip_free_cmds(ssi);

	return -ENOMEM;
}

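/*
 * RX/TX state setters: the per-direction watchdog and the keep_alive timer
 * (SSI HW bug workaround) are armed while a transfer is in flight and
 * stopped again once both directions are idle. Callers hold ssi->lock.
 */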
static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state)
{
	ssi->recv_state = state;
	switch (state) {
	case RECV_IDLE:
		del_timer(&ssi->rx_wd);
		if (ssi->send_state == SEND_IDLE)
			del_timer(&ssi->keep_alive);
		break;
	case RECV_READY:
		/* CMT speech workaround */
		if (atomic_read(&ssi->tx_usecnt))
			break;
		fallthrough;
	case RECEIVING:
		mod_timer(&ssi->keep_alive, jiffies +
						msecs_to_jiffies(SSIP_KATOUT));
		mod_timer(&ssi->rx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		break;
	default:
		break;
	}
}

static void ssip_set_txstate(struct ssi_protocol *ssi, unsigned int state)
{
	ssi->send_state = state;
	switch (state) {
	case SEND_IDLE:
	case SEND_READY:
		del_timer(&ssi->tx_wd);
		if (ssi->recv_state == RECV_IDLE)
			del_timer(&ssi->keep_alive);
		break;
	case WAIT4READY:
	case SENDING:
	case SENDING_SWBREAK:
		mod_timer(&ssi->keep_alive,
				jiffies + msecs_to_jiffies(SSIP_KATOUT));
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		break;
	default:
		break;
	}
}

struct hsi_client *ssip_slave_get_master(struct hsi_client *slave)
{
	struct hsi_client *master = ERR_PTR(-ENODEV);
	struct ssi_protocol *ssi;

	list_for_each_entry(ssi, &ssip_list, link)
		if (slave->device.parent == ssi->cl->device.parent) {
			master = ssi->cl;
			break;
		}

	return master;
}
EXPORT_SYMBOL_GPL(ssip_slave_get_master);

int ssip_slave_start_tx(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	dev_dbg(&master->device, "start TX %d\n", atomic_read(&ssi->tx_usecnt));
	spin_lock_bh(&ssi->lock);
	if (ssi->send_state == SEND_IDLE) {
		ssip_set_txstate(ssi, WAIT4READY);
		hsi_start_tx(master);
	}
	spin_unlock_bh(&ssi->lock);
	atomic_inc(&ssi->tx_usecnt);

	return 0;
}
EXPORT_SYMBOL_GPL(ssip_slave_start_tx);

int ssip_slave_stop_tx(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	WARN_ON_ONCE(atomic_read(&ssi->tx_usecnt) == 0);

	if (atomic_dec_and_test(&ssi->tx_usecnt)) {
		spin_lock_bh(&ssi->lock);
		if ((ssi->send_state == SEND_READY) ||
			(ssi->send_state == WAIT4READY)) {
			ssip_set_txstate(ssi, SEND_IDLE);
			hsi_stop_tx(master);
		}
		spin_unlock_bh(&ssi->lock);
	}
	dev_dbg(&master->device, "stop TX %d\n", atomic_read(&ssi->tx_usecnt));

	return 0;
}
EXPORT_SYMBOL_GPL(ssip_slave_stop_tx);

int ssip_slave_running(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	return netif_running(ssi->netdev);
}
EXPORT_SYMBOL_GPL(ssip_slave_running);

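/*
 * Reset the protocol back to its initial state: take the carrier down,
 * flush pending HSI transfers, stop the wake line test and all timers,
 * and free any data messages still sitting in the TX queue.
 */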
static void ssip_reset(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	if (netif_running(ssi->netdev))
		netif_carrier_off(ssi->netdev);
	hsi_flush(cl);
	spin_lock_bh(&ssi->lock);
	if (ssi->send_state != SEND_IDLE)
		hsi_stop_tx(cl);
	spin_unlock_bh(&ssi->lock);
	if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
		ssi_waketest(cl, 0); /* FIXME: To be removed */
	spin_lock_bh(&ssi->lock);
	del_timer(&ssi->rx_wd);
	del_timer(&ssi->tx_wd);
	del_timer(&ssi->keep_alive);
	ssi->main_state = 0;
	ssi->send_state = 0;
	ssi->recv_state = 0;
	ssi->flags = 0;
	ssi->rxid = 0;
	ssi->txid = 0;
	list_for_each_safe(head, tmp, &ssi->txqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		dev_dbg(&cl->device, "Pending TX data\n");
		list_del(head);
		ssip_free_data(msg);
	}
	ssi->txqueue_len = 0;
	spin_unlock_bh(&ssi->lock);
}

static void ssip_dump_state(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	spin_lock_bh(&ssi->lock);
	dev_err(&cl->device, "Main state: %d\n", ssi->main_state);
	dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state);
	dev_err(&cl->device, "Send state: %d\n", ssi->send_state);
	dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ?
							"Online" : "Offline");
	dev_err(&cl->device, "Wake test %d\n",
				test_bit(SSIP_WAKETEST_FLAG, &ssi->flags));
	dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid);
	dev_err(&cl->device, "Data TX id: %d\n", ssi->txid);

	list_for_each_entry(msg, &ssi->txqueue, link)
		dev_err(&cl->device, "pending TX data (%p)\n", msg);
	spin_unlock_bh(&ssi->lock);
}

static void ssip_error(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	ssip_dump_state(cl);
	ssip_reset(cl);
	msg = ssip_claim_cmd(ssi);
	msg->complete = ssip_rxcmd_complete;
	hsi_async_read(cl, msg);
}

static void ssip_keep_alive(struct timer_list *t)
{
	struct ssi_protocol *ssi = from_timer(ssi, t, keep_alive);
	struct hsi_client *cl = ssi->cl;

	dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n",
		ssi->main_state, ssi->recv_state, ssi->send_state);

	spin_lock(&ssi->lock);
	if (ssi->recv_state == RECV_IDLE)
		switch (ssi->send_state) {
		case SEND_READY:
			if (atomic_read(&ssi->tx_usecnt) == 0)
				break;
			fallthrough;
			/*
			 * Workaround for cmt-speech: in that case
			 * we rely on audio timers.
			 */
		case SEND_IDLE:
			spin_unlock(&ssi->lock);
			return;
		}
	mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT));
	spin_unlock(&ssi->lock);
}

static void ssip_rx_wd(struct timer_list *t)
{
	struct ssi_protocol *ssi = from_timer(ssi, t, rx_wd);
	struct hsi_client *cl = ssi->cl;

	dev_err(&cl->device, "Watchdog triggered\n");
	ssip_error(cl);
}

static void ssip_tx_wd(struct timer_list *t)
{
	struct ssi_protocol *ssi = from_timer(ssi, t, tx_wd);
	struct hsi_client *cl = ssi->cl;

	dev_err(&cl->device, "Watchdog triggered\n");
	ssip_error(cl);
}

static void ssip_send_bootinfo_req_cmd(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n");
	msg = ssip_claim_cmd(ssi);
	ssip_set_cmd(msg, SSIP_BOOTINFO_REQ_CMD(SSIP_LOCAL_VERID));
	msg->complete = ssip_release_cmd;
	hsi_async_write(cl, msg);
	dev_dbg(&cl->device, "Issuing RX command\n");
	msg = ssip_claim_cmd(ssi);
	msg->complete = ssip_rxcmd_complete;
	hsi_async_read(cl, msg);
}

static void ssip_start_rx(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state,
						ssi->recv_state);
	spin_lock_bh(&ssi->lock);
	/*
	 * We can have two UP events in a row due to a short low-to-high
	 * transition. Therefore we need to ignore the second UP event.
	 */
	if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) {
		spin_unlock_bh(&ssi->lock);
		return;
	}
	ssip_set_rxstate(ssi, RECV_READY);
	spin_unlock_bh(&ssi->lock);

	msg = ssip_claim_cmd(ssi);
	ssip_set_cmd(msg, SSIP_READY_CMD);
	msg->complete = ssip_release_cmd;
	dev_dbg(&cl->device, "Send READY\n");
	hsi_async_write(cl, msg);
}

static void ssip_stop_rx(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state);
	spin_lock_bh(&ssi->lock);
	if (likely(ssi->main_state == ACTIVE))
		ssip_set_rxstate(ssi, RECV_IDLE);
	spin_unlock_bh(&ssi->lock);
}

static void ssip_free_strans(struct hsi_msg *msg)
{
	ssip_free_data(msg->context);
	ssip_release_cmd(msg);
}

static void ssip_strans_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *data;

	data = msg->context;
	ssip_release_cmd(msg);
	spin_lock_bh(&ssi->lock);
	ssip_set_txstate(ssi, SENDING);
	spin_unlock_bh(&ssi->lock);
	hsi_async_write(cl, data);
}

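/*
 * Dequeue the next data message and send a START TRANS command carrying
 * the PDU length in frames and the TX message id. The data itself is
 * written from ssip_strans_complete() once the command has gone out.
 */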
static int ssip_xmit(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg, *dmsg;
	struct sk_buff *skb;

	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		spin_unlock_bh(&ssi->lock);
		return 0;
	}
	dmsg = list_first_entry(&ssi->txqueue, struct hsi_msg, link);
	list_del(&dmsg->link);
	ssi->txqueue_len--;
	spin_unlock_bh(&ssi->lock);

	msg = ssip_claim_cmd(ssi);
	skb = dmsg->context;
	msg->context = dmsg;
	msg->complete = ssip_strans_complete;
	msg->destructor = ssip_free_strans;

	spin_lock_bh(&ssi->lock);
	ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len),
								ssi->txid));
	ssi->txid++;
	ssip_set_txstate(ssi, SENDING);
	spin_unlock_bh(&ssi->lock);

	dev_dbg(&cl->device, "Send STRANS (%d frames)\n",
						SSIP_BYTES_TO_FRAMES(skb->len));

	return hsi_async_write(cl, msg);
}

/* In soft IRQ context */
static void ssip_pn_rx(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (unlikely(!netif_running(dev))) {
		dev_dbg(&dev->dev, "Drop RX packet\n");
		dev->stats.rx_dropped++;
		dev_kfree_skb(skb);
		return;
	}
	if (unlikely(!pskb_may_pull(skb, SSIP_MIN_PN_HDR))) {
		dev_dbg(&dev->dev, "Error drop RX packet\n");
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	/* length field is exchanged in network byte order */
	((u16 *)skb->data)[2] = ntohs(((u16 *)skb->data)[2]);
	dev_dbg(&dev->dev, "RX length fixed (%04x -> %u)\n",
			((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2]));

	skb->protocol = htons(ETH_P_PHONET);
	skb_reset_mac_header(skb);
	__skb_pull(skb, 1);
	netif_rx(skb);
}

static void ssip_rx_data_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct sk_buff *skb;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "RX data error\n");
		ssip_free_data(msg);
		ssip_error(cl);
		return;
	}
	del_timer(&ssi->rx_wd); /* FIXME: Revisit */
	skb = msg->context;
	ssip_pn_rx(skb);
	hsi_free_msg(msg);
}

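/*
 * Handlers for the individual control commands. They are all invoked from
 * ssip_rxcmd_complete() in HSI message completion context.
 */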
static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	/* Workaround: Ignore CMT Loader message leftover */
	if (cmd == SSIP_CMT_LOADER_SYNC)
		return;

	switch (ssi->main_state) {
	case ACTIVE:
		dev_err(&cl->device, "Boot info req on active state\n");
		ssip_error(cl);
		fallthrough;
	case INIT:
	case HANDSHAKE:
		spin_lock_bh(&ssi->lock);
		ssi->main_state = HANDSHAKE;
		spin_unlock_bh(&ssi->lock);

		if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
			ssi_waketest(cl, 1); /* FIXME: To be removed */

		spin_lock_bh(&ssi->lock);
		/* Start boot handshake watchdog */
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Send BOOTINFO_RESP\n");
		if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
			dev_warn(&cl->device, "boot info req verid mismatch\n");
		msg = ssip_claim_cmd(ssi);
		ssip_set_cmd(msg, SSIP_BOOTINFO_RESP_CMD(SSIP_LOCAL_VERID));
		msg->complete = ssip_release_cmd;
		hsi_async_write(cl, msg);
		break;
	default:
		dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state);
		break;
	}
}

static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
		dev_warn(&cl->device, "boot info resp verid mismatch\n");

	spin_lock_bh(&ssi->lock);
	if (ssi->main_state != ACTIVE)
		/* Use tx_wd as a boot watchdog in non ACTIVE state */
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
	else
		dev_dbg(&cl->device, "boot info resp ignored M(%d)\n",
							ssi->main_state);
	spin_unlock_bh(&ssi->lock);
}

static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	unsigned int wkres = SSIP_PAYLOAD(cmd);

	spin_lock_bh(&ssi->lock);
	if (ssi->main_state != HANDSHAKE) {
		dev_dbg(&cl->device, "wake lines test ignored M(%d)\n",
							ssi->main_state);
		spin_unlock_bh(&ssi->lock);
		return;
	}
	spin_unlock_bh(&ssi->lock);

	if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
		ssi_waketest(cl, 0); /* FIXME: To be removed */

	spin_lock_bh(&ssi->lock);
	ssi->main_state = ACTIVE;
	del_timer(&ssi->tx_wd); /* Stop boot handshake timer */
	spin_unlock_bh(&ssi->lock);

	dev_notice(&cl->device, "WAKELINES TEST %s\n",
				wkres & SSIP_WAKETEST_FAILED ? "FAILED" : "OK");
	if (wkres & SSIP_WAKETEST_FAILED) {
		ssip_error(cl);
		return;
	}
	dev_dbg(&cl->device, "CMT is ONLINE\n");
	netif_wake_queue(ssi->netdev);
	netif_carrier_on(ssi->netdev);
}

static void ssip_rx_ready(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	spin_lock_bh(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n",
					ssi->send_state, ssi->main_state);
		spin_unlock_bh(&ssi->lock);
		return;
	}
	if (ssi->send_state != WAIT4READY) {
		dev_dbg(&cl->device, "Ignore spurious READY command\n");
		spin_unlock_bh(&ssi->lock);
		return;
	}
	ssip_set_txstate(ssi, SEND_READY);
	spin_unlock_bh(&ssi->lock);
	ssip_xmit(cl);
}

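/*
 * Handle START TRANS: the command carries the PDU length in frames and a
 * message id which must match the expected rxid. Allocate a matching skb
 * (len * 4 bytes) and queue an asynchronous read for the data PDU.
 */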
static void ssip_rx_strans(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct sk_buff *skb;
	struct hsi_msg *msg;
	int len = SSIP_PDU_LENGTH(cmd);

	dev_dbg(&cl->device, "RX strans: %d frames\n", len);
	spin_lock_bh(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n",
					ssi->send_state, ssi->main_state);
		spin_unlock_bh(&ssi->lock);
		return;
	}
	ssip_set_rxstate(ssi, RECEIVING);
	if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) {
		dev_err(&cl->device, "START TRANS id %d expected %d\n",
					SSIP_MSG_ID(cmd), ssi->rxid);
		spin_unlock_bh(&ssi->lock);
		goto out1;
	}
	ssi->rxid++;
	spin_unlock_bh(&ssi->lock);
	skb = netdev_alloc_skb(ssi->netdev, len * 4);
	if (unlikely(!skb)) {
		dev_err(&cl->device, "No memory for rx skb\n");
		goto out1;
	}
	skb_put(skb, len * 4);
	msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
	if (unlikely(!msg)) {
		dev_err(&cl->device, "No memory for RX data msg\n");
		goto out2;
	}
	msg->complete = ssip_rx_data_complete;
	hsi_async_read(cl, msg);

	return;
out2:
	dev_kfree_skb(skb);
out1:
	ssip_error(cl);
}

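/*
 * Completion handler for reads on the control channel: re-queue the read
 * for the next command and dispatch on the command id.
 */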
static void ssip_rxcmd_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	u32 cmd = ssip_get_cmd(msg);
	unsigned int cmdid = SSIP_COMMAND(cmd);

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "RX error detected\n");
		ssip_release_cmd(msg);
		ssip_error(cl);
		return;
	}
	hsi_async_read(cl, msg);
	dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd);
	switch (cmdid) {
	case SSIP_SW_BREAK:
		/* Ignored */
		break;
	case SSIP_BOOTINFO_REQ:
		ssip_rx_bootinforeq(cl, cmd);
		break;
	case SSIP_BOOTINFO_RESP:
		ssip_rx_bootinforesp(cl, cmd);
		break;
	case SSIP_WAKETEST_RESULT:
		ssip_rx_waketest(cl, cmd);
		break;
	case SSIP_START_TRANS:
		ssip_rx_strans(cl, cmd);
		break;
	case SSIP_READY:
		ssip_rx_ready(cl);
		break;
	default:
		dev_warn(&cl->device, "command 0x%08x not supported\n", cmd);
		break;
	}
}

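/*
 * TX completion path: after the last queued data message a SW BREAK
 * command is sent; its completion either releases the wake line (TX idle)
 * or keeps the link ready while a cmt-speech user still holds it.
 */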
static void ssip_swbreak_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	ssip_release_cmd(msg);
	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		if (atomic_read(&ssi->tx_usecnt)) {
			ssip_set_txstate(ssi, SEND_READY);
		} else {
			ssip_set_txstate(ssi, SEND_IDLE);
			hsi_stop_tx(cl);
		}
		spin_unlock_bh(&ssi->lock);
	} else {
		spin_unlock_bh(&ssi->lock);
		ssip_xmit(cl);
	}
	netif_wake_queue(ssi->netdev);
}

static void ssip_tx_data_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *cmsg;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "TX data error\n");
		ssip_error(cl);
		goto out;
	}
	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		ssip_set_txstate(ssi, SENDING_SWBREAK);
		spin_unlock_bh(&ssi->lock);
		cmsg = ssip_claim_cmd(ssi);
		ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD);
		cmsg->complete = ssip_swbreak_complete;
		dev_dbg(&cl->device, "Send SWBREAK\n");
		hsi_async_write(cl, cmsg);
	} else {
		spin_unlock_bh(&ssi->lock);
		ssip_xmit(cl);
	}
out:
	ssip_free_data(msg);
}

static void ssip_port_event(struct hsi_client *cl, unsigned long event)
{
	switch (event) {
	case HSI_EVENT_START_RX:
		ssip_start_rx(cl);
		break;
	case HSI_EVENT_STOP_RX:
		ssip_stop_rx(cl);
		break;
	default:
		return;
	}
}

static int ssip_pn_open(struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	int err;

	err = hsi_claim_port(cl, 1);
	if (err < 0) {
		dev_err(&cl->device, "SSI port already claimed\n");
		return err;
	}
	err = hsi_register_port_event(cl, ssip_port_event);
	if (err < 0) {
		dev_err(&cl->device, "Register HSI port event failed (%d)\n",
			err);
		hsi_release_port(cl);
		return err;
	}
	dev_dbg(&cl->device, "Configuring SSI port\n");
	hsi_setup(cl);

	if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
		ssi_waketest(cl, 1); /* FIXME: To be removed */

	spin_lock_bh(&ssi->lock);
	ssi->main_state = HANDSHAKE;
	spin_unlock_bh(&ssi->lock);

	ssip_send_bootinfo_req_cmd(cl);

	return 0;
}

static int ssip_pn_stop(struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);

	ssip_reset(cl);
	hsi_unregister_port_event(cl);
	hsi_release_port(cl);

	return 0;
}

static void ssip_xmit_work(struct work_struct *work)
{
	struct ssi_protocol *ssi =
				container_of(work, struct ssi_protocol, work);
	struct hsi_client *cl = ssi->cl;

	ssip_xmit(cl);
}

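/*
 * Phonet TX path: pad the skb to a multiple of 32 bits, swap the length
 * field to network byte order, queue the data message and, depending on
 * the current send state, raise the wake line or schedule ssip_xmit().
 */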
static netdev_tx_t ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	if ((skb->protocol != htons(ETH_P_PHONET)) ||
					(skb->len < SSIP_MIN_PN_HDR))
		goto drop;
	/* Pad to 32 bits - FIXME: Revisit */
	if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3)))
		goto inc_dropped;

	/*
	 * Modem sends Phonet messages over SSI with its own endianness.
	 * Assume that modem has the same endianness as we do.
	 */
	if (skb_cow_head(skb, 0))
		goto drop;

	/* length field is exchanged in network byte order */
	((u16 *)skb->data)[2] = htons(((u16 *)skb->data)[2]);

	msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
	if (!msg) {
		dev_dbg(&cl->device, "Dropping tx data: No memory\n");
		goto drop;
	}
	msg->complete = ssip_tx_data_complete;

	spin_lock_bh(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n");
		goto drop2;
	}
	list_add_tail(&msg->link, &ssi->txqueue);
	ssi->txqueue_len++;
	if (dev->tx_queue_len < ssi->txqueue_len) {
		dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len);
		netif_stop_queue(dev);
	}
	if (ssi->send_state == SEND_IDLE) {
		ssip_set_txstate(ssi, WAIT4READY);
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len);
		hsi_start_tx(cl);
	} else if (ssi->send_state == SEND_READY) {
		/* Needed for cmt-speech workaround */
		dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n",
							ssi->txqueue_len);
		spin_unlock_bh(&ssi->lock);
		schedule_work(&ssi->work);
	} else {
		spin_unlock_bh(&ssi->lock);
	}
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;
drop2:
	hsi_free_msg(msg);
drop:
	dev_kfree_skb(skb);
inc_dropped:
	dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

/* CMT reset event handler */
void ssip_reset_event(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	dev_err(&ssi->cl->device, "CMT reset detected!\n");
	ssip_error(ssi->cl);
}
EXPORT_SYMBOL_GPL(ssip_reset_event);

static const struct net_device_ops ssip_pn_ops = {
	.ndo_open	= ssip_pn_open,
	.ndo_stop	= ssip_pn_stop,
	.ndo_start_xmit	= ssip_pn_xmit,
};

static void ssip_pn_setup(struct net_device *dev)
{
	static const u8 addr = PN_MEDIA_SOS;

	dev->features		= 0;
	dev->netdev_ops		= &ssip_pn_ops;
	dev->type		= ARPHRD_PHONET;
	dev->flags		= IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu		= SSIP_DEFAULT_MTU;
	dev->hard_header_len	= 1;
	dev->addr_len		= 1;
	dev_addr_set(dev, &addr);
	dev->tx_queue_len	= SSIP_TXQUEUE_LEN;

	dev->needs_free_netdev	= true;
	dev->header_ops		= &phonet_header_ops;
}

static int ssi_protocol_probe(struct device *dev)
{
	static const char ifname[] = "phonet%d";
	struct hsi_client *cl = to_hsi_client(dev);
	struct ssi_protocol *ssi;
	int err;

	ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
	if (!ssi)
		return -ENOMEM;

	spin_lock_init(&ssi->lock);
	timer_setup(&ssi->rx_wd, ssip_rx_wd, TIMER_DEFERRABLE);
	timer_setup(&ssi->tx_wd, ssip_tx_wd, TIMER_DEFERRABLE);
	timer_setup(&ssi->keep_alive, ssip_keep_alive, 0);
	INIT_LIST_HEAD(&ssi->txqueue);
	INIT_LIST_HEAD(&ssi->cmdqueue);
	atomic_set(&ssi->tx_usecnt, 0);
	hsi_client_set_drvdata(cl, ssi);
	ssi->cl = cl;
	INIT_WORK(&ssi->work, ssip_xmit_work);

	ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control");
	if (ssi->channel_id_cmd < 0) {
		err = ssi->channel_id_cmd;
		dev_err(dev, "Could not get cmd channel (%d)\n", err);
		goto out;
	}

	ssi->channel_id_data = hsi_get_channel_id_by_name(cl, "mcsaab-data");
	if (ssi->channel_id_data < 0) {
		err = ssi->channel_id_data;
		dev_err(dev, "Could not get data channel (%d)\n", err);
		goto out;
	}

	err = ssip_alloc_cmds(ssi);
	if (err < 0) {
		dev_err(dev, "No memory for commands\n");
		goto out;
	}

	ssi->netdev = alloc_netdev(0, ifname, NET_NAME_UNKNOWN, ssip_pn_setup);
	if (!ssi->netdev) {
		dev_err(dev, "No memory for netdev\n");
		err = -ENOMEM;
		goto out1;
	}

	/* MTU range: 6 - 65535 */
	ssi->netdev->min_mtu = PHONET_MIN_MTU;
	ssi->netdev->max_mtu = SSIP_MAX_MTU;

	SET_NETDEV_DEV(ssi->netdev, dev);
	netif_carrier_off(ssi->netdev);
	err = register_netdev(ssi->netdev);
	if (err < 0) {
		dev_err(dev, "Register netdev failed (%d)\n", err);
		goto out2;
	}

	list_add(&ssi->link, &ssip_list);

	dev_dbg(dev, "channel configuration: cmd=%d, data=%d\n",
		ssi->channel_id_cmd, ssi->channel_id_data);

	return 0;
out2:
	free_netdev(ssi->netdev);
out1:
	ssip_free_cmds(ssi);
out:
	kfree(ssi);

	return err;
}

static int ssi_protocol_remove(struct device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	list_del(&ssi->link);
	unregister_netdev(ssi->netdev);
	ssip_free_cmds(ssi);
	hsi_client_set_drvdata(cl, NULL);
	kfree(ssi);

	return 0;
}

static struct hsi_client_driver ssip_driver = {
	.driver = {
		.name	= "ssi-protocol",
		.owner	= THIS_MODULE,
		.probe	= ssi_protocol_probe,
		.remove	= ssi_protocol_remove,
	},
};

static int __init ssip_init(void)
{
	pr_info("SSI protocol aka McSAAB added\n");

	return hsi_register_client_driver(&ssip_driver);
}
module_init(ssip_init);

static void __exit ssip_exit(void)
{
	hsi_unregister_client_driver(&ssip_driver);
	pr_info("SSI protocol driver removed\n");
}
module_exit(ssip_exit);

MODULE_ALIAS("hsi:ssi-protocol");
MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
MODULE_AUTHOR("Remi Denis-Courmont <remi.denis-courmont@nokia.com>");
MODULE_DESCRIPTION("SSI protocol improved aka McSAAB");
MODULE_LICENSE("GPL");