// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020  Realtek Corporation
 */

#include <linux/devcoredump.h>

#include "cam.h"
#include "chan.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "ps.h"
#include "reg.h"
#include "ser.h"
#include "util.h"

#define SER_RECFG_TIMEOUT 1000
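
/* Overview (as reconstructed from the flow below): SER handles three
 * recovery levels. L0 errors are recovered by the firmware itself and
 * need no driver state change. L1 is a TRX reset driven by a
 * driver/firmware handshake whose steps are labeled pre-M0 and M1..M5:
 * the driver kicks each step via rtw89_mac_set_err_status() and the
 * firmware acknowledges through rtw89_ser_notify(). L2 is a full
 * restart: dump firmware memory for devcoredump, tear down MAC
 * bindings, and call ieee80211_restart_hw(). Every handshake step is
 * guarded by an alarm that escalates to L2 if the firmware does not
 * answer in time.
 */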

enum ser_evt {
	SER_EV_NONE,
	SER_EV_STATE_IN,
	SER_EV_STATE_OUT,
	SER_EV_L1_RESET_PREPARE, /* pre-M0 */
	SER_EV_L1_RESET, /* M1 */
	SER_EV_DO_RECOVERY, /* M3 */
	SER_EV_MAC_RESET_DONE, /* M5 */
	SER_EV_L2_RESET,
	SER_EV_L2_RECFG_DONE,
	SER_EV_L2_RECFG_TIMEOUT,
	SER_EV_M1_TIMEOUT,
	SER_EV_M3_TIMEOUT,
	SER_EV_FW_M5_TIMEOUT,
	SER_EV_L0_RESET,
	SER_EV_MAXX
};

enum ser_state {
	SER_IDLE_ST,
	SER_L1_RESET_PRE_ST,
	SER_RESET_TRX_ST,
	SER_DO_HCI_ST,
	SER_L2_RESET_ST,
	SER_ST_MAX_ST
};

struct ser_msg {
	struct list_head list;
	u8 event;
};

struct state_ent {
	u8 state;
	char *name;
	void (*st_func)(struct rtw89_ser *ser, u8 event);
};

struct event_ent {
	u8 event;
	char *name;
};

static char *ser_ev_name(struct rtw89_ser *ser, u8 event)
{
	if (event < SER_EV_MAXX)
		return ser->ev_tbl[event].name;

	return "err_ev_name";
}

static char *ser_st_name(struct rtw89_ser *ser)
{
	if (ser->state < SER_ST_MAX_ST)
		return ser->st_tbl[ser->state].name;

	return "err_st_name";
}

#define RTW89_DEF_SER_CD_TYPE(_name, _type, _size) \
struct ser_cd_ ## _name { \
	u32 type; \
	u32 type_size; \
	u64 padding; \
	u8 data[_size]; \
} __packed; \
static void ser_cd_ ## _name ## _init(struct ser_cd_ ## _name *p) \
{ \
	p->type = _type; \
	p->type_size = sizeof(p->data); \
	p->padding = 0x0123456789abcdef; \
}
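
/* For reference, the RTW89_DEF_SER_CD_TYPE() invocation below for
 * fw_rsvd_ple expands to (hand-expanded here, not generated):
 *
 *	struct ser_cd_fw_rsvd_ple {
 *		u32 type;
 *		u32 type_size;
 *		u64 padding;
 *		u8 data[RTW89_FW_RSVD_PLE_SIZE];
 *	} __packed;
 *	static void ser_cd_fw_rsvd_ple_init(struct ser_cd_fw_rsvd_ple *p)
 *	{
 *		p->type = RTW89_SER_CD_FW_RSVD_PLE;
 *		p->type_size = sizeof(p->data);
 *		p->padding = 0x0123456789abcdef;
 *	}
 *
 * Each dump section is therefore self-describing: a type tag, the
 * payload size, and a fixed padding pattern (presumably a marker for
 * offline parsers).
 */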

enum rtw89_ser_cd_type {
	RTW89_SER_CD_FW_RSVD_PLE	= 0,
	RTW89_SER_CD_FW_BACKTRACE	= 1,
};

RTW89_DEF_SER_CD_TYPE(fw_rsvd_ple,
		      RTW89_SER_CD_FW_RSVD_PLE,
		      RTW89_FW_RSVD_PLE_SIZE);

RTW89_DEF_SER_CD_TYPE(fw_backtrace,
		      RTW89_SER_CD_FW_BACKTRACE,
		      RTW89_FW_BACKTRACE_MAX_SIZE);

struct rtw89_ser_cd_buffer {
	struct ser_cd_fw_rsvd_ple fwple;
	struct ser_cd_fw_backtrace fwbt;
} __packed;

static struct rtw89_ser_cd_buffer *rtw89_ser_cd_prep(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser_cd_buffer *buf;

	buf = vzalloc(sizeof(*buf));
	if (!buf)
		return NULL;

	ser_cd_fw_rsvd_ple_init(&buf->fwple);
	ser_cd_fw_backtrace_init(&buf->fwbt);

	return buf;
}

static void rtw89_ser_cd_send(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf)
{
	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER sends core dump\n");

	/* After calling dev_coredump, buf's lifetime is owned by the
	 * device coredump framework. Note that a new dump will be
	 * discarded if a previous one hasn't been released by the
	 * framework yet.
	 */
	dev_coredumpv(rtwdev->dev, buf, sizeof(*buf), GFP_KERNEL);
}

static void rtw89_ser_cd_free(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf, bool free_self)
{
	if (!free_self)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER frees core dump by self\n");

	/* If a problem occurs while filling the core dump data, we
	 * don't hand it to the device coredump framework. Instead, we
	 * free buf ourselves.
	 */
	vfree(buf);
}

static void ser_state_run(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
		    ser_st_name(ser), ser_ev_name(ser, evt));

	mutex_lock(&rtwdev->mutex);
	rtw89_leave_lps(rtwdev);
	mutex_unlock(&rtwdev->mutex);

	ser->st_tbl[ser->state].st_func(ser, evt);
}
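
/* Every state change synchronously delivers SER_EV_STATE_OUT to the
 * old state and SER_EV_STATE_IN to the new one, so per-state setup and
 * teardown live in the state handlers themselves.
 */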

static void ser_state_goto(struct rtw89_ser *ser, u8 new_state)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (ser->state == new_state || new_state >= SER_ST_MAX_ST)
		return;
	ser_state_run(ser, SER_EV_STATE_OUT);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s goto -> %s\n",
		    ser_st_name(ser), ser->st_tbl[new_state].name);

	ser->state = new_state;
	ser_state_run(ser, SER_EV_STATE_IN);
}

static struct ser_msg *__rtw89_ser_dequeue_msg(struct rtw89_ser *ser)
{
	struct ser_msg *msg;

	spin_lock_irq(&ser->msg_q_lock);
	msg = list_first_entry_or_null(&ser->msg_q, struct ser_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_irq(&ser->msg_q_lock);

	return msg;
}

static void rtw89_ser_hdl_work(struct work_struct *work)
{
	struct ser_msg *msg;
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_hdl_work);

	while ((msg = __rtw89_ser_dequeue_msg(ser))) {
		ser_state_run(ser, msg->event);
		kfree(msg);
	}
}

static int ser_send_msg(struct rtw89_ser *ser, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct ser_msg *msg = NULL;

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return -EIO;

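	/* GFP_ATOMIC: the allocation must not sleep here, presumably
	 * because rtw89_ser_notify() can report errors from atomic
	 * (e.g. IRQ) context.
	 */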
	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	msg->event = event;

	spin_lock_irq(&ser->msg_q_lock);
	list_add(&msg->list, &ser->msg_q);
	spin_unlock_irq(&ser->msg_q_lock);

	ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work);
	return 0;
}

static void rtw89_ser_alarm_work(struct work_struct *work)
{
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_alarm_work.work);

	ser_send_msg(ser, ser->alarm_event);
	ser->alarm_event = SER_EV_NONE;
}

static void ser_set_alarm(struct rtw89_ser *ser, u32 ms, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return;

	ser->alarm_event = event;
	ieee80211_queue_delayed_work(rtwdev->hw, &ser->ser_alarm_work,
				     msecs_to_jiffies(ms));
}

static void ser_del_alarm(struct rtw89_ser *ser)
{
	cancel_delayed_work(&ser->ser_alarm_work);
	ser->alarm_event = SER_EV_NONE;
}
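
/* The single ser_alarm_work instance means only one alarm is pending
 * at a time; arming a new alarm supersedes the previous one. Each
 * state arms its expected-event timeout on SER_EV_STATE_IN and cancels
 * it on SER_EV_STATE_OUT.
 */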

/* driver function */
static void drv_stop_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	ieee80211_stop_queues(rtwdev->hw);
	set_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_stop_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	set_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void drv_trx_reset(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_hci_reset(rtwdev);
}

static void drv_resume_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_TX, ser->flags))
		return;

	ieee80211_wake_queues(rtwdev->hw);
	clear_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_resume_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_RX, ser->flags))
		return;

	set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	clear_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
	rtwvif->net_type = RTW89_NET_TYPE_NO_LINK;
	rtwvif->trigger = false;
	rtwvif->tdls_peer = 0;
}

static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)data;
	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;

	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
		rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
	if (sta->tdls)
		rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);

	INIT_LIST_HEAD(&rtwsta->ba_cam_list);
}

static void ser_deinit_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  ser_sta_deinit_cam_iter,
					  rtwvif);

	rtw89_cam_deinit(rtwdev, rtwvif);

	bitmap_zero(rtwdev->cam_info.ba_cam_map, RTW89_MAX_BA_CAM_NUM);
}

static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
{
	struct rtw89_vif *rtwvif;

	rtw89_cam_reset_keys(rtwdev);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_deinit_cam(rtwdev, rtwvif);

	rtw89_core_release_all_bits_map(rtwdev->mac_id_map, RTW89_MAX_MAC_ID_NUM);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_reset_vif(rtwdev, rtwvif);

	rtwdev->total_sta_assoc = 0;
}

/* hal function */
static int hal_enable_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!test_bit(RTW89_SER_HAL_STOP_DMA, ser->flags))
		return 0;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_2);
	if (!ret)
		clear_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

	return ret;
}

static int hal_stop_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_1);
	if (!ret)
		set_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

	return ret;
}

static void hal_send_post_m0_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RESET_START_DMAC);
}

static void hal_send_m2_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_DISABLE_EN);
}

static void hal_send_m4_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RCVY_EN);
}

/* state handler */
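/* State machine at a glance (any M-step timeout escalates to L2):
 *
 *	SER_IDLE_ST --pre-M0--> SER_L1_RESET_PRE_ST --M1--> SER_RESET_TRX_ST
 *	SER_IDLE_ST --------------------M1---------------> SER_RESET_TRX_ST
 *	SER_RESET_TRX_ST --M3--> SER_DO_HCI_ST --M5--> SER_IDLE_ST
 *	SER_IDLE_ST --L2 error--> SER_L2_RESET_ST --recfg done/timeout--> SER_IDLE_ST
 */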
static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		rtw89_hci_recovery_complete(rtwdev);
		clear_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags);
		clear_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
		break;
	case SER_EV_L1_RESET_PREPARE:
		ser_state_goto(ser, SER_L1_RESET_PRE_ST);
		break;
	case SER_EV_L1_RESET:
		ser_state_goto(ser, SER_RESET_TRX_ST);
		break;
	case SER_EV_L2_RESET:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_STATE_OUT:
		set_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags);
		rtw89_hci_recovery_start(rtwdev);
		break;
	default:
		break;
	}
}

static void ser_l1_reset_pre_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		ser->prehandle_l1 = true;
		hal_send_post_m0_event(ser);
		ser_set_alarm(ser, 1000, SER_EV_M1_TIMEOUT);
		break;
	case SER_EV_L1_RESET:
		ser_state_goto(ser, SER_RESET_TRX_ST);
		break;
	case SER_EV_M1_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;
	default:
		break;
	}
}

static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		cancel_delayed_work_sync(&rtwdev->track_work);
		drv_stop_tx(ser);

		if (hal_stop_dma(ser)) {
			ser_state_goto(ser, SER_L2_RESET_ST);
			break;
		}

		drv_stop_rx(ser);
		drv_trx_reset(ser);

		/* wait m3 */
		hal_send_m2_event(ser);

		/* arm an alarm in case the FW's M3 response never arrives */
		ser_set_alarm(ser, 1000, SER_EV_M3_TIMEOUT);
		break;

	case SER_EV_DO_RECOVERY:
		ser_state_goto(ser, SER_DO_HCI_ST);
		break;

	case SER_EV_M3_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		hal_enable_dma(ser);
		drv_resume_rx(ser);
		drv_resume_tx(ser);
		ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
					     RTW89_TRACK_WORK_PERIOD);
		break;

	default:
		break;
	}
}

static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		/* wait m5 */
		hal_send_m4_event(ser);

		/* arm an alarm in case the FW's M5 response never arrives */
		ser_set_alarm(ser, 1000, SER_EV_FW_M5_TIMEOUT);
		break;

	case SER_EV_FW_M5_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;

	case SER_EV_MAC_RESET_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;

	default:
		break;
	}
}

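/* MAC internal memories are not directly addressable; they are read
 * through an indirect window: write a page-aligned base address to
 * filter_model_addr, then read that page's contents back through the
 * register range starting at indir_access_addr, one
 * MAC_MEM_DUMP_PAGE_SIZE page at a time.
 */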
static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf,
			     u8 sel, u32 start_addr, u32 len)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u32 filter_model_addr = mac->filter_model_addr;
	u32 indir_access_addr = mac->indir_access_addr;
	u32 *ptr = (u32 *)buf;
	u32 base_addr, start_page, residue;
	u32 cnt = 0;
	u32 i;

	start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE;
	residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE;
	base_addr = mac->mem_base_addrs[sel];
	base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;

	while (cnt < len) {
		rtw89_write32(rtwdev, filter_model_addr, base_addr);

		for (i = indir_access_addr + residue;
		     i < indir_access_addr + MAC_MEM_DUMP_PAGE_SIZE;
		     i += 4, ptr++) {
			*ptr = rtw89_read32(rtwdev, i);
			cnt += 4;
			if (cnt >= len)
				break;
		}

		residue = 0;
		base_addr += MAC_MEM_DUMP_PAGE_SIZE;
	}
}

static void rtw89_ser_fw_rsvd_ple_dump(struct rtw89_dev *rtwdev, u8 *buf)
{
	u32 start_addr = rtwdev->chip->rsvd_ple_ofst;

	rtw89_debug(rtwdev, RTW89_DBG_SER,
		    "dump mem for fw rsvd payload engine (start addr: 0x%x)\n",
		    start_addr);
	ser_mac_mem_dump(rtwdev, buf, RTW89_MAC_MEM_SHARED_BUF, start_addr,
			 RTW89_FW_RSVD_PLE_SIZE);
}

struct __fw_backtrace_entry {
	u32 wcpu_addr;
	u32 size;
	u32 key;
} __packed;

struct __fw_backtrace_info {
	u32 ra;
	u32 sp;
} __packed;

static_assert(RTW89_FW_BACKTRACE_INFO_SIZE ==
	      sizeof(struct __fw_backtrace_info));
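
/* The firmware is expected to place a struct __fw_backtrace_entry at
 * the start of the reserved PLE area (see ser_l2_reset_st_pre_hdl()
 * below): a WCPU address pointing at an array of {ra, sp} records,
 * validated against a magic key and a size bound before dumping.
 */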

static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf,
				       const struct __fw_backtrace_entry *ent)
{
	struct __fw_backtrace_info *ptr = (struct __fw_backtrace_info *)buf;
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u32 filter_model_addr = mac->filter_model_addr;
	u32 indir_access_addr = mac->indir_access_addr;
	u32 fwbt_addr = ent->wcpu_addr & RTW89_WCPU_BASE_MASK;
	u32 fwbt_size = ent->size;
	u32 fwbt_key = ent->key;
	u32 i;

	if (fwbt_addr == 0) {
		rtw89_warn(rtwdev, "FW backtrace invalid address: 0x%x\n",
			   fwbt_addr);
		return -EINVAL;
	}

	if (fwbt_key != RTW89_FW_BACKTRACE_KEY) {
		rtw89_warn(rtwdev, "FW backtrace invalid key: 0x%x\n",
			   fwbt_key);
		return -EINVAL;
	}

	if (fwbt_size == 0 || !RTW89_VALID_FW_BACKTRACE_SIZE(fwbt_size) ||
	    fwbt_size > RTW89_FW_BACKTRACE_MAX_SIZE) {
		rtw89_warn(rtwdev, "FW backtrace invalid size: 0x%x\n",
			   fwbt_size);
		return -EINVAL;
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace start\n");
	rtw89_write32(rtwdev, filter_model_addr, fwbt_addr);

	for (i = indir_access_addr;
	     i < indir_access_addr + fwbt_size;
	     i += RTW89_FW_BACKTRACE_INFO_SIZE, ptr++) {
		*ptr = (struct __fw_backtrace_info){
			.ra = rtw89_read32(rtwdev, i),
			.sp = rtw89_read32(rtwdev, i + 4),
		};
		rtw89_debug(rtwdev, RTW89_DBG_SER,
			    "next sp: 0x%x, next ra: 0x%x\n",
			    ptr->sp, ptr->ra);
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace end\n");
	return 0;
}

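/* L2 pre-handling: snapshot firmware memory for devcoredump first,
 * while it is still intact, then tear down MAC bindings, stop the
 * core, and reset driver state so that ieee80211_restart_hw() starts
 * clean.
 */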
static void ser_l2_reset_st_pre_hdl(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct rtw89_ser_cd_buffer *buf;
	struct __fw_backtrace_entry fwbt_ent;
	int ret = 0;

	buf = rtw89_ser_cd_prep(rtwdev);
	if (!buf) {
		ret = -ENOMEM;
		goto bottom;
	}

	rtw89_ser_fw_rsvd_ple_dump(rtwdev, buf->fwple.data);

	fwbt_ent = *(struct __fw_backtrace_entry *)buf->fwple.data;
	ret = rtw89_ser_fw_backtrace_dump(rtwdev, buf->fwbt.data, &fwbt_ent);
	if (ret)
		goto bottom;

	rtw89_ser_cd_send(rtwdev, buf);

bottom:
	rtw89_ser_cd_free(rtwdev, buf, !!ret);

	ser_reset_mac_binding(rtwdev);
	rtw89_core_stop(rtwdev);
	rtw89_entity_init(rtwdev);
	rtw89_fw_release_general_pkt_list(rtwdev, false);
	INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
}

static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		mutex_lock(&rtwdev->mutex);
		ser_l2_reset_st_pre_hdl(ser);
		mutex_unlock(&rtwdev->mutex);

		ieee80211_restart_hw(rtwdev->hw);
		ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT);
		break;

	case SER_EV_L2_RECFG_TIMEOUT:
		rtw89_info(rtwdev, "Err: ser L2 re-config timeout\n");
		fallthrough;
	case SER_EV_L2_RECFG_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;

	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;

	default:
		break;
	}
}

static const struct event_ent ser_ev_tbl[] = {
	{SER_EV_NONE, "SER_EV_NONE"},
	{SER_EV_STATE_IN, "SER_EV_STATE_IN"},
	{SER_EV_STATE_OUT, "SER_EV_STATE_OUT"},
	{SER_EV_L1_RESET_PREPARE, "SER_EV_L1_RESET_PREPARE pre-m0"},
	{SER_EV_L1_RESET, "SER_EV_L1_RESET m1"},
	{SER_EV_DO_RECOVERY, "SER_EV_DO_RECOVERY m3"},
	{SER_EV_MAC_RESET_DONE, "SER_EV_MAC_RESET_DONE m5"},
	{SER_EV_L2_RESET, "SER_EV_L2_RESET"},
	{SER_EV_L2_RECFG_DONE, "SER_EV_L2_RECFG_DONE"},
	{SER_EV_L2_RECFG_TIMEOUT, "SER_EV_L2_RECFG_TIMEOUT"},
	{SER_EV_M1_TIMEOUT, "SER_EV_M1_TIMEOUT"},
	{SER_EV_M3_TIMEOUT, "SER_EV_M3_TIMEOUT"},
	{SER_EV_FW_M5_TIMEOUT, "SER_EV_FW_M5_TIMEOUT"},
	{SER_EV_L0_RESET, "SER_EV_L0_RESET"},
	{SER_EV_MAXX, "SER_EV_MAX"}
};

static const struct state_ent ser_st_tbl[] = {
	{SER_IDLE_ST, "SER_IDLE_ST", ser_idle_st_hdl},
	{SER_L1_RESET_PRE_ST, "SER_L1_RESET_PRE_ST", ser_l1_reset_pre_st_hdl},
	{SER_RESET_TRX_ST, "SER_RESET_TRX_ST", ser_reset_trx_st_hdl},
	{SER_DO_HCI_ST, "SER_DO_HCI_ST", ser_do_hci_st_hdl},
	{SER_L2_RESET_ST, "SER_L2_RESET_ST", ser_l2_reset_st_hdl}
};
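
/* Both tables are indexed directly by the enum values above
 * (ser_state_run(), ser_st_name() and ser_ev_name() rely on this), so
 * entry order must match the enum declaration order.
 */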

int rtw89_ser_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = &rtwdev->ser;

	memset(ser, 0, sizeof(*ser));
	INIT_LIST_HEAD(&ser->msg_q);
	ser->state = SER_IDLE_ST;
	ser->st_tbl = ser_st_tbl;
	ser->ev_tbl = ser_ev_tbl;

	bitmap_zero(ser->flags, RTW89_NUM_OF_SER_FLAGS);
	spin_lock_init(&ser->msg_q_lock);
	INIT_WORK(&ser->ser_hdl_work, rtw89_ser_hdl_work);
	INIT_DELAYED_WORK(&ser->ser_alarm_work, rtw89_ser_alarm_work);
	return 0;
}

int rtw89_ser_deinit(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = (struct rtw89_ser *)&rtwdev->ser;

	set_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	cancel_delayed_work_sync(&ser->ser_alarm_work);
	cancel_work_sync(&ser->ser_hdl_work);
	clear_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	return 0;
}

void rtw89_ser_recfg_done(struct rtw89_dev *rtwdev)
{
	ser_send_msg(&rtwdev->ser, SER_EV_L2_RECFG_DONE);
}

int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err)
{
	u8 event = SER_EV_NONE;

	rtw89_info(rtwdev, "SER catches error: 0x%x\n", err);

	switch (err) {
	case MAC_AX_ERR_L1_PREERR_DMAC: /* pre-M0 */
		event = SER_EV_L1_RESET_PREPARE;
		break;
	case MAC_AX_ERR_L1_ERR_DMAC:
	case MAC_AX_ERR_L0_PROMOTE_TO_L1:
		event = SER_EV_L1_RESET; /* M1 */
		break;
	case MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE:
		event = SER_EV_DO_RECOVERY; /* M3 */
		break;
	case MAC_AX_ERR_L1_RESET_RECOVERY_DONE:
		event = SER_EV_MAC_RESET_DONE; /* M5 */
		break;
	case MAC_AX_ERR_L0_ERR_CMAC0:
	case MAC_AX_ERR_L0_ERR_CMAC1:
	case MAC_AX_ERR_L0_RESET_DONE:
		event = SER_EV_L0_RESET;
		break;
	default:
		if (err == MAC_AX_ERR_L1_PROMOTE_TO_L2 ||
		    (err >= MAC_AX_ERR_L2_ERR_AH_DMA &&
		     err <= MAC_AX_GET_ERR_MAX))
			event = SER_EV_L2_RESET;
		break;
	}

	if (event == SER_EV_NONE) {
		rtw89_warn(rtwdev, "SER cannot recognize error: 0x%x\n", err);
		return -EINVAL;
	}

	ser_send_msg(&rtwdev->ser, event);
	return 0;
}
EXPORT_SYMBOL(rtw89_ser_notify);