// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */
#include <linux/dma-mapping.h>
#include "hal_tx.h"
#include "debug.h"
#include "hal_desc.h"
#include "hif.h"

static const struct hal_srng_config hw_srng_config_template[] = {
	/* TODO: max_rings can be populated by querying HW capabilities */
	{ /* REO_DST */
		.start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
		.max_rings = 4,
		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_EXCEPTION */
		/* Designating REO2TCL ring as exception ring. This ring is
		 * similar to other REO2SW rings though it is named as REO2TCL.
		 * Any of the REO2SW rings can be used as exception ring.
		 */
		.start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_REINJECT */
		.start_ring_id = HAL_SRNG_RING_ID_SW2REO,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_CMD */
		.start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			sizeof(struct hal_reo_get_queue_stats)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_DATA */
		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
		.max_rings = 3,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			     sizeof(struct hal_tcl_data_cmd)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_CMD */
		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			     sizeof(struct hal_tcl_gse_cmd)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			     sizeof(struct hal_tcl_status_ring)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_SRC */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.reg_start = {
			(HAL_SEQ_WCSS_UMAC_CE0_SRC_REG +
			 HAL_CE_DST_RING_BASE_LSB),
			HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP,
		},
		.reg_size = {
			(HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
			 HAL_SEQ_WCSS_UMAC_CE0_SRC_REG),
			(HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
			 HAL_SEQ_WCSS_UMAC_CE0_SRC_REG),
		},
		.max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_DST */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.reg_start = {
			(HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
			 HAL_CE_DST_RING_BASE_LSB),
			HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP,
		},
		.reg_size = {
			(HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
			(HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
		},
		.max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_DST_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.reg_start = {
			(HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
			 HAL_CE_DST_STATUS_RING_BASE_LSB),
			(HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
			 HAL_CE_DST_STATUS_RING_HP),
		},
		.reg_size = {
			(HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
			(HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
			 HAL_SEQ_WCSS_UMAC_CE0_DST_REG),
		},
		.max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* WBM_IDLE_LINK */
		.start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.reg_start = {
			(HAL_SEQ_WCSS_UMAC_WBM_REG +
			 HAL_WBM_IDLE_LINK_RING_BASE_LSB),
			(HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP),
		},
		.max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
	},
	{ /* SW2WBM_RELEASE */
		.start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.reg_start = {
			(HAL_SEQ_WCSS_UMAC_WBM_REG +
			 HAL_WBM_RELEASE_RING_BASE_LSB),
			(HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP),
		},
		.max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	{ /* WBM2SW_RELEASE */
		.start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
		.max_rings = 4,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.reg_start = {
			(HAL_SEQ_WCSS_UMAC_WBM_REG +
			 HAL_WBM0_RELEASE_RING_BASE_LSB),
			(HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP),
		},
		.reg_size = {
			(HAL_WBM1_RELEASE_RING_BASE_LSB -
			 HAL_WBM0_RELEASE_RING_BASE_LSB),
			(HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP),
		},
		.max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	{ /* RXDMA_BUF */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
		.max_rings = 2,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_DST */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_BUF */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_DST */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_DESC */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA DIR BUF */
		.start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
		.max_rings = 1,
		.entry_size = 8 >> 2, /* TODO: Define the struct */
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
};
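
/* The .entry_size values above are stored in units of 32-bit words, which is
 * why each descriptor size is shifted right by two. A minimal illustrative
 * sketch of the conversion (using only structures already referenced in this
 * table):
 *
 *	u32 entry_words = sizeof(struct hal_reo_dest_ring) >> 2;
 *	u32 entry_bytes = entry_words << 2;
 *
 * ath11k_hal_srng_get_entrysize() below performs the << 2 conversion back to
 * bytes for callers.
 */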

static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	size_t size;

	size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
	hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
					    GFP_KERNEL);
	if (!hal->rdp.vaddr)
		return -ENOMEM;

	return 0;
}

static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	size_t size;

	if (!hal->rdp.vaddr)
		return;

	size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
	dma_free_coherent(ab->dev, size,
			  hal->rdp.vaddr, hal->rdp.paddr);
	hal->rdp.vaddr = NULL;
}

static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	size_t size;

	size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
	hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
					    GFP_KERNEL);
	if (!hal->wrp.vaddr)
		return -ENOMEM;

	return 0;
}

static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	size_t size;

	if (!hal->wrp.vaddr)
		return;

	size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
	dma_free_coherent(ab->dev, size,
			  hal->wrp.vaddr, hal->wrp.paddr);
	hal->wrp.vaddr = NULL;
}

static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
				    struct hal_srng *srng, int ring_num)
{
	struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST];
	u32 addr;
	u32 val;

	addr = HAL_CE_DST_RING_CTRL +
	       srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
	       ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];

	val = ath11k_hif_read32(ab, addr);
	val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
	val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
			  srng->u.dst_ring.max_buffer_length);
	ath11k_hif_write32(ab, addr, val);
}

static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
					struct hal_srng *srng)
{
	struct ath11k_hal *hal = &ab->hal;
	u32 val;
	u64 hp_addr;
	u32 reg_base;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];

	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
		ath11k_hif_write32(ab, reg_base +
				   HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab),
				   srng->msi_addr);

		val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
				 ((u64)srng->msi_addr >>
				  HAL_ADDR_MSB_REG_SHIFT)) |
		      HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
		ath11k_hif_write32(ab, reg_base +
				       HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab), val);

		ath11k_hif_write32(ab,
				   reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(ab),
				   srng->msi_data);
	}

	ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);

	val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
			 ((u64)srng->ring_base_paddr >>
			  HAL_ADDR_MSB_REG_SHIFT)) |
	      FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
			 (srng->entry_size * srng->num_entries));
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(ab), val);

	val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
	      FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET(ab), val);

	/* interrupt setup */
	val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
			 (srng->intr_timer_thres_us >> 3));

	val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
			  (srng->intr_batch_cntr_thres_entries *
			   srng->entry_size));

	ath11k_hif_write32(ab,
			   reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab),
			   val);

	hp_addr = hal->rdp.paddr +
		  ((unsigned long)srng->u.dst_ring.hp_addr -
		   (unsigned long)hal->rdp.vaddr);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab),
			   hp_addr & HAL_ADDR_LSB_REG_MASK);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab),
			   hp_addr >> HAL_ADDR_MSB_REG_SHIFT);

	/* Initialize head and tail pointers to indicate ring is empty */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
	ath11k_hif_write32(ab, reg_base, 0);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET(ab), 0);
	*srng->u.dst_ring.hp_addr = 0;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
		val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
		val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
		val |= HAL_REO1_RING_MISC_MSI_SWAP;
	val |= HAL_REO1_RING_MISC_SRNG_ENABLE;

	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET(ab), val);
}

static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
					struct hal_srng *srng)
{
	struct ath11k_hal *hal = &ab->hal;
	u32 val;
	u64 tp_addr;
	u32 reg_base;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];

	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
		ath11k_hif_write32(ab, reg_base +
				   HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab),
				   srng->msi_addr);

		val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
				 ((u64)srng->msi_addr >>
				  HAL_ADDR_MSB_REG_SHIFT)) |
		      HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
		ath11k_hif_write32(ab, reg_base +
				       HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab),
				   val);

		ath11k_hif_write32(ab, reg_base +
				       HAL_TCL1_RING_MSI1_DATA_OFFSET(ab),
				   srng->msi_data);
	}

	ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);

	val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
			 ((u64)srng->ring_base_paddr >>
			  HAL_ADDR_MSB_REG_SHIFT)) |
	      FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
			 (srng->entry_size * srng->num_entries));
	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);

	val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);

	/* interrupt setup */
	/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the
	 * unit of 8 usecs instead of 1 usec (as required by v1).
	 */
	val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
			 srng->intr_timer_thres_us);

	val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
			  (srng->intr_batch_cntr_thres_entries *
			   srng->entry_size));

	ath11k_hif_write32(ab,
			   reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab),
			   val);

	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
				  srng->u.src_ring.low_threshold);
	}
	ath11k_hif_write32(ab,
			   reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab),
			   val);

	if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
		tp_addr = hal->rdp.paddr +
			  ((unsigned long)srng->u.src_ring.tp_addr -
			   (unsigned long)hal->rdp.vaddr);
		ath11k_hif_write32(ab,
				   reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab),
				   tp_addr & HAL_ADDR_LSB_REG_MASK);
		ath11k_hif_write32(ab,
				   reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab),
				   tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
	}

	/* Initialize head and tail pointers to indicate ring is empty */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
	ath11k_hif_write32(ab, reg_base, 0);
	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
	*srng->u.src_ring.tp_addr = 0;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
		val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
		val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
		val |= HAL_TCL1_RING_MISC_MSI_SWAP;

	/* Loop count is not used for SRC rings */
	val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;

	val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;

	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val);
}

static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
				    struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
		ath11k_hal_srng_src_hw_init(ab, srng);
	else
		ath11k_hal_srng_dst_hw_init(ab, srng);
}

static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
				       enum hal_ring_type type,
				       int ring_num, int mac_id)
{
	struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
	int ring_id;

	if (ring_num >= srng_config->max_rings) {
		ath11k_warn(ab, "invalid ring number :%d\n", ring_num);
		return -EINVAL;
	}

	ring_id = srng_config->start_ring_id + ring_num;
	if (srng_config->lmac_ring)
		ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;

	if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
		return -EINVAL;

	return ring_id;
}
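
/* Illustrative example of the ring ID mapping above (symbolic, not a value
 * taken from the headers): for an LMAC ring such as RXDMA_BUF with ring_num 0
 * on mac_id 1,
 *
 *	ring_id = srng_config->start_ring_id + 0 +
 *		  1 * HAL_SRNG_RINGS_PER_LMAC;
 *
 * i.e. each MAC offsets the ID by HAL_SRNG_RINGS_PER_LMAC, and the result
 * must still be below HAL_SRNG_RING_ID_MAX.
 */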

int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type)
{
	struct hal_srng_config *srng_config;

	if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
		return -EINVAL;

	srng_config = &ab->hal.srng_config[ring_type];

	return (srng_config->entry_size << 2);
}

int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type)
{
	struct hal_srng_config *srng_config;

	if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
		return -EINVAL;

	srng_config = &ab->hal.srng_config[ring_type];

	return (srng_config->max_size / srng_config->entry_size);
}
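
/* A short usage sketch for the two helpers above (hypothetical caller code,
 * not taken from this driver): sizing a ring buffer in bytes,
 *
 *	int entry_bytes = ath11k_hal_srng_get_entrysize(ab, HAL_REO_DST);
 *	int max_entries = ath11k_hal_srng_get_max_entries(ab, HAL_REO_DST);
 *
 *	if (num_entries > max_entries)
 *		num_entries = max_entries;
 *	ring_bytes = num_entries * entry_bytes;
 *
 * where num_entries and ring_bytes are caller-defined.
 */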

void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
				struct hal_srng_params *params)
{
	params->ring_base_paddr = srng->ring_base_paddr;
	params->ring_base_vaddr = srng->ring_base_vaddr;
	params->num_entries = srng->num_entries;
	params->intr_timer_thres_us = srng->intr_timer_thres_us;
	params->intr_batch_cntr_thres_entries =
		srng->intr_batch_cntr_thres_entries;
	params->low_threshold = srng->u.src_ring.low_threshold;
	params->msi_addr = srng->msi_addr;
	params->msi_data = srng->msi_data;
	params->flags = srng->flags;
}

dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
				       struct hal_srng *srng)
{
	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
		return 0;

	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
		return ab->hal.wrp.paddr +
		       ((unsigned long)srng->u.src_ring.hp_addr -
			(unsigned long)ab->hal.wrp.vaddr);
	else
		return ab->hal.rdp.paddr +
		       ((unsigned long)srng->u.dst_ring.hp_addr -
			(unsigned long)ab->hal.rdp.vaddr);
}

dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
				       struct hal_srng *srng)
{
	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
		return 0;

	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
		return ab->hal.rdp.paddr +
		       ((unsigned long)srng->u.src_ring.tp_addr -
			(unsigned long)ab->hal.rdp.vaddr);
	else
		return ab->hal.wrp.paddr +
		       ((unsigned long)srng->u.dst_ring.tp_addr -
			(unsigned long)ab->hal.wrp.vaddr);
}

u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type)
{
	switch (type) {
	case HAL_CE_DESC_SRC:
		return sizeof(struct hal_ce_srng_src_desc);
	case HAL_CE_DESC_DST:
		return sizeof(struct hal_ce_srng_dest_desc);
	case HAL_CE_DESC_DST_STATUS:
		return sizeof(struct hal_ce_srng_dst_status_desc);
	}

	return 0;
}

void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
				u8 byte_swap_data)
{
	struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf;

	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
	desc->buffer_addr_info =
		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
			   ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
			   byte_swap_data) |
		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
	desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
}

void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr)
{
	struct hal_ce_srng_dest_desc *desc =
		(struct hal_ce_srng_dest_desc *)buf;

	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
	desc->buffer_addr_info =
		FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
			   ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT));
}

u32 ath11k_hal_ce_dst_status_get_length(void *buf)
{
	struct hal_ce_srng_dst_status_desc *desc =
		(struct hal_ce_srng_dst_status_desc *)buf;
	u32 len;

	len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
	desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;

	return len;
}

void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
				   dma_addr_t paddr)
{
	desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
					       (paddr & HAL_ADDR_LSB_REG_MASK));
	desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
					       ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
				    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
				    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
}

u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
		return (srng->ring_base_vaddr + srng->u.dst_ring.tp);

	return NULL;
}

u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
					struct hal_srng *srng)
{
	u32 *desc;

	lockdep_assert_held(&srng->lock);

	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
		return NULL;

	desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;

	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
			      srng->ring_size;

	return desc;
}

int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
				 bool sync_hw_ptr)
{
	u32 tp, hp;

	lockdep_assert_held(&srng->lock);

	tp = srng->u.dst_ring.tp;

	if (sync_hw_ptr) {
		hp = *srng->u.dst_ring.hp_addr;
		srng->u.dst_ring.cached_hp = hp;
	} else {
		hp = srng->u.dst_ring.cached_hp;
	}

	if (hp >= tp)
		return (hp - tp) / srng->entry_size;
	else
		return (srng->ring_size - tp + hp) / srng->entry_size;
}
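
/* Worked example of the wraparound arithmetic above (numbers chosen purely
 * for illustration): with ring_size = 8 (32-bit words), entry_size = 2,
 * tp = 6 and hp = 2, hp < tp, so the number of entries pending for SW is
 * (8 - 6 + 2) / 2 = 2.
 */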

/* Returns number of available entries in src ring */
int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
				 bool sync_hw_ptr)
{
	u32 tp, hp;

	lockdep_assert_held(&srng->lock);

	hp = srng->u.src_ring.hp;

	if (sync_hw_ptr) {
		tp = *srng->u.src_ring.tp_addr;
		srng->u.src_ring.cached_tp = tp;
	} else {
		tp = srng->u.src_ring.cached_tp;
	}

	if (tp > hp)
		return ((tp - hp) / srng->entry_size) - 1;
	else
		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}
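
/* Note on the "- 1" above: one entry is deliberately left unused so that a
 * full ring (hp one entry behind tp) can be distinguished from an empty ring
 * (hp == tp). For example, with ring_size = 8 words and entry_size = 2,
 * hp == tp reports (8 - hp + tp) / 2 - 1 = 3 free slots out of 4.
 */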

u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
					struct hal_srng *srng)
{
	u32 *desc;
	u32 next_hp;

	lockdep_assert_held(&srng->lock);

	/* TODO: Using % is expensive, but we have to do this since size of some
	 * SRNG rings is not power of 2 (due to descriptor sizes). Need to see
	 * if separate function is defined for rings having power of 2 ring size
	 * (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid the
	 * overhead of % by using mask (with &).
	 */
	next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;

	if (next_hp == srng->u.src_ring.cached_tp)
		return NULL;

	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
	srng->u.src_ring.hp = next_hp;

	/* TODO: Reap functionality is not used by all rings. If particular
	 * ring does not use reap functionality, we need not update reap_hp
	 * with next_hp pointer. Need to make sure a separate function is used
	 * before doing any optimization by removing below code updating
	 * reap_hp.
	 */
	srng->u.src_ring.reap_hp = next_hp;

	return desc;
}
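
/* A minimal sketch of the mask-based alternative mentioned in the TODO inside
 * the function above, valid only when ring_size is a power of two
 * (illustration only, not a drop-in replacement):
 *
 *	next_hp = (srng->u.src_ring.hp + srng->entry_size) &
 *		  (srng->ring_size - 1);
 *
 * The & form avoids the division implied by %, at the cost of restricting the
 * ring size to powers of two.
 */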

u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
				   struct hal_srng *srng)
{
	u32 *desc;
	u32 next_reap_hp;

	lockdep_assert_held(&srng->lock);

	next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		       srng->ring_size;

	if (next_reap_hp == srng->u.src_ring.cached_tp)
		return NULL;

	desc = srng->ring_base_vaddr + next_reap_hp;
	srng->u.src_ring.reap_hp = next_reap_hp;

	return desc;
}

u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
					 struct hal_srng *srng)
{
	u32 *desc;

	lockdep_assert_held(&srng->lock);

	if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
		return NULL;

	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
	srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
			      srng->ring_size;

	return desc;
}

u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
	    srng->u.src_ring.cached_tp)
		return NULL;

	return srng->ring_base_vaddr + srng->u.src_ring.hp;
}

void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
		srng->u.src_ring.cached_tp =
			*(volatile u32 *)srng->u.src_ring.tp_addr;
	else
		srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
}

/* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin()
 * should have been called before this.
 */
void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	/* TODO: See if we need a write memory barrier here */
	if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
				*(volatile u32 *)srng->u.src_ring.tp_addr;
			*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
		}
	} else {
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
				*(volatile u32 *)srng->u.src_ring.tp_addr;
			ath11k_hif_write32(ab,
					   (unsigned long)srng->u.src_ring.hp_addr -
					   (unsigned long)ab->mem,
					   srng->u.src_ring.hp);
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			ath11k_hif_write32(ab,
					   (unsigned long)srng->u.dst_ring.tp_addr -
					   (unsigned long)ab->mem,
					   srng->u.dst_ring.tp);
		}
	}

	srng->timestamp = jiffies;
}
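
/* Typical calling pattern for the begin/end pair above, sketched from the
 * caller's side (hypothetical caller code, shown only to illustrate the
 * locking and pairing; process() is a placeholder):
 *
 *	spin_lock_bh(&srng->lock);
 *	ath11k_hal_srng_access_begin(ab, srng);
 *
 *	while ((desc = ath11k_hal_srng_dst_get_next_entry(ab, srng)))
 *		process(desc);
 *
 *	ath11k_hal_srng_access_end(ab, srng);
 *	spin_unlock_bh(&srng->lock);
 */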

void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
				     struct hal_wbm_idle_scatter_list *sbuf,
				     u32 nsbufs, u32 tot_link_desc,
				     u32 end_offset)
{
	struct ath11k_buffer_addr *link_addr;
	int i;
	u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;

	link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;

	for (i = 1; i < nsbufs; i++) {
		link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
		link_addr->info1 = FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				(u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
				FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
				BASE_ADDR_MATCH_TAG_VAL);

		link_addr = (void *)sbuf[i].vaddr +
			     HAL_WBM_IDLE_SCATTER_BUF_SIZE;
	}

	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
			   FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
			   FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
			   FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
				      reg_scatter_buf_sz * nsbufs));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_RING_BASE_LSB,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_RING_BASE_MSB,
			   FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				(u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
				FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
				BASE_ADDR_MATCH_TAG_VAL));

	/* Setup head and tail pointers for the idle list */
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[nsbufs - 1].paddr));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
			   FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				((u64)sbuf[nsbufs - 1].paddr >>
				 HAL_ADDR_MSB_REG_SHIFT)) |
			   FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
				      (end_offset >> 2)));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[0].paddr));

	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[0].paddr));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
			   FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
			   FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1,
				      0));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
			   2 * tot_link_desc);

	/* Enable the SRNG */
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_IDLE_LINK_RING_MISC_ADDR, 0x40);
}

int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
			  int ring_num, int mac_id,
			  struct hal_srng_params *params)
{
	struct ath11k_hal *hal = &ab->hal;
	struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
	struct hal_srng *srng;
	int ring_id;
	u32 lmac_idx;
	int i;
	u32 reg_base;

	ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
	if (ring_id < 0)
		return ring_id;

	srng = &hal->srng_list[ring_id];

	srng->ring_id = ring_id;
	srng->ring_dir = srng_config->ring_dir;
	srng->ring_base_paddr = params->ring_base_paddr;
	srng->ring_base_vaddr = params->ring_base_vaddr;
	srng->entry_size = srng_config->entry_size;
	srng->num_entries = params->num_entries;
	srng->ring_size = srng->entry_size * srng->num_entries;
	srng->intr_batch_cntr_thres_entries =
				params->intr_batch_cntr_thres_entries;
	srng->intr_timer_thres_us = params->intr_timer_thres_us;
	srng->flags = params->flags;
	srng->msi_addr = params->msi_addr;
	srng->msi_data = params->msi_data;
	srng->initialized = 1;
	spin_lock_init(&srng->lock);
	lockdep_set_class(&srng->lock, hal->srng_key + ring_id);

	for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
		srng->hwreg_base[i] = srng_config->reg_start[i] +
				      (ring_num * srng_config->reg_size[i]);
	}

	memset(srng->ring_base_vaddr, 0,
	       (srng->entry_size * srng->num_entries) << 2);

	/* TODO: Add comments on these swap configurations */
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
			       HAL_SRNG_FLAGS_RING_PTR_SWAP;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];

	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.cached_tp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
		srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
		srng->u.src_ring.low_threshold = params->low_threshold *
						 srng->entry_size;
		if (srng_config->lmac_ring) {
			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
			srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
						   lmac_idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		} else {
			if (!ab->hw_params.supports_shadow_regs)
				srng->u.src_ring.hp_addr =
				(u32 *)((unsigned long)ab->mem + reg_base);
			else
				ath11k_dbg(ab, ATH11K_DBG_HAL,
					   "hal type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
					   type, ring_num,
					   reg_base,
					   (unsigned long)srng->u.src_ring.hp_addr -
					   (unsigned long)ab->mem);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.cached_hp = 0;
		srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
		if (srng_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
			srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
						   lmac_idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		} else {
			if (!ab->hw_params.supports_shadow_regs)
				srng->u.dst_ring.tp_addr =
				(u32 *)((unsigned long)ab->mem + reg_base +
					(HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab)));
			else
				ath11k_dbg(ab, ATH11K_DBG_HAL,
					   "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
					   type, ring_num,
					   reg_base + (HAL_REO1_RING_TP(ab) -
						       HAL_REO1_RING_HP(ab)),
					   (unsigned long)srng->u.dst_ring.tp_addr -
					   (unsigned long)ab->mem);
		}
	}

	if (srng_config->lmac_ring)
		return ring_id;

	ath11k_hal_srng_hw_init(ab, srng);

	if (type == HAL_CE_DST) {
		srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
		ath11k_hal_ce_dst_setup(ab, srng, ring_num);
	}

	return ring_id;
}
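
/* A condensed sketch of how a caller is expected to drive
 * ath11k_hal_srng_setup(); vaddr/paddr refer to DMA-coherent ring memory
 * owned by the caller, and the parameter values are placeholders rather than
 * values taken from this file:
 *
 *	struct hal_srng_params params = { 0 };
 *	int ring_id;
 *
 *	params.ring_base_vaddr = vaddr;
 *	params.ring_base_paddr = paddr;
 *	params.num_entries = num_entries;
 *	params.intr_batch_cntr_thres_entries = 1;
 *	params.intr_timer_thres_us = 0;
 *
 *	ring_id = ath11k_hal_srng_setup(ab, HAL_REO_DST, 0, 0, &params);
 *	if (ring_id < 0)
 *		return ring_id;
 */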

static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab,
					      int shadow_cfg_idx,
					  enum hal_ring_type ring_type,
					  int ring_num)
{
	struct hal_srng *srng;
	struct ath11k_hal *hal = &ab->hal;
	int ring_id;
	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];

	ring_id = ath11k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0);
	if (ring_id < 0)
		return;

	srng = &hal->srng_list[ring_id];

	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
		srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
						   (unsigned long)ab->mem);
	else
		srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
						   (unsigned long)ab->mem);
}

int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
					 enum hal_ring_type ring_type,
					 int ring_num)
{
	struct ath11k_hal *hal = &ab->hal;
	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
	int shadow_cfg_idx = hal->num_shadow_reg_configured;
	u32 target_reg;

	if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
		return -EINVAL;

	hal->num_shadow_reg_configured++;

	target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
	target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
		ring_num;

	/* For destination ring, shadow the TP */
	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
		target_reg += HAL_OFFSET_FROM_HP_TO_TP;

	hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;

	/* update hp/tp addr to hal structure */
	ath11k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
					  ring_num);

	ath11k_dbg(ab, ATH11K_DBG_HAL,
		   "target_reg %x, shadow reg 0x%x shadow_idx 0x%x, ring_type %d, ring num %d",
		  target_reg,
		  HAL_SHADOW_REG(shadow_cfg_idx),
		  shadow_cfg_idx,
		  ring_type, ring_num);

	return 0;
}

void ath11k_hal_srng_shadow_config(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	int ring_type, ring_num;

	/* update all the non-CE srngs. */
	for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
		struct hal_srng_config *srng_config = &hal->srng_config[ring_type];

		if (ring_type == HAL_CE_SRC ||
		    ring_type == HAL_CE_DST ||
		    ring_type == HAL_CE_DST_STATUS)
			continue;

		if (srng_config->lmac_ring)
			continue;

		for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++)
			ath11k_hal_srng_update_shadow_config(ab, ring_type, ring_num);
	}
}

void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
				       u32 **cfg, u32 *len)
{
	struct ath11k_hal *hal = &ab->hal;

	*len = hal->num_shadow_reg_configured;
	*cfg = hal->shadow_reg_addr;
}

void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
					 struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	/* Check whether the ring is empty. Update the shadow
	 * HP only when the ring isn't empty.
	 */
	if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
	    *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
		ath11k_hal_srng_access_end(ab, srng);
}

static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	struct hal_srng_config *s;

	hal->srng_config = kmemdup(hw_srng_config_template,
				   sizeof(hw_srng_config_template),
				   GFP_KERNEL);
	if (!hal->srng_config)
		return -ENOMEM;

	s = &hal->srng_config[HAL_REO_DST];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP(ab);
	s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
	s->reg_size[1] = HAL_REO2_RING_HP(ab) - HAL_REO1_RING_HP(ab);

	s = &hal->srng_config[HAL_REO_EXCEPTION];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(ab);

	s = &hal->srng_config[HAL_REO_REINJECT];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB;
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;

	s = &hal->srng_config[HAL_REO_CMD];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB;
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;

	s = &hal->srng_config[HAL_REO_STATUS];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP(ab);

	s = &hal->srng_config[HAL_TCL_DATA];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
	s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
	s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;

	s = &hal->srng_config[HAL_TCL_CMD];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;

	s = &hal->srng_config[HAL_TCL_STATUS];
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;

	return 0;
}

static void ath11k_hal_register_srng_key(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	u32 ring_id;

	for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
		lockdep_register_key(hal->srng_key + ring_id);
}

static void ath11k_hal_unregister_srng_key(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	u32 ring_id;

	for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
		lockdep_unregister_key(hal->srng_key + ring_id);
}

int ath11k_hal_srng_init(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;
	int ret;

	memset(hal, 0, sizeof(*hal));

	ret = ath11k_hal_srng_create_config(ab);
	if (ret)
		goto err_hal;

	ret = ath11k_hal_alloc_cont_rdp(ab);
	if (ret)
		goto err_hal;

	ret = ath11k_hal_alloc_cont_wrp(ab);
	if (ret)
		goto err_free_cont_rdp;

	ath11k_hal_register_srng_key(ab);

	return 0;

err_free_cont_rdp:
	ath11k_hal_free_cont_rdp(ab);

err_hal:
	return ret;
}
EXPORT_SYMBOL(ath11k_hal_srng_init);

void ath11k_hal_srng_deinit(struct ath11k_base *ab)
{
	struct ath11k_hal *hal = &ab->hal;

	ath11k_hal_unregister_srng_key(ab);
	ath11k_hal_free_cont_rdp(ab);
	ath11k_hal_free_cont_wrp(ab);
	kfree(hal->srng_config);
}
EXPORT_SYMBOL(ath11k_hal_srng_deinit);

void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
{
	struct hal_srng *srng;
	struct ath11k_ext_irq_grp *irq_grp;
	struct ath11k_ce_pipe *ce_pipe;
	int i;

	ath11k_err(ab, "Last interrupt received for each CE:\n");
	for (i = 0; i < ab->hw_params.ce_count; i++) {
		ce_pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		ath11k_err(ab, "CE_id %d pipe_num %d %ums before\n",
			   i, ce_pipe->pipe_num,
			   jiffies_to_msecs(jiffies - ce_pipe->timestamp));
	}

	ath11k_err(ab, "\nLast interrupt received for each group:\n");
	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		irq_grp = &ab->ext_irq_grp[i];
		ath11k_err(ab, "group_id %d %ums before\n",
			   irq_grp->grp_id,
			   jiffies_to_msecs(jiffies - irq_grp->timestamp));
	}

	for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
		srng = &ab->hal.srng_list[i];

		if (!srng->initialized)
			continue;

		if (srng->ring_dir == HAL_SRNG_DIR_SRC)
			ath11k_err(ab,
				   "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
				   srng->ring_id, srng->u.src_ring.hp,
				   srng->u.src_ring.reap_hp,
				   *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
				   srng->u.src_ring.last_tp,
				   jiffies_to_msecs(jiffies - srng->timestamp));
		else if (srng->ring_dir == HAL_SRNG_DIR_DST)
			ath11k_err(ab,
				   "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
				   srng->ring_id, srng->u.dst_ring.tp,
				   *srng->u.dst_ring.hp_addr,
				   srng->u.dst_ring.cached_hp,
				   srng->u.dst_ring.last_hp,
				   jiffies_to_msecs(jiffies - srng->timestamp));
	}
}