// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core.  The modem uses the GSI layer as well.
 *
 *	--------	     ---------
 *	|      |	     |	     |
 *	|  AP  +<---.	.----+ Modem |
 *	|      +--. |	| .->+	     |
 *	|      |  | |	| |  |	     |
 *	--------  | |	| |  ---------
 *		  v |	v |
 *		--+-+---+-+--
 *		|    GSI    |
 *		|-----------|
 *		|	    |
 *		|    IPA    |
 *		|	    |
 *		-------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA.  A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA.  A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data--also located in DRAM.  After writing
 * one or more TREs to a channel, the writer (either the IPA or an EE) writes
 * a doorbell register to inform the receiving side how many elements have
 * been written.
 *
 * Each channel has a GSI "event ring" associated with it.  An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE.  The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted.  Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags.  One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring.  Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order.  Completion of one entry implies the completion of all preceding
 * entries.  A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses.  The accessor functions perform byte
 * swapping if needed (i.e., for a big endian CPU).
 */
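
/* Editorial illustration (added for clarity; the numbers are hypothetical,
 * not taken from hardware documentation).  A minimal walk-through of the
 * flow described above:
 *
 * 1. The AP fills TRE[0] and TRE[1] of a TX channel ring, chaining them
 *    into a single transaction and requesting an event on completion of
 *    TRE[1] only.
 * 2. The AP writes the channel doorbell with the address of TRE[2], the
 *    first unfilled element, telling the IPA two new TREs are ready.
 * 3. When the transfer finishes, the IPA appends one entry to the
 *    channel's event ring pointing at TRE[1], rings the event ring
 *    doorbell, and the AP takes an interrupt.
 * 4. Because elements complete strictly in order, that single event for
 *    TRE[1] also implies TRE[0] is done.
 */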

/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT			5	/* seconds */

#define GSI_CHANNEL_STOP_RX_RETRIES	10

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
	__le64 xfer_ptr;
	__le16 len;
	u8 reserved1;
	u8 code;
	__le16 reserved2;
	u8 type;
	u8 chid;
};

/* Hardware values from the error log register error code field */
enum gsi_err_code {
	GSI_INVALID_TRE_ERR			= 0x1,
	GSI_OUT_OF_BUFFERS_ERR			= 0x2,
	GSI_OUT_OF_RESOURCES_ERR		= 0x3,
	GSI_UNSUPPORTED_INTER_EE_OP_ERR		= 0x4,
	GSI_EVT_RING_EMPTY_ERR			= 0x5,
	GSI_NON_ALLOCATED_EVT_ACCESS_ERR	= 0x6,
	GSI_HWO_1_ERR				= 0x8,
};

/* Hardware values from the error log register error type field */
enum gsi_err_type {
	GSI_ERR_TYPE_GLOB	= 0x1,
	GSI_ERR_TYPE_CHAN	= 0x2,
	GSI_ERR_TYPE_EVT	= 0x3,
};

/* Hardware values used when programming an event ring */
enum gsi_evt_chtype {
	GSI_EVT_CHTYPE_MHI_EV	= 0x0,
	GSI_EVT_CHTYPE_XHCI_EV	= 0x1,
	GSI_EVT_CHTYPE_GPI_EV	= 0x2,
	GSI_EVT_CHTYPE_XDCI_EV	= 0x3,
};

/* Hardware values used when programming a channel */
enum gsi_channel_protocol {
	GSI_CHANNEL_PROTOCOL_MHI	= 0x0,
	GSI_CHANNEL_PROTOCOL_XHCI	= 0x1,
	GSI_CHANNEL_PROTOCOL_GPI	= 0x2,
	GSI_CHANNEL_PROTOCOL_XDCI	= 0x3,
};

/* Hardware values representing an event ring immediate command opcode */
enum gsi_evt_cmd_opcode {
	GSI_EVT_ALLOCATE	= 0x0,
	GSI_EVT_RESET		= 0x9,
	GSI_EVT_DE_ALLOC	= 0xa,
};

/* Hardware values representing a generic immediate command opcode */
enum gsi_generic_cmd_opcode {
	GSI_GENERIC_HALT_CHANNEL	= 0x1,
	GSI_GENERIC_ALLOCATE_CHANNEL	= 0x2,
};

/* Hardware values representing a channel immediate command opcode */
enum gsi_ch_cmd_opcode {
	GSI_CH_ALLOCATE	= 0x0,
	GSI_CH_START	= 0x1,
	GSI_CH_STOP	= 0x2,
	GSI_CH_RESET	= 0x9,
	GSI_CH_DE_ALLOC	= 0xa,
};

/**
 * struct gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *	Defines the maximum number of outstanding TREs allowed in a single
 *	transaction on a channel, expressed in bytes (TRE count times the
 *	TRE size).  This determines the amount of prefetch performed by
 *	the hardware.  We configure this to equal the size of the TLV FIFO
 *	for the channel.
 * @outstanding_threshold:
 *	Defines the threshold (in bytes) determining when the sequencer
 *	should update the channel doorbell.  We configure this to equal
 *	the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};
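
/* Worked example (hypothetical numbers, added for illustration): with the
 * 16-byte ring elements this driver uses (sizeof(struct gsi_event)), a
 * channel whose TLV FIFO holds 8 TREs would get max_outstanding_tre =
 * 8 * 16 = 128 bytes and outstanding_threshold = 2 * 16 = 32 bytes; see
 * the scratch programming in gsi_channel_program() below.
 */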

/**
 * union gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};

/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the size of channel and event ring element are
	 * the same (and fixed).  Make sure the size of an event ring
	 * element is what's expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size.  We ensure the number of
	 * elements in an event ring is a power of 2 elsewhere; this
	 * ensures the elements themselves meet the requirement.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

	/* The channel element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

	/* The event ring element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}

static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	gsi->event_enable_bitmap |= BIT(evt_ring_id);
	val = gsi->event_enable_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	gsi->event_enable_bitmap &= ~BIT(evt_ring_id);
	val = gsi->event_enable_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	u32 val;

	/* We don't use inter-EE channel or event interrupts */
	val = GSI_CNTXT_TYPE_IRQ_MSK_ALL;
	val &= ~INTER_EE_CH_CTRL_FMASK;
	val &= ~INTER_EE_EV_CTRL_FMASK;
	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);

	val = GENMASK(gsi->channel_count - 1, 0);
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);

	val = GENMASK(gsi->evt_ring_count - 1, 0);
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);

	/* Each IEOB interrupt is enabled (later) as needed by channels */
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	val = GSI_CNTXT_GLOB_IRQ_ALL;
	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	/* Never enable GSI_BREAK_POINT */
	val = GSI_CNTXT_GSI_IRQ_ALL & ~BREAK_POINT_FMASK;
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
	return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}
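
/* Illustrative sketch (an addition for clarity, not part of the driver):
 * gsi_ring_index() inverts gsi_ring_addr() for any in-range index, so a
 * hypothetical self-check could read:
 *
 *	static bool gsi_ring_addr_roundtrip_ok(struct gsi_ring *ring, u32 index)
 *	{
 *		u32 offset = gsi_ring_addr(ring, index % ring->count);
 *
 *		return gsi_ring_index(ring, offset) == index % ring->count;
 *	}
 *
 * This is the relationship gsi_channel_update() and gsi_event_trans()
 * rely on when mapping hardware-reported ring offsets back to indexes.
 */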

/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled.  Returns true if the command completes
 * or false if it times out.
 */
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
}

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
			    enum gsi_evt_cmd_opcode opcode)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct completion *completion = &evt_ring->completion;
	struct device *dev = gsi->dev;
	u32 val;

	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

	if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion))
		return 0;	/* Success! */

	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
		opcode, evt_ring_id, evt_ring->state);

	return -ETIMEDOUT;
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	int ret;

	/* Get initial event ring state */
	evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		dev_err(gsi->dev, "bad event ring state %u before alloc\n",
			evt_ring->state);
		return -EINVAL;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "bad event ring state %u after alloc\n",
			evt_ring->state);
		ret = -EIO;
	}

	return ret;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	enum gsi_evt_ring_state state = evt_ring->state;
	int ret;

	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "bad event ring state %u before reset\n",
			evt_ring->state);
		return;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
		dev_err(gsi->dev, "bad event ring state %u after reset\n",
			evt_ring->state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	int ret;

	if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "bad event ring state %u before dealloc\n",
			evt_ring->state);
		return;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
		dev_err(gsi->dev, "bad event ring state %u after dealloc\n",
			evt_ring->state);
}

/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	void *virt = channel->gsi->virt;
	u32 val;

	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static int
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	struct completion *completion = &channel->completion;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	struct device *dev = gsi->dev;
	u32 val;

	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);

	if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion))
		return 0;	/* Success! */

	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
		opcode, channel_id, gsi_channel_state(channel));

	return -ETIMEDOUT;
}

/* Allocate a GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;
	int ret;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
		dev_err(dev, "bad channel state %u before alloc\n", state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "bad channel state %u after alloc\n", state);
		ret = -EIO;
	}

	return ret;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
		dev_err(dev, "bad channel state %u before start\n", state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_START);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
		dev_err(dev, "bad channel state %u after start\n", state);
		ret = -EIO;
	}

	return ret;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);

	/* Channel could have entered STOPPED state since last call
	 * if it timed out.  If so, we're done.
	 */
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
		dev_err(dev, "bad channel state %u before stop\n", state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_STOP);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (ret || state == GSI_CHANNEL_STATE_STOPPED)
		return ret;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(dev, "bad channel state %u after stop\n", state);

	return -EIO;
}

/* Reset a GSI channel in STOPPED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	msleep(1);	/* A short delay is required before a RESET command */

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		dev_err(dev, "bad channel state %u before reset\n", state);
		return;
	}

	ret = gsi_channel_command(channel, GSI_CH_RESET);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(dev, "bad channel state %u after reset\n", state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "bad channel state %u before dealloc\n", state);
		return;
	}

	ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(dev, "bad channel state %u after dealloc\n", state);
}

/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell.  Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}
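
/* A note on the (index - 1) arithmetic above (editorial clarification):
 * index is an unsigned 32-bit value, so when index is 0 the subtraction
 * wraps to 0xffffffff.  Ring counts are powers of 2, so the modulo still
 * yields the last ring element (for example 0xffffffff % 8 == 7), which
 * is exactly "one less than the first unfilled entry" as required.
 */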

/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
	u32 val;

	val = u32_encode_bits(GSI_EVT_CHTYPE_GPI_EV, EV_CHTYPE_FMASK);
	val |= EV_INTYPE_FMASK;
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	val = evt_ring->ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));

	val = evt_ring->ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

	/* No MSI write data, and the MSI high and low addresses are 0 */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

	/* We don't need to get event read pointer updates */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

	/* Finally, tell the hardware we've completed event 0 (arbitrary) */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}

/* Return the last (most recent) transaction completed on a channel. */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct gsi_trans *trans;

	spin_lock_bh(&trans_info->spinlock);

	if (!list_empty(&trans_info->complete))
		trans = list_last_entry(&trans_info->complete,
					struct gsi_trans, links);
	else if (!list_empty(&trans_info->polled))
		trans = list_last_entry(&trans_info->polled,
					struct gsi_trans, links);
	else
		trans = NULL;

	/* Caller will wait for this, so take a reference */
	if (trans)
		refcount_inc(&trans->refcount);

	spin_unlock_bh(&trans_info->spinlock);

	return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}

/* Stop channel activity.  Transactions may not be allocated until thawed. */
static void gsi_channel_freeze(struct gsi_channel *channel)
{
	gsi_channel_trans_quiesce(channel);

	napi_disable(&channel->napi);

	gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
}

/* Allow transactions to be used on the channel again. */
static void gsi_channel_thaw(struct gsi_channel *channel)
{
	gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);

	napi_enable(&channel->napi);
}

/* Program a channel for use */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	u32 wrr_weight = 0;
	u32 val;

	/* Arbitrarily pick TRE 0 as the first channel element to use */
	channel->tre_ring.index = 0;

	/* We program all channels to use GPI protocol */
	val = u32_encode_bits(GSI_CHANNEL_PROTOCOL_GPI, CHTYPE_PROTOCOL_FMASK);
	if (channel->toward_ipa)
		val |= CHTYPE_DIR_FMASK;
	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	val = u32_encode_bits(size, R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	val = channel->tre_ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));

	val = channel->tre_ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = field_max(WRR_WEIGHT_FMASK);
	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

	/* Enable the doorbell engine if requested */
	if (doorbell)
		val |= USE_DB_ENG_FMASK;

	if (!channel->use_prefetch)
		val |= USE_ESCAPE_BUF_ONLY_FMASK;

	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
					GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	val = scr.data.word1;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

	val = scr.data.word2;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

	val = scr.data.word3;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

	/* We must not modify the low-order 16 bits of the last scratch
	 * register, so preserve them across this read-modify-write.  The
	 * sequence assumes those bits remain unchanged between the read
	 * and the write.
	 */
	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

	/* All done! */
}

static void gsi_channel_deprogram(struct gsi_channel *channel)
{
	/* Nothing to do */
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	gsi_channel_thaw(channel);

	return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 retries;
	int ret;

	gsi_channel_freeze(channel);

	/* RX channels might require a little time to enter STOPPED state */
	retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		msleep(1);
	} while (retries--);

	mutex_unlock(&gsi->mutex);

	/* Thaw the channel if we need to retry (or on error) */
	if (ret)
		gsi_channel_thaw(channel);

	return ret;
}

/* Reset and reconfigure a channel (possibly leaving doorbell disabled) */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	gsi_channel_reset_command(channel);
	/* Due to a hardware quirk we may need to reset RX channels twice. */
	if (legacy && !channel->toward_ipa)
		gsi_channel_reset_command(channel);

	gsi_channel_program(channel, legacy);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}

/* Stop a STARTED channel for suspend (using stop if requested) */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (stop)
		return gsi_channel_stop(gsi, channel_id);

	gsi_channel_freeze(channel);

	return 0;
}

/* Resume a suspended channel (starting will be requested if STOPPED) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (start)
		return gsi_channel_start(gsi, channel_id);

	gsi_channel_thaw(channel);

	return 0;
}

/**
 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
 * @channel:	Channel for which to report
 *
 * Report to the network stack the number of bytes and transactions that
 * have been queued to hardware since the last call.  This and the next
 * function supply information used by the network stack for throttling.
 *
 * For each channel we track the number of transactions used and bytes of
 * data those transactions represent.  We also track what those values are
 * each time this function is called.  Subtracting the two tells us
 * the number of bytes and transactions that have been added between
 * successive calls.
 *
 * Calling this each time we ring the channel doorbell allows us to
 * provide accurate information to the network stack about how much
 * work we've given the hardware at any point in time.
 */
void gsi_channel_tx_queued(struct gsi_channel *channel)
{
	u32 trans_count;
	u32 byte_count;

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
				  trans_count, byte_count);
}
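
/* Worked example of the bookkeeping above (hypothetical numbers): if the
 * channel has committed 1500 bytes in 3 transactions since setup, and the
 * previous call recorded 1000 bytes in 2 transactions, this call reports
 * a delta of 500 bytes and 1 transaction to the network stack, then saves
 * the new totals (1500 and 3) for the next call.
 */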

/**
 * gsi_channel_tx_update() - Report completed TX transfers
 * @channel:	Channel that has completed transmitting packets
 * @trans:	Last transaction known to be complete
 *
 * Compute the number of transactions and bytes that have been transferred
 * over a TX channel since the given transaction was committed.  Report this
 * information to the network stack.
 *
 * At the time a transaction is committed, we record its channel's
 * committed transaction and byte counts *in the transaction*.
 * Completions are signaled by the hardware with an interrupt, and
 * we can determine the latest completed transaction at that time.
 *
 * The difference between the byte/transaction count recorded in
 * the transaction and the count last time we recorded a completion
 * tells us exactly how much data has been transferred between
 * completions.
 *
 * Calling this each time we learn of a newly-completed transaction
 * allows us to provide accurate information to the network stack
 * about how much work has been completed by the hardware at a given
 * point in time.
 */
static void
gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
{
	u64 byte_count = trans->byte_count + trans->len;
	u64 trans_count = trans->trans_count + 1;

	byte_count -= channel->compl_byte_count;
	channel->compl_byte_count += byte_count;
	trans_count -= channel->compl_trans_count;
	channel->compl_trans_count += trans_count;

	ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
				     trans_count, byte_count);
}
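
/* The completion side uses the same subtraction (hypothetical numbers):
 * if completing @trans brings the channel's completed totals to 1500
 * bytes and 3 transactions, and the previous completion report left off
 * at 1000 bytes and 2 transactions, this reports a delta of 500 bytes
 * and 1 transaction, keeping queued and completed counts in step.
 */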

/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
	u32 channel_mask;

	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	while (channel_mask) {
		u32 channel_id = __ffs(channel_mask);
		struct gsi_channel *channel;

		channel_mask ^= BIT(channel_id);

		channel = &gsi->channel[channel_id];

		complete(&channel->completion);
	}
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);
		struct gsi_evt_ring *evt_ring;

		event_mask ^= BIT(evt_ring_id);

		evt_ring = &gsi->evt_ring[evt_ring_id];
		evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);

		complete(&evt_ring->completion);
	}
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES_ERR) {
		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
		complete(&gsi->channel[channel_id].completion);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
		channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES_ERR) {
		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
		u32 channel_id = gsi_channel_id(evt_ring->channel);

		complete(&evt_ring->completion);
		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
			channel_id);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
		evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
	enum gsi_err_type type;
	enum gsi_err_code code;
	u32 which;
	u32 val;
	u32 ee;

	/* Get the logged error, then reinitialize the log */
	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

	ee = u32_get_bits(val, ERR_EE_FMASK);
	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
	type = u32_get_bits(val, ERR_TYPE_FMASK);
	code = u32_get_bits(val, ERR_CODE_FMASK);

	if (type == GSI_ERR_TYPE_CHAN)
		gsi_isr_glob_chan_err(gsi, ee, which, code);
	else if (type == GSI_ERR_TYPE_EVT)
		gsi_isr_glob_evt_err(gsi, ee, which, code);
	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
	u32 result;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
	if (result != GENERIC_EE_SUCCESS_FVAL)
		dev_err(gsi->dev, "global INT1 generic result %u\n", result);

	complete(&gsi->completion);
}

/* Global EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

	if (val & ERROR_INT_FMASK)
		gsi_isr_glob_err(gsi);

	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

	val &= ~ERROR_INT_FMASK;

	if (val & GP_INT1_FMASK) {
		val ^= GP_INT1_FMASK;
		gsi_isr_gp_int1(gsi);
	}

	if (val)
		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		gsi_irq_ieob_disable(gsi, evt_ring_id);
		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

	if (val)
		dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}

/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq:	Interrupt number (ignored)
 * @dev_id:	GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ. Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
	struct gsi *gsi = dev_id;
	u32 intr_mask;
	u32 cnt = 0;

	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
		/* intr_mask contains bitmask of pending GSI interrupts */
		do {
			u32 gsi_intr = BIT(__ffs(intr_mask));

			intr_mask ^= gsi_intr;

			switch (gsi_intr) {
			case CH_CTRL_FMASK:
				gsi_isr_chan_ctrl(gsi);
				break;
			case EV_CTRL_FMASK:
				gsi_isr_evt_ctrl(gsi);
				break;
			case GLOB_EE_FMASK:
				gsi_isr_glob_ee(gsi);
				break;
			case IEOB_FMASK:
				gsi_isr_ieob(gsi);
				break;
			case GENERAL_FMASK:
				gsi_isr_general(gsi);
				break;
			default:
				dev_err(gsi->dev,
					"unrecognized interrupt type 0x%08x\n",
					gsi_intr);
				break;
			}
		} while (intr_mask);

		if (++cnt > GSI_ISR_MAX_ITER) {
			dev_err(gsi->dev, "interrupt flood\n");
			break;
		}
	}

	return IRQ_HANDLED;
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
					 struct gsi_event *event)
{
	u32 tre_offset;
	u32 tre_index;

	/* Event xfer_ptr records the TRE it's associated with */
	tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

	return gsi_channel_trans_mapped(channel, tre_index);
}

/**
 * gsi_evt_ring_rx_update() - Record lengths of received data
 * @evt_ring:	Event ring associated with channel that received packets
 * @index:	Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer.  Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked.  The ring's index field
 * identifies the first entry in need of processing.  The index provided is
 * the first *unfilled* event in the ring (following the last filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction pool.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
{
	struct gsi_channel *channel = evt_ring->channel;
	struct gsi_ring *ring = &evt_ring->ring;
	struct gsi_trans_info *trans_info;
	struct gsi_event *event_done;
	struct gsi_event *event;
	struct gsi_trans *trans;
	u32 trans_count = 0;
	u32 byte_count = 0;
	u32 event_avail;
	u32 old_index;

	trans_info = &channel->trans_info;

	/* We'll start with the oldest un-processed event.  RX channels
	 * replenish receive buffers in single-TRE transactions, so we
	 * can just map that event to its transaction.  Transactions
	 * associated with completion events are consecutive.
	 */
	old_index = ring->index;
	event = gsi_ring_virt(ring, old_index);
	trans = gsi_event_trans(channel, event);

	/* Compute the number of events to process before we wrap,
	 * and determine when we'll be done processing events.
	 */
	event_avail = ring->count - old_index % ring->count;
	event_done = gsi_ring_virt(ring, index);
	do {
		trans->len = __le16_to_cpu(event->len);
		byte_count += trans->len;
		trans_count++;

		/* Move on to the next event and transaction */
		if (--event_avail)
			event++;
		else
			event = gsi_ring_virt(ring, 0);
		trans = gsi_trans_pool_next(&trans_info->pool, trans);
	} while (event != event_done);

	/* We record RX bytes when they are received */
	channel->byte_count += byte_count;
	channel->trans_count += trans_count;
}
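
/* Wrap-around example for the loop above (hypothetical numbers): with a
 * ring of 8 elements, an old_index of 6, and a hardware-reported index of
 * 2, event_avail starts at 2; the loop handles events 6 and 7, wraps via
 * gsi_ring_virt(ring, 0), and finishes after events 0 and 1.
 */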

/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
	u32 size = count * GSI_RING_ELEMENT_SIZE;
	struct device *dev = gsi->dev;
	dma_addr_t addr;

	/* Hardware requires a 2^n ring size, with alignment equal to size.
	 * The DMA address returned by dma_alloc_coherent() is guaranteed to
	 * be aligned to a power-of-2 number of pages, which satisfies the
	 * requirement.
	 */
	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (!ring->virt)
		return -ENOMEM;

	ring->addr = addr;
	ring->count = count;

	return 0;
}

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
	u32 evt_ring_id;

	if (gsi->event_bitmap == ~0U) {
		dev_err(gsi->dev, "event rings exhausted\n");
		return -ENOSPC;
	}

	evt_ring_id = ffz(gsi->event_bitmap);
	gsi->event_bitmap |= BIT(evt_ring_id);

	return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
	gsi->event_bitmap &= ~BIT(evt_ring_id);
}

/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
	struct gsi_ring *tre_ring = &channel->tre_ring;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	u32 val;

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}

/* Consult hardware, move any newly completed transactions to completed list */
static void gsi_channel_update(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	struct gsi_trans *trans;
	struct gsi_ring *ring;
	u32 offset;
	u32 index;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	ring = &evt_ring->ring;

	/* See if there's anything new to process; if not, we're done.  Note
	 * that index always refers to an entry *within* the event ring.
	 */
	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
	if (index == ring->index % ring->count)
		return;

	/* Get the transaction for the latest completed event.  Take a
	 * reference to keep it from completing before we give the events
	 * for this and previous transactions back to the hardware.
	 */
	trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
	refcount_inc(&trans->refcount);

	/* For RX channels, update each completed transaction with the number
	 * of bytes that were actually received.  For TX channels, report
	 * the number of transactions and bytes this completion represents
	 * up the network stack.
	 */
	if (channel->toward_ipa)
		gsi_channel_tx_update(channel, trans);
	else
		gsi_evt_ring_rx_update(evt_ring, index);

	gsi_trans_move_complete(trans);

	/* Tell the hardware we've handled these events */
	gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);

	gsi_trans_free(trans);
}

/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel:	Channel to be polled
 *
 * Return:	Transaction pointer, or null if none are available
 *
 * This function returns the first entry on a channel's completed transaction
 * list.  If that list is empty, the hardware is consulted to determine
 * whether any new transactions have completed.  If so, they're moved to the
 * completed list and the new first entry is returned.  If there are no more
 * completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the first transaction from the completed list */
	trans = gsi_channel_trans_complete(channel);
	if (!trans) {
		/* List is empty; see if there's more to do */
		gsi_channel_update(channel);
		trans = gsi_channel_trans_complete(channel);
	}

	if (trans)
		gsi_trans_move_polled(trans);

	return trans;
}

/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi:	NAPI structure for the channel
 * @budget:	Budget supplied by NAPI core
 *
 * Return:	Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more.  Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count = 0;

	channel = container_of(napi, struct gsi_channel, napi);
	while (count < budget) {
		struct gsi_trans *trans;

		count++;
		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		gsi_trans_complete(trans);
	}

	if (count < budget) {
		napi_complete(&channel->napi);
		gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
	}

	return count;
}

/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available, clear bits can be used.  This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

	return event_bitmap;
}
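
/* Worked example (hypothetical numbers): if the hardware supported 20
 * event rings, gsi_event_bitmap_init(20) would mark bits 20 and above
 * unavailable, then also reserve bits 10..16 for MHI, leaving event ids
 * 0..9 and 17..19 free for gsi_evt_ring_id_alloc() to hand out.
 */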

/* Setup function for event rings */
static void gsi_evt_ring_setup(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Inverse of gsi_evt_ring_setup() */
static void gsi_evt_ring_teardown(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
				 bool legacy)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;
	int ret;

	if (!channel->gsi)
		return 0;	/* Ignore uninitialized channels */

	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
	if (ret)
		return ret;

	gsi_evt_ring_program(gsi, evt_ring_id);

	ret = gsi_channel_alloc_command(gsi, channel_id);
	if (ret)
		goto err_evt_ring_de_alloc;

	gsi_channel_program(channel, legacy);

	if (channel->toward_ipa)
		netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
				  gsi_channel_poll, NAPI_POLL_WEIGHT);
	else
		netif_napi_add(&gsi->dummy_dev, &channel->napi,
			       gsi_channel_poll, NAPI_POLL_WEIGHT);

	return 0;

err_evt_ring_de_alloc:
	/* We've done nothing with the event ring yet so don't reset */
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_setup_one() */
static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;

	if (!channel->gsi)
		return;		/* Ignore uninitialized channels */

	netif_napi_del(&channel->napi);

	gsi_channel_deprogram(channel);
	gsi_channel_de_alloc_command(gsi, channel_id);
	gsi_evt_ring_reset_command(gsi, evt_ring_id);
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}

static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
			       enum gsi_generic_cmd_opcode opcode)
{
	struct completion *completion = &gsi->completion;
	u32 val;

	/* First zero the result code field */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	val &= ~GENERIC_EE_RESULT_FMASK;
	iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);

	/* Now issue the command */
	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);

	if (gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion))
		return 0;	/* Success! */

	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
		opcode, channel_id);

	return -ETIMEDOUT;
}

static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
	return gsi_generic_command(gsi, channel_id,
				   GSI_GENERIC_ALLOCATE_CHANNEL);
}

static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
{
	int ret;

	ret = gsi_generic_command(gsi, channel_id, GSI_GENERIC_HALT_CHANNEL);
	if (ret)
		dev_err(gsi->dev, "error %d halting modem channel %u\n",
			ret, channel_id);
}

/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi, bool legacy)
{
	u32 channel_id = 0;
	u32 mask;
	int ret;

	gsi_evt_ring_setup(gsi);
	gsi_irq_enable(gsi);

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_setup_one(gsi, channel_id, legacy);
		if (ret)
			goto err_unwind;
	} while (++channel_id < gsi->channel_count);

	/* Make sure no channels were defined that the hardware does not
	 * support.
	 */
	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
		struct gsi_channel *channel = &gsi->channel[channel_id++];

		if (!channel->gsi)
			continue;	/* Ignore uninitialized channels */

		ret = -EINVAL;
		dev_err(gsi->dev, "channel %u not supported by hardware\n",
			channel_id - 1);
		channel_id = gsi->channel_count;
		goto err_unwind;
	}

	/* Allocate modem channels if necessary */
	mask = gsi->modem_channel_bitmap;
	while (mask) {
		u32 modem_channel_id = __ffs(mask);

		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
		if (ret)
			goto err_unwind_modem;

		/* Clear bit from mask only after success (for unwind) */
		mask ^= BIT(modem_channel_id);
	}

	mutex_unlock(&gsi->mutex);

	return 0;

err_unwind_modem:
	/* Compute which modem channels need to be deallocated */
	mask ^= gsi->modem_channel_bitmap;
	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

err_unwind:
	while (channel_id--)
		gsi_channel_teardown_one(gsi, channel_id);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
	gsi_evt_ring_teardown(gsi);

	return ret;
}

/* Inverse of gsi_channel_setup() */
static void gsi_channel_teardown(struct gsi *gsi)
{
	u32 mask = gsi->modem_channel_bitmap;
	u32 channel_id;

	mutex_lock(&gsi->mutex);

	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

	channel_id = gsi->channel_count - 1;
	do
		gsi_channel_teardown_one(gsi, channel_id);
	while (channel_id--);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
	gsi_evt_ring_teardown(gsi);
}

/* Setup function for GSI.  GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi, bool legacy)
{
	struct device *dev = gsi->dev;
	u32 val;

	/* Here is where we first touch the GSI hardware */
	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
	if (!(val & ENABLED_FMASK)) {
		dev_err(dev, "GSI has not been enabled\n");
		return -EIO;
	}

	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);

	gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
	if (!gsi->channel_count) {
		dev_err(dev, "GSI reports zero channels supported\n");
		return -EINVAL;
	}
	if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u channels; hardware supports %u\n",
			 GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
	}

	gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
	if (!gsi->evt_ring_count) {
		dev_err(dev, "GSI reports zero event rings supported\n");
		return -EINVAL;
	}
	if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u event rings; hardware supports %u\n",
			 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
	}

	/* Initialize the error log */
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);

	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);

	return gsi_channel_setup(gsi, legacy);
}

/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
	gsi_channel_teardown(gsi);
}

/* Initialize a channel's event ring */
static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
{
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	int ret;

	ret = gsi_evt_ring_id_alloc(gsi);
	if (ret < 0)
		return ret;
	channel->evt_ring_id = ret;

	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
	evt_ring->channel = channel;

	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
	if (!ret)
		return 0;	/* Success! */

	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
		ret, gsi_channel_id(channel));

	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_evt_ring_init() */
static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	gsi_ring_free(gsi, &evt_ring->ring);
	gsi_evt_ring_id_free(gsi, evt_ring_id);
}

/* Init function for event rings */
static void gsi_evt_ring_init(struct gsi *gsi)
{
	u32 evt_ring_id = 0;

	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
	gsi->event_enable_bitmap = 0;
	do
		init_completion(&gsi->evt_ring[evt_ring_id].completion);
	while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
}

/* Inverse of gsi_evt_ring_init() */
static void gsi_evt_ring_exit(struct gsi *gsi)
{
	/* Nothing to do */
}
1760
1761static bool gsi_channel_data_valid(struct gsi *gsi,
1762				   const struct ipa_gsi_endpoint_data *data)
1763{
1764#ifdef IPA_VALIDATION
1765	u32 channel_id = data->channel_id;
1766	struct device *dev = gsi->dev;
1767
1768	/* Make sure channel ids are in the range the driver supports */
1769	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
1770		dev_err(dev, "bad channel id %u; must be less than %u\n",
1771			channel_id, GSI_CHANNEL_COUNT_MAX);
1772		return false;
1773	}
1774
1775	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
1776		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
1777		return false;
1778	}
1779
1780	if (!data->channel.tlv_count ||
1781	    data->channel.tlv_count > GSI_TLV_MAX) {
1782		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
1783			channel_id, data->channel.tlv_count, GSI_TLV_MAX);
1784		return false;
1785	}
1786
1787	/* We have to allow at least one maximally-sized transaction to
1788	 * be outstanding (which would use tlv_count TREs).  Given how
1789	 * gsi_channel_tre_max() is computed, tre_count has to be almost
1790	 * twice the TLV FIFO size to satisfy this requirement.
1791	 */
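	/* (Illustration with hypothetical numbers, not from any hardware
	 * spec: a TLV FIFO size of 8 requires tre_count >= 2 * 8 - 1 = 15;
	 * since tre_count must also be a power of 2 (checked below), the
	 * smallest value that passes both checks is 16.)
	 */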
1792	if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
1793		dev_err(dev, "channel %u TRE count %u too small for TLV count %u\n",
1794			channel_id, data->channel.tre_count,
1795			data->channel.tlv_count);
1796		return false;
1797	}
1798
1799	if (!is_power_of_2(data->channel.tre_count)) {
1800		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
1801			channel_id, data->channel.tre_count);
1802		return false;
1803	}
1804
1805	if (!is_power_of_2(data->channel.event_count)) {
1806		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
1807			channel_id, data->channel.event_count);
1808		return false;
1809	}
1810#endif /* IPA_VALIDATION */
1811
1812	return true;
1813}
1814
1815/* Init function for a single channel */
1816static int gsi_channel_init_one(struct gsi *gsi,
1817				const struct ipa_gsi_endpoint_data *data,
1818				bool command, bool prefetch)
1819{
1820	struct gsi_channel *channel;
1821	u32 tre_count;
1822	int ret;
1823
1824	if (!gsi_channel_data_valid(gsi, data))
1825		return -EINVAL;
1826
1827	/* Worst case we need an event for every outstanding TRE */
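	/* (Illustrative values only: tre_count = 512 with event_count
	 * = 256 would be limited to 256 TREs, since in the worst case
	 * each outstanding TRE needs its own event ring slot.)
	 */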
1828	if (data->channel.tre_count > data->channel.event_count) {
1829		tre_count = data->channel.event_count;
1830		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
1831			 data->channel_id, tre_count);
1832	} else {
1833		tre_count = data->channel.tre_count;
1834	}
1835
1836	channel = &gsi->channel[data->channel_id];
1837	memset(channel, 0, sizeof(*channel));
1838
1839	channel->gsi = gsi;
1840	channel->toward_ipa = data->toward_ipa;
1841	channel->command = command;
1842	channel->use_prefetch = command && prefetch;
1843	channel->tlv_count = data->channel.tlv_count;
1844	channel->tre_count = tre_count;
1845	channel->event_count = data->channel.event_count;
1846	init_completion(&channel->completion);
1847
1848	ret = gsi_channel_evt_ring_init(channel);
1849	if (ret)
1850		goto err_clear_gsi;
1851
1852	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
1853	if (ret) {
1854		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
1855			ret, data->channel_id);
1856		goto err_channel_evt_ring_exit;
1857	}
1858
1859	ret = gsi_channel_trans_init(gsi, data->channel_id);
1860	if (ret)
1861		goto err_ring_free;
1862
1863	if (command) {
1864		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
1865
1866		ret = ipa_cmd_pool_init(channel, tre_max);
1867	}
1868	if (!ret)
1869		return 0;	/* Success! */
1870
1871	gsi_channel_trans_exit(channel);
1872err_ring_free:
1873	gsi_ring_free(gsi, &channel->tre_ring);
1874err_channel_evt_ring_exit:
1875	gsi_channel_evt_ring_exit(channel);
1876err_clear_gsi:
1877	channel->gsi = NULL;	/* Mark it not (fully) initialized */
1878
1879	return ret;
1880}
1881
1882/* Inverse of gsi_channel_init_one() */
1883static void gsi_channel_exit_one(struct gsi_channel *channel)
1884{
1885	if (!channel->gsi)
1886		return;		/* Ignore uninitialized channels */
1887
1888	if (channel->command)
1889		ipa_cmd_pool_exit(channel);
1890	gsi_channel_trans_exit(channel);
1891	gsi_ring_free(channel->gsi, &channel->tre_ring);
1892	gsi_channel_evt_ring_exit(channel);
1893}
1894
1895/* Init function for channels */
1896static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count,
1897			    const struct ipa_gsi_endpoint_data *data,
1898			    bool modem_alloc)
1899{
1900	int ret = 0;
1901	u32 i;
1902
1903	gsi_evt_ring_init(gsi);
1904
1905	/* The endpoint data array is indexed by endpoint name */
1906	for (i = 0; i < count; i++) {
1907		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;
1908
1909		if (ipa_gsi_endpoint_data_empty(&data[i]))
1910			continue;	/* Skip over empty slots */
1911
1912		/* Mark modem channels to be allocated (hardware workaround) */
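		/* (Channels marked here are allocated by gsi_channel_setup()
		 * and halted again by gsi_channel_teardown())
		 */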
1913		if (data[i].ee_id == GSI_EE_MODEM) {
1914			if (modem_alloc)
1915				gsi->modem_channel_bitmap |=
1916						BIT(data[i].channel_id);
1917			continue;
1918		}
1919
1920		ret = gsi_channel_init_one(gsi, &data[i], command, prefetch);
1921		if (ret)
1922			goto err_unwind;
1923	}
1924
1925	return ret;
1926
1927err_unwind:
1928	while (i--) {
1929		if (ipa_gsi_endpoint_data_empty(&data[i]))
1930			continue;
1931		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
1932			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
1933			continue;
1934		}
1935		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
1936	}
1937	gsi_evt_ring_exit(gsi);
1938
1939	return ret;
1940}
1941
1942/* Inverse of gsi_channel_init() */
1943static void gsi_channel_exit(struct gsi *gsi)
1944{
1945	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;
1946
1947	do
1948		gsi_channel_exit_one(&gsi->channel[channel_id]);
1949	while (channel_id--);
1950	gsi->modem_channel_bitmap = 0;
1951
1952	gsi_evt_ring_exit(gsi);
1953}
1954
1955/* Init function for GSI.  GSI hardware does not need to be "ready" */
1956int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
1957	     u32 count, const struct ipa_gsi_endpoint_data *data,
1958	     bool modem_alloc)
1959{
1960	struct device *dev = &pdev->dev;
1961	struct resource *res;
1962	resource_size_t size;
1963	unsigned int irq;
1964	int ret;
1965
1966	gsi_validate_build();
1967
1968	gsi->dev = dev;
1969
1970	/* The GSI layer performs NAPI on all endpoints.  NAPI requires a
1971	 * network device structure, but the GSI layer does not have one,
1972	 * so we must create a dummy network device for this purpose.
1973	 */
1974	init_dummy_netdev(&gsi->dummy_dev);
1975
1976	ret = platform_get_irq_byname(pdev, "gsi");
1977	if (ret <= 0) {
1978		dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
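		/* IRQ 0 is not valid; map a zero return to -EINVAL */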
1979		return ret ? : -EINVAL;
1980	}
1981	irq = ret;
1982
1983	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
1984	if (ret) {
1985		dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
1986		return ret;
1987	}
1988	gsi->irq = irq;
1989
1990	/* Get GSI memory range and map it */
1991	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
1992	if (!res) {
1993		dev_err(dev, "DT error getting \"gsi\" memory property\n");
1994		ret = -ENODEV;
1995		goto err_free_irq;
1996	}
1997
1998	size = resource_size(res);
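	/* The memory region must lie entirely within 32-bit address
	 * space (both its start and its end must fit in a u32).
	 */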
1999	if (res->start > U32_MAX || size > U32_MAX - res->start) {
2000		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
2001		ret = -EINVAL;
2002		goto err_free_irq;
2003	}
2004
2005	gsi->virt = ioremap(res->start, size);
2006	if (!gsi->virt) {
2007		dev_err(dev, "unable to remap \"gsi\" memory\n");
2008		ret = -ENOMEM;
2009		goto err_free_irq;
2010	}
2011
2012	ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
2013	if (ret)
2014		goto err_iounmap;
2015
2016	mutex_init(&gsi->mutex);
2017	init_completion(&gsi->completion);
2018
2019	return 0;
2020
2021err_iounmap:
2022	iounmap(gsi->virt);
2023err_free_irq:
2024	free_irq(gsi->irq, gsi);
2025
2026	return ret;
2027}
2028
2029/* Inverse of gsi_init() */
2030void gsi_exit(struct gsi *gsi)
2031{
2032	mutex_destroy(&gsi->mutex);
2033	gsi_channel_exit(gsi);
2034	free_irq(gsi->irq, gsi);
2035	iounmap(gsi->virt);
2036}
2037
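/* A sketch of how these entry points pair up over the driver's
 * lifetime (the calling sequence shown is illustrative; each call
 * in the second half is the inverse of one in the first):
 *
 *	gsi_init(gsi, pdev, ...)	- probe time; hardware can be idle
 *	gsi_setup(gsi, legacy)		- once GSI firmware is loaded
 *	...				- channels carry transfers
 *	gsi_teardown(gsi)		- inverse of gsi_setup()
 *	gsi_exit(gsi)			- inverse of gsi_init()
 */
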
2038/* The maximum number of outstanding TREs on a channel.  This limits
2039 * a channel's maximum number of transactions outstanding (worst case
2040 * is one TRE per transaction).
2041 *
2042 * The absolute limit is the number of TREs in the channel's TRE ring,
2043 * and in theory we should be able to use all of them.  But in practice,
2044 * doing that led to the hardware reporting exhaustion of event ring
2045 * slots for writing completion information.  So the hardware limit
2046 * would be (tre_count - 1).
2047 *
2048 * We reduce it a bit further though.  Transaction resource pools are
2049 * sized to be a little larger than this maximum, to allow resource
2050 * allocations to always be contiguous.  The number of entries in a
2051 * TRE ring buffer is a power of 2, and the extra resources in a pool
2052 * tend to nearly double the memory allocated for it.  Reducing the
2053 * maximum number of outstanding TREs allows the number of entries in
2054 * a pool to avoid crossing that power-of-2 boundary, and this can
2055 * substantially reduce pool memory requirements.  The number we
2056 * reduce it by matches the number added in gsi_trans_pool_init().
2057 */
2058u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
2059{
2060	struct gsi_channel *channel = &gsi->channel[channel_id];
2061
2062	/* Hardware limit is channel->tre_count - 1 */
2063	return channel->tre_count - (channel->tlv_count - 1);
2064}
2065
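/* Worked example (hypothetical sizes, not taken from hardware data):
 * with tre_count = 256 and tlv_count = 16, the hardware limit would
 * be 255 outstanding TREs, but this returns 256 - (16 - 1) = 241.
 * A transaction pool padded by the same tlv_count - 1 = 15 entries
 * then holds 241 + 15 = 256 elements--exactly a power of 2, rather
 * than just beyond one.
 */
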
2066/* Returns the maximum number of TREs in a single transaction for a channel */
2067u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
2068{
2069	struct gsi_channel *channel = &gsi->channel[channel_id];
2070
2071	return channel->tlv_count;
2072}
2073