xref: /kernel/linux/linux-6.6/drivers/bus/mhi/ep/ring.c (revision 62306a36)
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2022 Linaro Ltd.
4 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
5 */
6
7#include <linux/mhi_ep.h>
8#include "internal.h"
9
10size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr)
11{
12	return (ptr - ring->rbase) / sizeof(struct mhi_ring_element);
13}
14
15static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring)
16{
17	__le64 rlen;
18
19	memcpy_fromio(&rlen, (void __iomem *) &ring->ring_ctx->generic.rlen, sizeof(u64));
20
21	return le64_to_cpu(rlen) / sizeof(struct mhi_ring_element);
22}
23
24void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring)
25{
26	ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size;
27}
28
29static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
30{
31	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
32	struct device *dev = &mhi_cntrl->mhi_dev->dev;
33	struct mhi_ep_buf_info buf_info = {};
34	size_t start;
35	int ret;
36
37	/* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). */
38	if (ring->type == RING_TYPE_ER)
39		return 0;
40
41	/* No need to cache the ring if write pointer is unmodified */
42	if (ring->wr_offset == end)
43		return 0;
44
45	start = ring->wr_offset;
46	if (start < end) {
47		buf_info.size = (end - start) * sizeof(struct mhi_ring_element);
48		buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
49		buf_info.dev_addr = &ring->ring_cache[start];
50
51		ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
52		if (ret < 0)
53			return ret;
54	} else {
55		buf_info.size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
56		buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
57		buf_info.dev_addr = &ring->ring_cache[start];
58
59		ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
60		if (ret < 0)
61			return ret;
62
63		if (end) {
64			buf_info.host_addr = ring->rbase;
65			buf_info.dev_addr = &ring->ring_cache[0];
66			buf_info.size = end * sizeof(struct mhi_ring_element);
67
68			ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
69			if (ret < 0)
70				return ret;
71		}
72	}
73
74	dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, buf_info.size);
75
76	return 0;
77}
78
79static int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr)
80{
81	size_t wr_offset;
82	int ret;
83
84	wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr);
85
86	/* Cache the host ring till write offset */
87	ret = __mhi_ep_cache_ring(ring, wr_offset);
88	if (ret)
89		return ret;
90
91	ring->wr_offset = wr_offset;
92
93	return 0;
94}
95
/* Re-read the ring doorbell register and cache any elements the host added. */
int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring)
{
	return mhi_ep_cache_ring(ring, mhi_ep_mmio_get_db(ring));
}
104
105/* TODO: Support for adding multiple ring elements to the ring */
106int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
107{
108	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
109	struct device *dev = &mhi_cntrl->mhi_dev->dev;
110	struct mhi_ep_buf_info buf_info = {};
111	size_t old_offset = 0;
112	u32 num_free_elem;
113	__le64 rp;
114	int ret;
115
116	ret = mhi_ep_update_wr_offset(ring);
117	if (ret) {
118		dev_err(dev, "Error updating write pointer\n");
119		return ret;
120	}
121
122	if (ring->rd_offset < ring->wr_offset)
123		num_free_elem = (ring->wr_offset - ring->rd_offset) - 1;
124	else
125		num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1;
126
127	/* Check if there is space in ring for adding at least an element */
128	if (!num_free_elem) {
129		dev_err(dev, "No space left in the ring\n");
130		return -ENOSPC;
131	}
132
133	old_offset = ring->rd_offset;
134	mhi_ep_ring_inc_index(ring);
135
136	dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);
137
138	/* Update rp in ring context */
139	rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
140	memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
141
142	buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
143	buf_info.dev_addr = el;
144	buf_info.size = sizeof(*el);
145
146	return mhi_cntrl->write_to_host(mhi_cntrl, &buf_info);
147}
148
149void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
150{
151	ring->type = type;
152	if (ring->type == RING_TYPE_CMD) {
153		ring->db_offset_h = EP_CRDB_HIGHER;
154		ring->db_offset_l = EP_CRDB_LOWER;
155	} else if (ring->type == RING_TYPE_CH) {
156		ring->db_offset_h = CHDB_HIGHER_n(id);
157		ring->db_offset_l = CHDB_LOWER_n(id);
158		ring->ch_id = id;
159	} else {
160		ring->db_offset_h = ERDB_HIGHER_n(id);
161		ring->db_offset_l = ERDB_LOWER_n(id);
162	}
163}
164
165int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
166			union mhi_ep_ring_ctx *ctx)
167{
168	struct device *dev = &mhi_cntrl->mhi_dev->dev;
169	__le64 val;
170	int ret;
171
172	ring->mhi_cntrl = mhi_cntrl;
173	ring->ring_ctx = ctx;
174	ring->ring_size = mhi_ep_ring_num_elems(ring);
175	memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rbase, sizeof(u64));
176	ring->rbase = le64_to_cpu(val);
177
178	if (ring->type == RING_TYPE_CH)
179		ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex);
180
181	if (ring->type == RING_TYPE_ER)
182		ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec);
183
184	/* During ring init, both rp and wp are equal */
185	memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64));
186	ring->rd_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
187	ring->wr_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val));
188
189	/* Allocate ring cache memory for holding the copy of host ring */
190	ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ring_element), GFP_KERNEL);
191	if (!ring->ring_cache)
192		return -ENOMEM;
193
194	memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.wp, sizeof(u64));
195	ret = mhi_ep_cache_ring(ring, le64_to_cpu(val));
196	if (ret) {
197		dev_err(dev, "Failed to cache ring\n");
198		kfree(ring->ring_cache);
199		return ret;
200	}
201
202	ring->started = true;
203
204	return 0;
205}
206
207void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring)
208{
209	ring->started = false;
210	kfree(ring->ring_cache);
211	ring->ring_cache = NULL;
212}
213