// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 *
 * I3C HCI v1.0/v1.1 Command Descriptor Handling
 */

#include <linux/bitfield.h>
#include <linux/i3c/master.h>

#include "hci.h"
#include "cmd.h"
#include "dat.h"
#include "dct.h"


/*
 * Address Assignment Command
 */

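/*
 * Word 0 of the descriptor: TOC and ROC request bus termination and a
 * response descriptor on completion, DEV_COUNT is the number of addresses
 * to assign, DEV_INDEX selects the DAT entry holding the address to hand
 * out, CMD carries the CCC value (ENTDAA or SETDASA) and TID tags the
 * command so its response can be matched back to it.
 */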
#define CMD_0_ATTR_A			FIELD_PREP(CMD_0_ATTR, 0x2)

#define CMD_A0_TOC				   W0_BIT_(31)
#define CMD_A0_ROC				   W0_BIT_(30)
#define CMD_A0_DEV_COUNT(v)		FIELD_PREP(W0_MASK(29, 26), v)
#define CMD_A0_DEV_INDEX(v)		FIELD_PREP(W0_MASK(20, 16), v)
#define CMD_A0_CMD(v)			FIELD_PREP(W0_MASK(14,  7), v)
#define CMD_A0_TID(v)			FIELD_PREP(W0_MASK( 6,  3), v)

/*
 * Immediate Data Transfer Command
 */

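/*
 * An Immediate command carries its write payload (at most 4 bytes, or a
 * defining byte plus up to 3 data bytes) directly in the second word of
 * the descriptor: DTT encodes how many of those inline bytes are valid
 * and CP flags that CMD holds a CCC value.
 */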
#define CMD_0_ATTR_I			FIELD_PREP(CMD_0_ATTR, 0x1)

#define CMD_I1_DATA_BYTE_4(v)		FIELD_PREP(W1_MASK(63, 56), v)
#define CMD_I1_DATA_BYTE_3(v)		FIELD_PREP(W1_MASK(55, 48), v)
#define CMD_I1_DATA_BYTE_2(v)		FIELD_PREP(W1_MASK(47, 40), v)
#define CMD_I1_DATA_BYTE_1(v)		FIELD_PREP(W1_MASK(39, 32), v)
#define CMD_I1_DEF_BYTE(v)		FIELD_PREP(W1_MASK(39, 32), v)
#define CMD_I0_TOC				   W0_BIT_(31)
#define CMD_I0_ROC				   W0_BIT_(30)
#define CMD_I0_RNW				   W0_BIT_(29)
#define CMD_I0_MODE(v)			FIELD_PREP(W0_MASK(28, 26), v)
#define CMD_I0_DTT(v)			FIELD_PREP(W0_MASK(25, 23), v)
#define CMD_I0_DEV_INDEX(v)		FIELD_PREP(W0_MASK(20, 16), v)
#define CMD_I0_CP				   W0_BIT_(15)
#define CMD_I0_CMD(v)			FIELD_PREP(W0_MASK(14,  7), v)
#define CMD_I0_TID(v)			FIELD_PREP(W0_MASK( 6,  3), v)

/*
 * Regular Data Transfer Command
 */

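/*
 * A Regular command moves its payload through a separate data buffer
 * (PIO or DMA) rather than the descriptor itself: only the transfer
 * length, and an optional defining byte flagged by DBP, is encoded here.
 */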
#define CMD_0_ATTR_R			FIELD_PREP(CMD_0_ATTR, 0x0)

#define CMD_R1_DATA_LENGTH(v)		FIELD_PREP(W1_MASK(63, 48), v)
#define CMD_R1_DEF_BYTE(v)		FIELD_PREP(W1_MASK(39, 32), v)
#define CMD_R0_TOC				   W0_BIT_(31)
#define CMD_R0_ROC				   W0_BIT_(30)
#define CMD_R0_RNW				   W0_BIT_(29)
#define CMD_R0_MODE(v)			FIELD_PREP(W0_MASK(28, 26), v)
#define CMD_R0_DBP				   W0_BIT_(25)
#define CMD_R0_DEV_INDEX(v)		FIELD_PREP(W0_MASK(20, 16), v)
#define CMD_R0_CP				   W0_BIT_(15)
#define CMD_R0_CMD(v)			FIELD_PREP(W0_MASK(14,  7), v)
#define CMD_R0_TID(v)			FIELD_PREP(W0_MASK( 6,  3), v)

/*
 * Combo Transfer (Write + Write/Read) Command
 */

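/*
 * A Combo Transfer prepends a sub-offset write phase (one or two bytes,
 * per CMD_C0_16_BIT_SUBOFFSET) to a regular write or read. These macros
 * are kept for completeness; nothing below uses them yet.
 */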
#define CMD_0_ATTR_C			FIELD_PREP(CMD_0_ATTR, 0x3)

#define CMD_C1_DATA_LENGTH(v)		FIELD_PREP(W1_MASK(63, 48), v)
#define CMD_C1_OFFSET(v)		FIELD_PREP(W1_MASK(47, 32), v)
#define CMD_C0_TOC				   W0_BIT_(31)
#define CMD_C0_ROC				   W0_BIT_(30)
#define CMD_C0_RNW				   W0_BIT_(29)
#define CMD_C0_MODE(v)			FIELD_PREP(W0_MASK(28, 26), v)
#define CMD_C0_16_BIT_SUBOFFSET			   W0_BIT_(25)
#define CMD_C0_FIRST_PHASE_MODE			   W0_BIT_(24)
#define CMD_C0_DATA_LENGTH_POSITION(v)	FIELD_PREP(W0_MASK(23, 22), v)
#define CMD_C0_DEV_INDEX(v)		FIELD_PREP(W0_MASK(20, 16), v)
#define CMD_C0_CP				   W0_BIT_(15)
#define CMD_C0_CMD(v)			FIELD_PREP(W0_MASK(14,  7), v)
#define CMD_C0_TID(v)			FIELD_PREP(W0_MASK( 6,  3), v)

/*
 * Internal Control Command
 */

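/*
 * Internal Control commands are directed at the host controller itself
 * rather than the bus, with MIPI_CMD selecting the operation and optional
 * vendor specific data in the second word. They are not used by the code
 * below.
 */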
#define CMD_0_ATTR_M			FIELD_PREP(CMD_0_ATTR, 0x7)

#define CMD_M1_VENDOR_SPECIFIC			   W1_MASK(63, 32)
#define CMD_M0_MIPI_RESERVED			   W0_MASK(31, 12)
#define CMD_M0_MIPI_CMD				   W0_MASK(11,  8)
#define CMD_M0_VENDOR_INFO_PRESENT		   W0_BIT_( 7)
#define CMD_M0_TID(v)			FIELD_PREP(W0_MASK( 6,  3), v)


/* Data Transfer Speed and Mode */
enum hci_cmd_mode {
	MODE_I3C_SDR0		= 0x0,
	MODE_I3C_SDR1		= 0x1,
	MODE_I3C_SDR2		= 0x2,
	MODE_I3C_SDR3		= 0x3,
	MODE_I3C_SDR4		= 0x4,
	MODE_I3C_HDR_TSx	= 0x5,
	MODE_I3C_HDR_DDR	= 0x6,
	MODE_I3C_HDR_BT		= 0x7,
	MODE_I3C_Fm_FmP		= 0x8,
	MODE_I2C_Fm		= 0x0,
	MODE_I2C_FmP		= 0x1,
	MODE_I2C_UD1		= 0x2,
	MODE_I2C_UD2		= 0x3,
	MODE_I2C_UD3		= 0x4,
};

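/*
 * The MODE_I2C_* values above deliberately overlap the MODE_I3C_* ones:
 * the field is interpreted according to whether the addressed device is
 * an I3C or a legacy I2C device.
 *
 * Pick the fastest SDR mode that still honors the configured SCL rate:
 * SDR0 is the full-speed (12.5 MHz) mode and SDR1..SDR4 are progressively
 * slower fallbacks, with Fm/Fm+ timings used at 2 MHz or below.
 */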
static enum hci_cmd_mode get_i3c_mode(struct i3c_hci *hci)
{
	struct i3c_bus *bus = i3c_master_get_bus(&hci->master);

	if (bus->scl_rate.i3c >= 12500000)
		return MODE_I3C_SDR0;
	if (bus->scl_rate.i3c > 8000000)
		return MODE_I3C_SDR1;
	if (bus->scl_rate.i3c > 6000000)
		return MODE_I3C_SDR2;
	if (bus->scl_rate.i3c > 4000000)
		return MODE_I3C_SDR3;
	if (bus->scl_rate.i3c > 2000000)
		return MODE_I3C_SDR4;
	return MODE_I3C_Fm_FmP;
}

static enum hci_cmd_mode get_i2c_mode(struct i3c_hci *hci)
{
	struct i3c_bus *bus = i3c_master_get_bus(&hci->master);

	if (bus->scl_rate.i2c >= 1000000)
		return MODE_I2C_FmP;
	return MODE_I2C_Fm;
}

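/*
 * Pack up to 4 bytes of payload into the second word of an Immediate
 * command descriptor. The switch deliberately falls through so that a
 * length of N fills data bytes N down to 1, and xfer->data is cleared
 * since nothing is left for the data transfer machinery to move.
 */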
static void fill_data_bytes(struct hci_xfer *xfer, u8 *data,
			    unsigned int data_len)
{
	xfer->cmd_desc[1] = 0;
	switch (data_len) {
	case 4:
		xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_4(data[3]);
		fallthrough;
	case 3:
		xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_3(data[2]);
		fallthrough;
	case 2:
		xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_2(data[1]);
		fallthrough;
	case 1:
		xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_1(data[0]);
		fallthrough;
	case 0:
		break;
	}
	/* we consumed all the data with the cmd descriptor */
	xfer->data = NULL;
}

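/*
 * Build the command descriptor for a CCC. A directed CCC needs the DAT
 * index of the addressed device while a broadcast CCC leaves it at 0.
 * Writes of 4 bytes or less fit in an Immediate command, everything else
 * becomes a Regular transfer.
 */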
static int hci_cmd_v1_prep_ccc(struct i3c_hci *hci,
			       struct hci_xfer *xfer,
			       u8 ccc_addr, u8 ccc_cmd, bool raw)
{
	unsigned int dat_idx = 0;
	enum hci_cmd_mode mode = get_i3c_mode(hci);
	u8 *data = xfer->data;
	unsigned int data_len = xfer->data_len;
	bool rnw = xfer->rnw;
	int ret;

	/* this should never happen */
	if (WARN_ON(raw))
		return -EINVAL;

	if (ccc_addr != I3C_BROADCAST_ADDR) {
		ret = mipi_i3c_hci_dat_v1.get_index(hci, ccc_addr);
		if (ret < 0)
			return ret;
		dat_idx = ret;
	}

	xfer->cmd_tid = hci_get_tid();

	if (!rnw && data_len <= 4) {
		/* we use an Immediate Data Transfer Command */
		xfer->cmd_desc[0] =
			CMD_0_ATTR_I |
			CMD_I0_TID(xfer->cmd_tid) |
			CMD_I0_CMD(ccc_cmd) | CMD_I0_CP |
			CMD_I0_DEV_INDEX(dat_idx) |
			CMD_I0_DTT(data_len) |
			CMD_I0_MODE(mode);
		fill_data_bytes(xfer, data, data_len);
	} else {
		/* we use a Regular Data Transfer Command */
		xfer->cmd_desc[0] =
			CMD_0_ATTR_R |
			CMD_R0_TID(xfer->cmd_tid) |
			CMD_R0_CMD(ccc_cmd) | CMD_R0_CP |
			CMD_R0_DEV_INDEX(dat_idx) |
			CMD_R0_MODE(mode) |
			(rnw ? CMD_R0_RNW : 0);
		xfer->cmd_desc[1] =
			CMD_R1_DATA_LENGTH(data_len);
	}

	return 0;
}

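/*
 * Private SDR transfers use the same Immediate/Regular split as CCCs,
 * minus CP/CMD since no command code is being sent.
 */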
static void hci_cmd_v1_prep_i3c_xfer(struct i3c_hci *hci,
				     struct i3c_dev_desc *dev,
				     struct hci_xfer *xfer)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	unsigned int dat_idx = dev_data->dat_idx;
	enum hci_cmd_mode mode = get_i3c_mode(hci);
	u8 *data = xfer->data;
	unsigned int data_len = xfer->data_len;
	bool rnw = xfer->rnw;

	xfer->cmd_tid = hci_get_tid();

	if (!rnw && data_len <= 4) {
		/* we use an Immediate Data Transfer Command */
		xfer->cmd_desc[0] =
			CMD_0_ATTR_I |
			CMD_I0_TID(xfer->cmd_tid) |
			CMD_I0_DEV_INDEX(dat_idx) |
			CMD_I0_DTT(data_len) |
			CMD_I0_MODE(mode);
		fill_data_bytes(xfer, data, data_len);
	} else {
		/* we use a Regular Data Transfer Command */
		xfer->cmd_desc[0] =
			CMD_0_ATTR_R |
			CMD_R0_TID(xfer->cmd_tid) |
			CMD_R0_DEV_INDEX(dat_idx) |
			CMD_R0_MODE(mode) |
			(rnw ? CMD_R0_RNW : 0);
		xfer->cmd_desc[1] =
			CMD_R1_DATA_LENGTH(data_len);
	}
}

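/*
 * Same logic for legacy I2C devices, with the MODE field carrying one of
 * the MODE_I2C_* values instead.
 */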
static void hci_cmd_v1_prep_i2c_xfer(struct i3c_hci *hci,
				     struct i2c_dev_desc *dev,
				     struct hci_xfer *xfer)
{
	struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);
	unsigned int dat_idx = dev_data->dat_idx;
	enum hci_cmd_mode mode = get_i2c_mode(hci);
	u8 *data = xfer->data;
	unsigned int data_len = xfer->data_len;
	bool rnw = xfer->rnw;

	xfer->cmd_tid = hci_get_tid();

	if (!rnw && data_len <= 4) {
		/* we use an Immediate Data Transfer Command */
		xfer->cmd_desc[0] =
			CMD_0_ATTR_I |
			CMD_I0_TID(xfer->cmd_tid) |
			CMD_I0_DEV_INDEX(dat_idx) |
			CMD_I0_DTT(data_len) |
			CMD_I0_MODE(mode);
		fill_data_bytes(xfer, data, data_len);
	} else {
		/* we use a Regular Data Transfer Command */
		xfer->cmd_desc[0] =
			CMD_0_ATTR_R |
			CMD_R0_TID(xfer->cmd_tid) |
			CMD_R0_DEV_INDEX(dat_idx) |
			CMD_R0_MODE(mode) |
			(rnw ? CMD_R0_RNW : 0);
		xfer->cmd_desc[1] =
			CMD_R1_DATA_LENGTH(data_len);
	}
}

static int hci_cmd_v1_daa(struct i3c_hci *hci)
{
	struct hci_xfer *xfer;
	int ret, dat_idx = -1;
	u8 next_addr = 0;
	u64 pid;
	unsigned int dcr, bcr;
	DECLARE_COMPLETION_ONSTACK(done);

	xfer = hci_alloc_xfer(2);
	if (!xfer)
		return -ENOMEM;

	/*
	 * Simple for now: we allocate a temporary DAT entry, do a single
	 * DAA, register the device which will allocate its own DAT entry
	 * via the core callback, then free the temporary DAT entry.
	 * Loop until there are no more devices to assign an address to.
	 * Yes, there is room for improvement.
	 */
	for (;;) {
		ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
		if (ret < 0)
			break;
		dat_idx = ret;
		ret = i3c_master_get_free_addr(&hci->master, next_addr);
		if (ret < 0)
			break;
		next_addr = ret;

		DBG("next_addr = 0x%02x, DAA using DAT %d", next_addr, dat_idx);
		mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dat_idx, next_addr);
		mipi_i3c_hci_dct_index_reset(hci);

		xfer->cmd_tid = hci_get_tid();
		xfer->cmd_desc[0] =
			CMD_0_ATTR_A |
			CMD_A0_TID(xfer->cmd_tid) |
			CMD_A0_CMD(I3C_CCC_ENTDAA) |
			CMD_A0_DEV_INDEX(dat_idx) |
			CMD_A0_DEV_COUNT(1) |
			CMD_A0_ROC | CMD_A0_TOC;
		xfer->cmd_desc[1] = 0;
		xfer->completion = &done;
		hci->io->queue_xfer(hci, xfer, 1);
		if (!wait_for_completion_timeout(&done, HZ) &&
		    hci->io->dequeue_xfer(hci, xfer, 1)) {
			ret = -ETIME;
			break;
		}
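		/*
		 * A NACK with the response length (i.e. remaining device
		 * count) still at 1 means no device took part in the DAA:
		 * address assignment is complete.
		 */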
		if (RESP_STATUS(xfer[0].response) == RESP_ERR_NACK &&
		    RESP_DATA_LENGTH(xfer->response) == 1) {
			ret = 0;  /* no more devices to be assigned */
			break;
		}
		if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
			ret = -EIO;
			break;
		}

		i3c_hci_dct_get_val(hci, 0, &pid, &dcr, &bcr);
		DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
		    next_addr, pid, dcr, bcr);

		mipi_i3c_hci_dat_v1.free_entry(hci, dat_idx);
		dat_idx = -1;

		/*
		 * TODO: Extend the subsystem layer to allow for registering a
		 * new device and providing its BCR/DCR/PID at the same time.
		 */
		ret = i3c_master_add_i3c_dev_locked(&hci->master, next_addr);
		if (ret)
			break;
	}

	if (dat_idx >= 0)
		mipi_i3c_hci_dat_v1.free_entry(hci, dat_idx);
	hci_free_xfer(xfer, 1);
	return ret;
}

const struct hci_cmd_ops mipi_i3c_hci_cmd_v1 = {
	.prep_ccc		= hci_cmd_v1_prep_ccc,
	.prep_i3c_xfer		= hci_cmd_v1_prep_i3c_xfer,
	.prep_i2c_xfer		= hci_cmd_v1_prep_i2c_xfer,
	.perform_daa		= hci_cmd_v1_daa,
};
