1/* SPDX-License-Identifier: BSD-3-Clause */
2/*
3 * Copyright (c) 2020, MIPI Alliance, Inc.
4 *
5 * Author: Nicolas Pitre <npitre@baylibre.com>
6 *
7 * Common HCI stuff
8 */
9
10#ifndef HCI_H
11#define HCI_H
12
13
/* Handy logging macro to save on line length */
#define DBG(x, ...) pr_devel("%s: " x "\n", __func__, ##__VA_ARGS__)

/*
 * 32-bit word aware bit and mask macros.
 *
 * Register fields are named with absolute bit positions spanning up to
 * four 32-bit words (bits 0..127). These helpers rebase an absolute
 * position into the word (W0..W3) that actually holds it, so the result
 * can be fed to GENMASK()/BIT() which operate within a single word.
 */
#define W0_MASK(h, l)  GENMASK((h) - 0,  (l) - 0)
#define W1_MASK(h, l)  GENMASK((h) - 32, (l) - 32)
#define W2_MASK(h, l)  GENMASK((h) - 64, (l) - 64)
#define W3_MASK(h, l)  GENMASK((h) - 96, (l) - 96)

/* Same for single bit macros (trailing _ to align with W*_MASK width) */
#define W0_BIT_(x)  BIT((x) - 0)
#define W1_BIT_(x)  BIT((x) - 32)
#define W2_BIT_(x)  BIT((x) - 64)
#define W3_BIT_(x)  BIT((x) - 96)
28
29
30struct hci_cmd_ops;
31
32/* Our main structure */
/*
 * Our main structure: per-controller state for a MIPI I3C HCI instance.
 *
 * NOTE(review): the *_regs section names appear to follow the MIPI I3C
 * HCI register section naming (DAT = Device Address Table, DCT = Device
 * Characteristic Table, RHS = Ring Headers Section, PIO = PIO access
 * area) — confirm against the HCI specification.
 */
struct i3c_hci {
	struct i3c_master_controller master;	/* embedded I3C core master */
	void __iomem *base_regs;		/* main HCI register block */
	void __iomem *DAT_regs;			/* Device Address Table */
	void __iomem *DCT_regs;			/* Device Characteristic Table */
	void __iomem *RHS_regs;			/* ring headers (DMA mode) */
	void __iomem *PIO_regs;			/* PIO mode access area */
	void __iomem *EXTCAPS_regs;		/* extended capabilities */
	void __iomem *AUTOCMD_regs;		/* auto-command section */
	void __iomem *DEBUG_regs;		/* debug section */
	const struct hci_io_ops *io;		/* PIO or DMA backend ops */
	void *io_data;				/* private data for ->io */
	const struct hci_cmd_ops *cmd;		/* command format backend */
	atomic_t next_cmd_tid;			/* transaction ID allocator */
	u32 caps;				/* cached capability bits */
	unsigned int quirks;			/* HCI_QUIRK_* bitmask */
	unsigned int DAT_entries;		/* number of DAT entries */
	unsigned int DAT_entry_size;		/* bytes per DAT entry */
	void *DAT_data;				/* private data for DAT handling */
	unsigned int DCT_entries;		/* number of DCT entries */
	unsigned int DCT_entry_size;		/* bytes per DCT entry */
	u8 version_major;			/* HCI version, major part */
	u8 version_minor;			/* HCI version, minor part */
	u8 revision;				/* HCI revision */
	u32 vendor_mipi_id;			/* vendor's MIPI manufacturer ID */
	u32 vendor_version_id;			/* vendor-defined version */
	u32 vendor_product_id;			/* vendor-defined product ID */
	void *vendor_data;			/* vendor-specific private data */
};
62
63
64/*
65 * Structure to represent a master initiated transfer.
66 * The rnw, data and data_len fields must be initialized before calling any
67 * hci->cmd->*() method. The cmd method will initialize cmd_desc[] and
68 * possibly modify (clear) the data field. Then xfer->cmd_desc[0] can
69 * be augmented with CMD_0_ROC and/or CMD_0_TOC.
70 * The completion field needs to be initialized before queueing with
71 * hci->io->queue_xfer(), and requires CMD_0_ROC to be set.
72 */
struct hci_xfer {
	u32 cmd_desc[4];		/* command descriptor words (up to 128 bits) */
	u32 response;			/* response descriptor word, filled on completion */
	bool rnw;			/* true = read transfer, false = write */
	void *data;			/* data buffer (may be cleared by the cmd method) */
	unsigned int data_len;		/* length of @data in bytes */
	unsigned int cmd_tid;		/* transaction ID matching responses to commands */
	struct completion *completion;	/* signaled on completion; needs CMD_0_ROC */
	union {				/* backend-specific bookkeeping (PIO vs DMA) */
		struct {
			/* PIO specific */
			struct hci_xfer *next_xfer;	/* next in transfer queue */
			struct hci_xfer *next_data;	/* next awaiting data FIFO service */
			struct hci_xfer *next_resp;	/* next awaiting a response */
			unsigned int data_left;		/* bytes still to move */
			u32 data_word_before_partial;	/* saved word preceding a partial tail */
		};
		struct {
			/* DMA specific */
			dma_addr_t data_dma;	/* mapped DMA address of @data */
			int ring_number;	/* ring the transfer was queued on */
			int ring_entry;		/* slot index within that ring */
		};
	};
};
98
99static inline struct hci_xfer *hci_alloc_xfer(unsigned int n)
100{
101	return kcalloc(n, sizeof(struct hci_xfer), GFP_KERNEL);
102}
103
/*
 * Free a transfer array obtained from hci_alloc_xfer().
 * @n is accepted for symmetry with hci_alloc_xfer() but is not needed:
 * the whole array was a single allocation, so one kfree() suffices.
 */
static inline void hci_free_xfer(struct hci_xfer *xfer, unsigned int n)
{
	kfree(xfer);
}
108
109
110/* This abstracts PIO vs DMA operations */
/* This abstracts PIO vs DMA operations */
struct hci_io_ops {
	/* handle an interrupt; returns true if it was for this backend */
	bool (*irq_handler)(struct i3c_hci *hci, unsigned int mask);
	/* queue @n transfers for execution */
	int (*queue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
	/* attempt to remove @n queued transfers; returns success */
	bool (*dequeue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
	/* set up IBI (in-band interrupt) resources for @dev */
	int (*request_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
			   const struct i3c_ibi_setup *req);
	/* release IBI resources previously obtained via ->request_ibi() */
	void (*free_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev);
	/* return a consumed IBI slot to the backend's pool */
	void (*recycle_ibi_slot)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
				struct i3c_ibi_slot *slot);
	/* backend bring-up / tear-down */
	int (*init)(struct i3c_hci *hci);
	void (*cleanup)(struct i3c_hci *hci);
};
123
124extern const struct hci_io_ops mipi_i3c_hci_pio;
125extern const struct hci_io_ops mipi_i3c_hci_dma;
126
127
128/* Our per device master private data */
/* Our per device master private data */
struct i3c_hci_dev_data {
	int dat_idx;	/* index of this device's Device Address Table entry */
	void *ibi_data;	/* IBI state owned by the active io backend */
};
133
134
/* list of quirks (bits for struct i3c_hci.quirks) */
#define HCI_QUIRK_RAW_CCC	BIT(1)	/* CCC framing must be explicit */
137
138
/* global functions (implemented elsewhere; bodies not visible here) */
void mipi_i3c_hci_resume(struct i3c_hci *hci);		/* resume controller operation */
void mipi_i3c_hci_pio_reset(struct i3c_hci *hci);	/* reset PIO machinery */
void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci);	/* reset DCT read index */
143
144#endif
145