// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor (SCP)
 * and the Application Processors (AP). The Message Handling Unit (MHU)
 * provides a mechanism for inter-processor communication between the SCP's
 * Cortex-M3 and the AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/slab.h>

#include "common.h"
#include "notify.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>

enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
};

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purposes */
static atomic_t transfer_last_id;

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated Message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
};

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct scmi_xfers_info rx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	u8 *protocols_imp;
	struct list_head node;
	int users;
};

#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)

static const int scmi_linux_errmap[] = {
	/* better than a switch-case as long as the error codes are contiguous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
	int err_idx = -errno;

	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
		return scmi_linux_errmap[err_idx];
	return -EIO;
}
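
/*
 * Illustrative only: given the lookup table above, a firmware status code
 * carried in a message translates as follows (values follow directly from
 * scmi_linux_errmap and scmi_to_linux_errno() above):
 *
 *	scmi_to_linux_errno(SCMI_ERR_BUSY);	// returns -EBUSY
 *	scmi_to_linux_errno(-42);		// out of range, returns -EIO
 */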

/**
 * scmi_dump_header_dbg() - Helper to dump a message header.
 *
 * @dev: Device pointer corresponding to the SCMI entity
 * @hdr: pointer to header.
 */
static inline void scmi_dump_header_dbg(struct device *dev,
					struct scmi_msg_hdr *hdr)
{
	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
		hdr->id, hdr->seq, hdr->protocol_id);
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function does not sleep: if no free message slot is available it
 * fails immediately. A spinlock is held briefly to maintain the integrity
 * of the internal data structures.
 *
 * Return: pointer to the allocated xfer on success, an ERR_PTR otherwise.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	unsigned long flags, bit_pos;
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	xfer_id = bit_pos;

	xfer = &minfo->xfer_block[xfer_id];
	xfer->hdr.seq = xfer_id;
	reinit_completion(&xfer->done);
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	return xfer;
}

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}
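
/*
 * Illustrative sketch (not part of the driver) of the token lifecycle the
 * two helpers above implement: the sequence number carried in hdr.seq is
 * simply the index of the bit claimed in xfer_alloc_table, so an incoming
 * response can be matched back to its xfer in O(1):
 *
 *	xfer = scmi_xfer_get(handle, &info->tx_minfo);	// claims bit N
 *	// ... hdr.seq == N travels in the message and in its reply ...
 *	__scmi_xfer_put(&info->tx_minfo, xfer);		// frees bit N for reuse
 */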

static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->rx_minfo;
	ktime_t ts;

	ts = ktime_get_boottime();
	xfer = scmi_xfer_get(cinfo->handle, minfo);
	if (IS_ERR(xfer)) {
		dev_err(dev, "failed to get free message slot (%ld)\n",
			PTR_ERR(xfer));
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	unpack_scmi_header(msg_hdr, &xfer->hdr);
	scmi_dump_header_dbg(dev, &xfer->hdr);
	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    xfer);
	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   MSG_TYPE_NOTIFICATION);

	__scmi_xfer_put(minfo, xfer);

	info->desc->ops->clear_channel(cinfo);
}

static void scmi_handle_response(struct scmi_chan_info *cinfo,
				 u16 xfer_id, u8 msg_type)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;

	/* Are we even expecting this? */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
		dev_err(dev,
			"Delayed Response for %d not expected! Buggy F/W ?\n",
			xfer_id);
		info->desc->ops->clear_channel(cinfo);
		/* It was unexpected, so nobody will clear the xfer if not us */
		__scmi_xfer_put(minfo, xfer);
		return;
	}

	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
	if (msg_type == MSG_TYPE_DELAYED_RESP)
		xfer->rx.len = info->desc->max_msg_size;

	scmi_dump_header_dbg(dev, &xfer->hdr);

	info->desc->ops->fetch_response(cinfo, xfer);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   msg_type);

	if (msg_type == MSG_TYPE_DELAYED_RESP) {
		info->desc->ops->clear_channel(cinfo);
		complete(xfer->async_done);
	} else {
		complete(&xfer->done);
	}
}

/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 *
 * Processes one received message, routing it to the appropriate transfer
 * information and signalling completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence it should be
 * kept as fast as possible.
 */
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

	switch (msg_type) {
	case MSG_TYPE_NOTIFICATION:
		scmi_handle_notification(cinfo, msg_hdr);
		break;
	case MSG_TYPE_COMMAND:
	case MSG_TYPE_DELAYED_RESP:
		scmi_handle_response(cinfo, xfer_id, msg_type);
		break;
	default:
		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
		break;
	}
}
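
/*
 * For reference, the 32-bit message header decoded above is laid out as
 * below, per the SCMI specification (the MSG_XTRACT_* helpers are assumed
 * to be built on GENMASK-based field masks defined in common.h):
 *
 *	| 31..28   | 27..18 | 17..10      | 9..8 | 7..0       |
 *	| reserved | token  | protocol id | type | message id |
 *
 * The token is the sequence number (hdr.seq) used to index xfer_block.
 */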

/**
 * scmi_xfer_put() - Release a transmit message
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: message that was reserved by scmi_xfer_get
 */
void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}

#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)

static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	return info->desc->ops->poll_done(cinfo, xfer) ||
	       ktime_after(ktime_get(), stop);
}

/**
 * scmi_do_xfer() - Do one transfer
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: 0 on success, -ETIMEDOUT if no response was received, or the
 *	corresponding error if the transmit failed or the platform
 *	returned an error status.
 */
int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
	if (unlikely(!cinfo))
		return -EINVAL;

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	ret = info->desc->ops->send_message(cinfo, xfer);
	if (ret < 0) {
		dev_dbg(dev, "Failed to send message %d\n", ret);
		return ret;
	}

	if (xfer->hdr.poll_completion) {
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		if (ktime_before(ktime_get(), stop))
			info->desc->ops->fetch_response(cinfo, xfer);
		else
			ret = -ETIMEDOUT;
	} else {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
			dev_err(dev, "timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	if (info->desc->ops->mark_txdone)
		info->desc->ops->mark_txdone(cinfo, ret);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

	return ret;
}

void scmi_reset_rx_to_maxsz(const struct scmi_handle *handle,
			    struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	xfer->rx.len = info->desc->max_msg_size;
}

#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)

/**
 * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: 0 on success, -ETIMEDOUT if no delayed response was received, or
 *	the corresponding error if the transmit failed or the platform
 *	returned an error status.
 */
int scmi_do_xfer_with_response(const struct scmi_handle *handle,
			       struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	ret = scmi_do_xfer(handle, xfer);
	if (!ret) {
		if (!wait_for_completion_timeout(xfer->async_done, timeout))
			ret = -ETIMEDOUT;
		else if (xfer->hdr.status)
			ret = scmi_to_linux_errno(xfer->hdr.status);
	}

	xfer->async_done = NULL;
	return ret;
}
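
/*
 * Illustrative only: a protocol command is typically built from the xfer
 * helpers in this file. The message id, payload layout and variable names
 * below are hypothetical placeholders; scmi_version_get() further down is
 * a real in-tree instance of the same pattern:
 *
 *	struct scmi_xfer *t;
 *	int ret;
 *
 *	ret = scmi_xfer_get_init(handle, EXAMPLE_MSG_ID, EXAMPLE_PROTOCOL_ID,
 *				 sizeof(__le32), 0, &t);
 *	if (ret)
 *		return ret;
 *
 *	*(__le32 *)t->tx.buf = cpu_to_le32(resource_id);
 *	ret = scmi_do_xfer(handle, t);	// or scmi_do_xfer_with_response()
 *	if (!ret)
 *		val = le32_to_cpu(*(__le32 *)t->rx.buf);
 *
 *	scmi_xfer_put(handle, t);
 */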

/**
 * scmi_xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @handle: Pointer to SCMI entity handle
 * @msg_id: Message identifier
 * @prot_id: Protocol identifier for the message
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(handle, minfo);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.id = msg_id;
	xfer->hdr.protocol_id = prot_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}

/**
 * scmi_version_get() - command to get the revision of the SCMI entity
 *
 * @handle: Pointer to SCMI entity handle
 * @protocol: Protocol identifier for the message
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
		     u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
				 sizeof(*version), &t);
	if (ret)
		return ret;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	scmi_xfer_put(handle, t);
	return ret;
}

void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
				     u8 *prot_imp)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->protocols_imp = prot_imp;
}

static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;
	return false;
}

/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by the caller of the SCMI protocol
 * library. Each successful scmi_handle_get must be balanced by a
 * scmi_handle_put.
 *
 * Return: pointer to handle if successful, NULL on error
 */
struct scmi_handle *scmi_handle_get(struct device *dev)
{
	struct list_head *p;
	struct scmi_info *info;
	struct scmi_handle *handle = NULL;

	mutex_lock(&scmi_list_mutex);
	list_for_each(p, &scmi_list) {
		info = list_entry(p, struct scmi_info, node);
		if (dev->parent == info->dev) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&scmi_list_mutex);

	return handle;
}

/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by the caller of the SCMI protocol
 * library. Each successful scmi_handle_get must be balanced by a
 * scmi_handle_put.
 *
 * Return: 0 if successfully released, -EINVAL if a NULL handle was passed.
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}

static int __scmi_xfer_info_init(struct scmi_info *sinfo,
				 struct scmi_xfers_info *info)
{
	int i;
	struct scmi_xfer *xfer;
	struct device *dev = sinfo->dev;
	const struct scmi_desc *desc = sinfo->desc;

	/* Pre-allocated messages, no more than what hdr.seq can support */
	if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
		dev_err(dev,
			"Invalid maximum messages %d, not in range [1 - %lu]\n",
			desc->max_msg, MSG_TOKEN_MAX);
		return -EINVAL;
	}

	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
					sizeof(*info->xfer_block), GFP_KERNEL);
	if (!info->xfer_block)
		return -ENOMEM;

	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
					      sizeof(long), GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
		xfer->rx.buf = devm_kcalloc(dev, desc->max_msg_size, sizeof(u8),
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
	}

	spin_lock_init(&info->xfer_lock);

	return 0;
}

static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
	int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);

	if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);

	return ret;
}

static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
			   int prot_id, bool tx)
{
	int ret, idx;
	struct scmi_chan_info *cinfo;
	struct idr *idr;

	/* Transmit channel is first entry i.e. index 0 */
	idx = tx ? 0 : 1;
	idr = tx ? &info->tx_idr : &info->rx_idr;

	/* check if already allocated, used for multiple device per protocol */
	cinfo = idr_find(idr, prot_id);
	if (cinfo)
		return 0;

	if (!info->desc->ops->chan_available(dev, idx)) {
		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
			return -EINVAL;
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
	if (ret)
		return ret;

idr_alloc:
	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}

static inline int
scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
	int ret = scmi_chan_setup(info, dev, prot_id, true);

	if (!ret) {
		/* Rx is optional, report only memory errors */
		ret = scmi_chan_setup(info, dev, prot_id, false);
		if (ret && ret != -ENOMEM)
			ret = 0;
	}

	return ret;
}

static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id, const char *name)
{
	struct scmi_device *sdev;

	sdev = scmi_device_create(np, info->dev, prot_id, name);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return;
	}

	if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		scmi_device_destroy(sdev);
		return;
	}

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}

#define MAX_SCMI_DEV_PER_PROTOCOL	2
struct scmi_prot_devnames {
	int protocol_id;
	char *names[MAX_SCMI_DEV_PER_PROTOCOL];
};

static struct scmi_prot_devnames devnames[] = {
	{ SCMI_PROTOCOL_POWER,  { "genpd" },},
	{ SCMI_PROTOCOL_SYSTEM, { "syspower" },},
	{ SCMI_PROTOCOL_PERF,   { "cpufreq" },},
	{ SCMI_PROTOCOL_CLOCK,  { "clocks" },},
	{ SCMI_PROTOCOL_SENSOR, { "hwmon" },},
	{ SCMI_PROTOCOL_RESET,  { "reset" },},
};

static inline void
scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
			     int prot_id)
{
	int loop, cnt;

	for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
		if (devnames[loop].protocol_id != prot_id)
			continue;

		for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
			const char *name = devnames[loop].names[cnt];

			if (name)
				scmi_create_protocol_device(np, info, prot_id,
							    name);
		}
	}
}

static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);
	idr_init(&info->rx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;

	ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	ret = scmi_xfer_info_init(info);
	if (ret)
		return ret;

	if (scmi_notification_init(handle))
		dev_err(dev, "SCMI Notifications NOT available.\n");

	ret = scmi_base_protocol_init(handle);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
		return ret;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(dev, "Out of range protocol %d\n", prot_id);

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		scmi_create_protocol_devices(child, info, prot_id);
	}

	return 0;
}
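
/*
 * Illustrative only: the device tree layout scmi_probe() walks, loosely
 * following the arm,scmi binding (node names, phandles and the example
 * protocol node below are placeholders, not requirements of this file):
 *
 *	firmware {
 *		scmi {
 *			compatible = "arm,scmi";
 *			mboxes = <&mailbox 0 &mailbox 1>;
 *			shmem = <&cpu_scp_lpri &cpu_scp_hpri>;
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			scmi_devpd: protocol@11 {
 *				reg = <0x11>;	// protocol id, e.g. power
 *				#power-domain-cells = <1>;
 *			};
 *		};
 *	};
 *
 * Each child's "reg" is the protocol id, checked against the list the
 * platform reports as implemented via the base protocol.
 */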

void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
{
	idr_remove(idr, id);
}

static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct idr *idr = &info->tx_idr;

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	scmi_notification_exit(&info->handle);

	/* Safe to free channels since no more users */
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->tx_idr);

	idr = &info->rx_idr;
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->rx_idr);

	return ret;
}

static ssize_t protocol_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%u.%u\n", info->version.major_ver,
		       info->version.minor_ver);
}
static DEVICE_ATTR_RO(protocol_version);

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "0x%x\n", info->version.impl_ver);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t vendor_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.vendor_id);
}
static DEVICE_ATTR_RO(vendor_id);

static ssize_t sub_vendor_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
}
static DEVICE_ATTR_RO(sub_vendor_id);

static struct attribute *versions_attrs[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_protocol_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_sub_vendor_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(versions);

/* Each compatible listed below must have a descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
#ifdef CONFIG_MAILBOX
	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#endif
#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
#endif
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);

static struct platform_driver scmi_driver = {
	.driver = {
		   .name = "arm-scmi",
		   .suppress_bind_attrs = true,
		   .of_match_table = scmi_of_match,
		   .dev_groups = versions_groups,
		   },
	.probe = scmi_probe,
	.remove = scmi_remove,
};

static int __init scmi_driver_init(void)
{
	scmi_bus_init();

	scmi_clock_register();
	scmi_perf_register();
	scmi_power_register();
	scmi_reset_register();
	scmi_sensors_register();
	scmi_system_register();

	return platform_driver_register(&scmi_driver);
}
subsys_initcall(scmi_driver_init);

static void __exit scmi_driver_exit(void)
{
	scmi_bus_exit();

	scmi_clock_unregister();
	scmi_perf_unregister();
	scmi_power_unregister();
	scmi_reset_unregister();
	scmi_sensors_unregister();
	scmi_system_unregister();

	platform_driver_unregister(&scmi_driver);
}
module_exit(scmi_driver_exit);

MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");