// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments System Control Interface Protocol Driver
 *
 * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
 *	Nishanth Menon
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/reboot.h>

#include "ti_sci.h"

/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);
/* Protection for the entire list */
static DEFINE_MUTEX(ti_sci_list_mutex);

/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_len:	Receive message length
 * @xfer_buf:	Preallocated buffer to store receive message
 *		Since we work with request-ACK protocol, we can
 *		reuse the same buffer for the rx path as we
 *		use for the tx path.
 * @done:	completion event
 */
struct ti_sci_xfer {
	struct ti_msgmgr_message tx_message;
	u8 rx_len;
	u8 *xfer_buf;
	struct completion done;
};

/**
 * struct ti_sci_xfers_info - Structure to manage transfer information
 * @sem_xfer_count:	Counting Semaphore for managing max simultaneous
 *			Messages.
 * @xfer_block:		Preallocated Message array
 * @xfer_alloc_table:	Bitmap table for allocated messages.
 *			Index of this bitmap table is also used for message
 *			sequence identifier.
 * @xfer_lock:		Protection for message allocation
 */
struct ti_sci_xfers_info {
	struct semaphore sem_xfer_count;
	struct ti_sci_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	/* protect transfer allocation */
	spinlock_t xfer_lock;
};

/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
 * @max_msgs:		Maximum number of messages that can be pending
 *			simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled.
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
};

/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @nb:		Reboot Notifier block
 * @d:		Debugfs file entry
 * @debug_region: Memory region where the debug messages are available
 * @debug_region_size: Debug region size
 * @debug_buffer: Buffer allocated to copy debug messages.
 * @handle:	Instance of TI SCI handle to send to clients.
 * @cl:		Mailbox Client
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @minfo:	Message info
 * @node:	list head
 * @host_id:	Host ID
 * @users:	Number of users of this instance
 */
struct ti_sci_info {
	struct device *dev;
	struct notifier_block nb;
	const struct ti_sci_desc *desc;
	struct dentry *d;
	void __iomem *debug_region;
	char *debug_buffer;
	size_t debug_region_size;
	struct ti_sci_handle handle;
	struct mbox_client cl;
	struct mbox_chan *chan_tx;
	struct mbox_chan *chan_rx;
	struct ti_sci_xfers_info minfo;
	struct list_head node;
	u8 host_id;
	/* protected by ti_sci_list_mutex */
	int users;
};

#define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)

#ifdef CONFIG_DEBUG_FS

/**
 * ti_sci_debug_show() - Helper to dump the debug log
 * @s:		sequence file pointer
 * @unused:	unused.
 *
 * Return: 0
 */
static int ti_sci_debug_show(struct seq_file *s, void *unused)
{
	struct ti_sci_info *info = s->private;

	memcpy_fromio(info->debug_buffer, info->debug_region,
		      info->debug_region_size);
	/*
	 * We don't trust the firmware to NUL-terminate the last byte (hence
	 * we have allocated 1 extra 0 byte). Since we cannot guarantee any
	 * specific data format for debug messages, we just present the data
	 * in the buffer as is - we expect the messages to be self-explanatory.
	 */
	seq_puts(s, info->debug_buffer);
	return 0;
}

/* Provide the log file operations interface */
DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);

/**
 * ti_sci_debugfs_create() - Create log debug file
 * @pdev:	platform device pointer
 * @info:	Pointer to SCI entity information
 *
 * Return: 0 if all went fine, else corresponding error.
 */
static int ti_sci_debugfs_create(struct platform_device *pdev,
				 struct ti_sci_info *info)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	char debug_name[50];

	/* Debug region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "debug_messages");
	info->debug_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(info->debug_region))
		return 0;
	info->debug_region_size = resource_size(res);

	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
					  sizeof(char), GFP_KERNEL);
	if (!info->debug_buffer)
		return -ENOMEM;
	/* Set up NUL termination */
	info->debug_buffer[info->debug_region_size] = 0;

	snprintf(debug_name, sizeof(debug_name), "ti_sci_debug@%s",
		 dev_name(dev));
	info->d = debugfs_create_file(debug_name, 0444, NULL, info,
				      &ti_sci_debug_fops);
	if (IS_ERR(info->d))
		return PTR_ERR(info->d);

	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
		info->debug_region, info->debug_region_size, res);
	return 0;
}

#else /* CONFIG_DEBUG_FS */
static inline int ti_sci_debugfs_create(struct platform_device *dev,
					struct ti_sci_info *info)
{
	return 0;
}

static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
					  struct ti_sci_info *info)
{
}
#endif /* CONFIG_DEBUG_FS */

/**
 * ti_sci_dump_header_dbg() - Helper to dump a message header.
 * @dev:	Device pointer corresponding to the SCI entity
 * @hdr:	pointer to header.
 */
static inline void ti_sci_dump_header_dbg(struct device *dev,
					  struct ti_sci_msg_hdr *hdr)
{
	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
		hdr->type, hdr->host, hdr->seq, hdr->flags);
}

/**
 * ti_sci_rx_callback() - mailbox client callback for receive messages
 * @cl:	client pointer
 * @m:	mailbox message
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
{
	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
	struct device *dev = info->dev;
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_msgmgr_message *mbox_msg = m;
	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
	struct ti_sci_xfer *xfer;
	u8 xfer_id;

	xfer_id = hdr->seq;

	/*
	 * Are we even expecting this?
	 * NOTE: barriers were implicit in locks used for modifying the bitmap
	 */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	/* Is the message of valid length? */
	if (mbox_msg->len > info->desc->max_msg_size) {
		dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
			mbox_msg->len, info->desc->max_msg_size);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}
	if (mbox_msg->len < xfer->rx_len) {
		dev_err(dev, "Recv xfer %zu < expected %d length\n",
			mbox_msg->len, xfer->rx_len);
		ti_sci_dump_header_dbg(dev, hdr);
		return;
	}

	ti_sci_dump_header_dbg(dev, hdr);
	/* Take a copy to the rx buffer.. */
	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
	complete(&xfer->done);
}

/**
 * ti_sci_get_one_xfer() - Allocate one message
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: Valid &struct ti_sci_xfer pointer on success, else an ERR_PTR()
 *	   encoded error.
 */
static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
					       u16 msg_type, u32 msg_flags,
					       size_t tx_message_size,
					       size_t rx_message_size)
{
	struct ti_sci_xfers_info *minfo = &info->minfo;
	struct ti_sci_xfer *xfer;
	struct ti_sci_msg_hdr *hdr;
	unsigned long flags;
	unsigned long bit_pos;
	u8 xfer_id;
	int ret;
	int timeout;

	/* Ensure we have sane transfer sizes */
	if (rx_message_size > info->desc->max_msg_size ||
	    tx_message_size > info->desc->max_msg_size ||
	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
		return ERR_PTR(-ERANGE);

	/*
	 * Ensure we have only a controlled number of pending messages.
	 * Ideally, we might just have to wait for a single message; be
	 * conservative and wait 5 times that..
	 */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
	ret = down_timeout(&minfo->sem_xfer_count, timeout);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msgs);
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/*
	 * We already ensured in probe that we can have max messages that can
	 * fit in hdr.seq - NOTE: this improves access latencies
	 * to predictable O(1) access, BUT, it opens us to risk if
	 * remote misbehaves with corrupted message sequence responses.
	 * If that happens, we are going to be messed up anyways..
	 */
	xfer_id = (u8)bit_pos;

	xfer = &minfo->xfer_block[xfer_id];

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer->tx_message.len = tx_message_size;
	xfer->rx_len = (u8)rx_message_size;

	reinit_completion(&xfer->done);

	hdr->seq = xfer_id;
	hdr->type = msg_type;
	hdr->host = info->host_id;
	hdr->flags = msg_flags;

	return xfer;
}

/**
 * ti_sci_put_one_xfer() - Release a message
 * @minfo:	transfer info pointer
 * @xfer:	message that was reserved by ti_sci_get_one_xfer
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
				struct ti_sci_xfer *xfer)
{
	unsigned long flags;
	struct ti_sci_msg_hdr *hdr;
	u8 xfer_id;

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer_id = hdr->seq;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer_id, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	/* Increment the count for the next user to get through */
	up(&minfo->sem_xfer_count);
}

/**
 * ti_sci_do_xfer() - Do one transfer
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	   return corresponding error, else if all goes well,
 *	   return 0.
 */
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
				 struct ti_sci_xfer *xfer)
{
	int ret;
	int timeout;
	struct device *dev = info->dev;

	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
	if (ret < 0)
		return ret;

	ret = 0;

	/* And we wait for the response. */
	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
	if (!wait_for_completion_timeout(&xfer->done, timeout)) {
		dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
			(void *)_RET_IP_);
		ret = -ETIMEDOUT;
	}
	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(info->chan_tx, ret);

	return ret;
}
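
/*
 * Illustrative sketch (not part of the driver): every command helper below
 * is built from the three helpers above and follows the same
 * allocate -> fill -> transfer -> check-ACK -> release pattern, roughly:
 *
 *	xfer = ti_sci_get_one_xfer(info, msg_type,
 *				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
 *				   sizeof(*req), sizeof(*resp));
 *	req = (struct ti_sci_msg_req_xyz *)xfer->xfer_buf; // fill request
 *	ret = ti_sci_do_xfer(info, xfer);                  // send and wait
 *	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;    // rx reuses buffer
 *	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
 *	ti_sci_put_one_xfer(&info->minfo, xfer);
 *
 * "struct ti_sci_msg_req_xyz" is a placeholder for a concrete request type;
 * the real helpers differ only in the request/response structures they use.
 */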

/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @info:	Pointer to SCI entity information
 *
 * Updates the SCI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
{
	struct device *dev = info->dev;
	struct ti_sci_handle *handle = &info->handle;
	struct ti_sci_version_info *ver = &handle->version;
	struct ti_sci_msg_resp_version *rev_info;
	struct ti_sci_xfer *xfer;
	int ret;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(struct ti_sci_msg_hdr),
				   sizeof(*rev_info));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	ver->abi_major = rev_info->abi_major;
	ver->abi_minor = rev_info->abi_minor;
	ver->firmware_revision = rev_info->firmware_revision;
	strncpy(ver->firmware_description, rev_info->firmware_description,
		sizeof(ver->firmware_description));

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	return ret;
}

/**
 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
 * @r:	pointer to response buffer
 *
 * Return: true if the response was an ACK, else returns false.
 */
static inline bool ti_sci_is_response_ack(void *r)
{
	struct ti_sci_msg_hdr *hdr = r;

	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
}

/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
	req->id = id;
	req->state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to p_state
 * @c_state:	pointer to c_state
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 *clcnt, u32 *resets,
				   u8 *p_state, u8 *c_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_device_state *req;
	struct ti_sci_msg_resp_get_device_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
	req->id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (clcnt)
		*clcnt = resp->context_loss_count;
	if (resets)
		*resets = resp->resets;
	if (p_state)
		*p_state = resp->programmed_state;
	if (c_state)
		*c_state = resp->current_state;
fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
 *			     that can be shared with other hosts.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_get_device_exclusive() - command to request for device managed by
 *				       TISCI that is exclusively owned by the
 *				       requesting host.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
					   u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
 *					TISCI that is exclusively owned by
 *					requesting host.
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
					    u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
}
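
/*
 * Illustrative client-side sketch (not part of the driver): since no
 * refcounting is done here, a client must balance the calls itself via the
 * dev_ops exported through the handle, e.g.:
 *
 *	ret = handle->ops.dev_ops.get_device(handle, dev_id);
 *	// ...use the device...
 *	ret = handle->ops.dev_ops.put_device(handle, dev_id);
 *
 * "dev_id" is a placeholder for a SoC-specific TISCI device identifier.
 */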

/**
 * ti_sci_cmd_dev_is_valid() - Is the device valid
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Return: 0 if all went fine and the device ID is valid, else return
 * appropriate error.
 */
static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
{
	u8 unused;

	/* check the device state which will also tell us if the ID is valid */
	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
}

/**
 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @count:	Pointer to Context Loss counter to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
				    u32 *count)
{
	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
}

/**
 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be idle
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state)
{
	int ret;
	u8 state;

	if (!r_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
	if (ret)
		return ret;

	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be stopped
 * @curr_state:	true if currently stopped.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &p_state,
				      &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be ON
 * @curr_state:	true if currently ON and active
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
				bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &p_state,
				      &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @curr_state:	true if currently transitioning.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
				   bool *curr_state)
{
	int ret;
	u8 state;

	if (!curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
	if (ret)
		return ret;

	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);

	return 0;
}

/**
 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 * @reset_state: Device specific reset bit field
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 reset_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_device_resets *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
	req->id = id;
	req->resets = reset_state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @reset_state: Pointer to reset state to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 *reset_state)
{
	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
				       NULL);
}

/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
				  u32 dev_id, u32 clk_id,
				  u32 flags, u8 state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_state *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->request_state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
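
/*
 * Note on clock identifiers (applies to all clock requests below): the base
 * message format only carries an 8-bit clk_id. Identifiers that do not fit
 * (>= 255) are sent by writing the sentinel 255 into clk_id and the full
 * value into the clk_id_32 extension field, as done above:
 *
 *	if (clk_id < 255) {
 *		req->clk_id = clk_id;
 *	} else {
 *		req->clk_id = 255;
 *		req->clk_id_32 = clk_id;
 *	}
 */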

/**
 * ti_sci_cmd_get_clock_state() - Get clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @programmed_state:	State requested for clock to move to
 * @current_state:	State that the clock is currently in
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
				      u32 dev_id, u32 clk_id,
				      u8 *programmed_state, u8 *current_state)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_state *req;
	struct ti_sci_msg_resp_get_clock_state *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!programmed_state && !current_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
		goto fail;
	}

	if (programmed_state)
		*programmed_state = resp->programmed_state;
	if (current_state)
		*current_state = resp->current_state;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
 * @can_change_freq: 'true' if frequency change is desired, else 'false'
 * @enable_input_term: 'true' if input termination is desired, else 'false'
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
				u32 clk_id, bool needs_ssc,
				bool can_change_freq, bool enable_input_term)
{
	u32 flags = 0;

	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;

	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
				      MSG_CLOCK_SW_STATE_REQ);
}

/**
 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
				 u32 dev_id, u32 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id,
				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
				      MSG_CLOCK_SW_STATE_UNREQ);
}

/**
 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
				u32 dev_id, u32 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id,
				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
				      MSG_CLOCK_SW_STATE_AUTO);
}

/**
 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is auto managed
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
				  u32 dev_id, u32 clk_id, bool *req_state)
{
	u8 state = 0;
	int ret;

	if (!req_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
	if (ret)
		return ret;

	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
	return 0;
}

/**
 * ti_sci_cmd_clk_is_on() - Is the clock ON
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and enabled
 * @curr_state: state indicating if the clock is ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
				u32 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
	return 0;
}

/**
 * ti_sci_cmd_clk_is_off() - Is the clock OFF
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and disabled
 * @curr_state: state indicating if the clock is NOT ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
				 u32 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
	return 0;
}

/**
 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Parent clock identifier to set
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_parent *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	if (parent_id < 255) {
		req->parent_id = parent_id;
	} else {
		req->parent_id = 255;
		req->parent_id_32 = parent_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Current clock parent
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u32 clk_id, u32 *parent_id)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_parent *req;
	struct ti_sci_msg_resp_get_clock_parent *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !parent_id)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (resp->parent_id < 255)
			*parent_id = resp->parent_id;
		else
			*parent_id = resp->parent_id_32;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @num_parents: Returns the number of parents of the current clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
					  u32 dev_id, u32 clk_id,
					  u32 *num_parents)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_num_parents *req;
	struct ti_sci_msg_resp_get_clock_num_parents *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !num_parents)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		if (resp->num_parents < 255)
			*num_parents = resp->num_parents;
		else
			*num_parents = resp->num_parents_32;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @match_freq:	Frequency match in Hz response.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
					 u32 dev_id, u32 clk_id, u64 min_freq,
					 u64 target_freq, u64 max_freq,
					 u64 *match_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_query_clock_freq *req;
	struct ti_sci_msg_resp_query_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !match_freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*match_freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 min_freq,
				   u64 target_freq, u64 max_freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_set_clock_freq *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}
	req->min_freq_hz = min_freq;
	req->target_freq_hz = target_freq;
	req->max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

/**
 * ti_sci_cmd_clk_get_freq() - Get current frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @freq:	Current frequency in Hz
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u32 clk_id, u64 *freq)
{
	struct ti_sci_info *info;
	struct ti_sci_msg_req_get_clock_freq *req;
	struct ti_sci_msg_resp_get_clock_freq *resp;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
	req->dev_id = dev_id;
	if (clk_id < 255) {
		req->clk_id = clk_id;
	} else {
		req->clk_id = 255;
		req->clk_id_32 = clk_id;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;

	if (!ti_sci_is_response_ack(resp))
		ret = -ENODEV;
	else
		*freq = resp->freq_hz;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}

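/**
 * ti_sci_cmd_core_reboot() - Command to request system reset
 *			      (kerneldoc added; the original lacked one)
 * @handle:	pointer to TI SCI handle
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */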
ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)1638 static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
1639 {
1640 struct ti_sci_info *info;
1641 struct ti_sci_msg_req_reboot *req;
1642 struct ti_sci_msg_hdr *resp;
1643 struct ti_sci_xfer *xfer;
1644 struct device *dev;
1645 int ret = 0;
1646
1647 if (IS_ERR(handle))
1648 return PTR_ERR(handle);
1649 if (!handle)
1650 return -EINVAL;
1651
1652 info = handle_to_ti_sci_info(handle);
1653 dev = info->dev;
1654
1655 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
1656 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1657 sizeof(*req), sizeof(*resp));
1658 if (IS_ERR(xfer)) {
1659 ret = PTR_ERR(xfer);
1660 dev_err(dev, "Message alloc failed(%d)\n", ret);
1661 return ret;
1662 }
1663 req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
1664
1665 ret = ti_sci_do_xfer(info, xfer);
1666 if (ret) {
1667 dev_err(dev, "Mbox send fail %d\n", ret);
1668 goto fail;
1669 }
1670
1671 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1672
1673 if (!ti_sci_is_response_ack(resp))
1674 ret = -ENODEV;
1675 else
1676 ret = 0;
1677
1678 fail:
1679 ti_sci_put_one_xfer(&info->minfo, xfer);
1680
1681 return ret;
1682 }
1683
1684 /**
1685 * ti_sci_get_resource_range - Helper to get a range of resources assigned
1686 * to a host. Resource is uniquely identified by
1687 * type and subtype.
1688 * @handle: Pointer to TISCI handle.
1689 * @dev_id: TISCI device ID.
1690 * @subtype: Resource assignment subtype that is being requested
1691 * from the given device.
1692 * @s_host: Host processor ID to which the resources are allocated
1693 * @range_start: Start index of the resource range
1694 * @range_num: Number of resources in the range
1695 *
1696 * Return: 0 if all went fine, else return appropriate error.
1697 */
ti_sci_get_resource_range(const struct ti_sci_handle *handle, u32 dev_id, u8 subtype, u8 s_host, u16 *range_start, u16 *range_num)1698 static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
1699 u32 dev_id, u8 subtype, u8 s_host,
1700 u16 *range_start, u16 *range_num)
1701 {
1702 struct ti_sci_msg_resp_get_resource_range *resp;
1703 struct ti_sci_msg_req_get_resource_range *req;
1704 struct ti_sci_xfer *xfer;
1705 struct ti_sci_info *info;
1706 struct device *dev;
1707 int ret = 0;
1708
1709 if (IS_ERR(handle))
1710 return PTR_ERR(handle);
1711 if (!handle)
1712 return -EINVAL;
1713
1714 info = handle_to_ti_sci_info(handle);
1715 dev = info->dev;
1716
1717 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
1718 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1719 sizeof(*req), sizeof(*resp));
1720 if (IS_ERR(xfer)) {
1721 ret = PTR_ERR(xfer);
1722 dev_err(dev, "Message alloc failed(%d)\n", ret);
1723 return ret;
1724 }
1725
1726 req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
1727 req->secondary_host = s_host;
1728 req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
1729 req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
1730
1731 ret = ti_sci_do_xfer(info, xfer);
1732 if (ret) {
1733 dev_err(dev, "Mbox send fail %d\n", ret);
1734 goto fail;
1735 }
1736
1737 resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
1738
1739 if (!ti_sci_is_response_ack(resp)) {
1740 ret = -ENODEV;
1741 } else if (!resp->range_start && !resp->range_num) {
1742 ret = -ENODEV;
1743 } else {
1744 *range_start = resp->range_start;
1745 *range_num = resp->range_num;
1746 }
1747
1748 fail:
1749 ti_sci_put_one_xfer(&info->minfo, xfer);
1750
1751 return ret;
1752 }
1753
1754 /**
1755 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
1756 *				that is the same as the ti sci interface host.
1757 * @handle: Pointer to TISCI handle.
1758 * @dev_id: TISCI device ID.
1759 * @subtype: Resource assignment subtype that is being requested
1760 * from the given device.
1761 * @range_start: Start index of the resource range
1762 * @range_num: Number of resources in the range
1763 *
1764 * Return: 0 if all went fine, else return appropriate error.
1765 */
1766 static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1767 u32 dev_id, u8 subtype,
1768 u16 *range_start, u16 *range_num)
1769 {
1770 return ti_sci_get_resource_range(handle, dev_id, subtype,
1771 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1772 range_start, range_num);
1773 }
1774
1775 /**
1776 * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
1777 * assigned to a specified host.
1778 * @handle: Pointer to TISCI handle.
1779 * @dev_id: TISCI device ID.
1780 * @subtype: Resource assignment subtype that is being requested
1781 * from the given device.
1782 * @s_host: Host processor ID to which the resources are allocated
1783 * @range_start: Start index of the resource range
1784 * @range_num: Number of resources in the range
1785 *
1786 * Return: 0 if all went fine, else return appropriate error.
1787 */
1788 static
1789 int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1790 u32 dev_id, u8 subtype, u8 s_host,
1791 u16 *range_start, u16 *range_num)
1792 {
1793 return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
1794 range_start, range_num);
1795 }
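/*
 * Usage sketch (dev_id and subtype are placeholders): clients query
 * their share of a resource through the rm_core_ops table; on success
 * this host owns indices [range_start, range_start + range_num):
 *
 *	u16 range_start, range_num;
 *	int ret;
 *
 *	ret = handle->ops.rm_core_ops.get_range(handle, dev_id, subtype,
 *						&range_start, &range_num);
 */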
1796
1797 /**
1798 * ti_sci_manage_irq() - Helper api to configure/release the irq route between
1799 * the requested source and destination
1800 * @handle: Pointer to TISCI handle.
1801 * @valid_params: Bit fields defining the validity of certain params
1802 * @src_id: Device ID of the IRQ source
1803 * @src_index: IRQ source index within the source device
1804 * @dst_id: Device ID of the IRQ destination
1805 * @dst_host_irq: IRQ number of the destination device
1806 * @ia_id: Device ID of the IA, if the IRQ flows through this IA
1807 * @vint: Virtual interrupt to be used within the IA
1808 * @global_event: Global event number to be used for the requesting event
1809 * @vint_status_bit: Virtual interrupt status bit to be used for the event
1810 * @s_host:		Secondary host ID for which the irq/event is being
1811 *			requested.
1812 * @type: Request type irq set or release.
1813 *
1814 * Return: 0 if all went fine, else return appropriate error.
1815 */
1816 static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
1817 u32 valid_params, u16 src_id, u16 src_index,
1818 u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
1819 u16 global_event, u8 vint_status_bit, u8 s_host,
1820 u16 type)
1821 {
1822 struct ti_sci_msg_req_manage_irq *req;
1823 struct ti_sci_msg_hdr *resp;
1824 struct ti_sci_xfer *xfer;
1825 struct ti_sci_info *info;
1826 struct device *dev;
1827 int ret = 0;
1828
1829 if (IS_ERR(handle))
1830 return PTR_ERR(handle);
1831 if (!handle)
1832 return -EINVAL;
1833
1834 info = handle_to_ti_sci_info(handle);
1835 dev = info->dev;
1836
1837 xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1838 sizeof(*req), sizeof(*resp));
1839 if (IS_ERR(xfer)) {
1840 ret = PTR_ERR(xfer);
1841 dev_err(dev, "Message alloc failed(%d)\n", ret);
1842 return ret;
1843 }
1844 req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
1845 req->valid_params = valid_params;
1846 req->src_id = src_id;
1847 req->src_index = src_index;
1848 req->dst_id = dst_id;
1849 req->dst_host_irq = dst_host_irq;
1850 req->ia_id = ia_id;
1851 req->vint = vint;
1852 req->global_event = global_event;
1853 req->vint_status_bit = vint_status_bit;
1854 req->secondary_host = s_host;
1855
1856 ret = ti_sci_do_xfer(info, xfer);
1857 if (ret) {
1858 dev_err(dev, "Mbox send fail %d\n", ret);
1859 goto fail;
1860 }
1861
1862 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1863
1864 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1865
1866 fail:
1867 ti_sci_put_one_xfer(&info->minfo, xfer);
1868
1869 return ret;
1870 }
1871
1872 /**
1873 * ti_sci_set_irq() - Helper api to configure the irq route between the
1874 * requested source and destination
1875 * @handle: Pointer to TISCI handle.
1876 * @valid_params: Bit fields defining the validity of certain params
1877 * @src_id: Device ID of the IRQ source
1878 * @src_index: IRQ source index within the source device
1879 * @dst_id: Device ID of the IRQ destination
1880 * @dst_host_irq: IRQ number of the destination device
1881 * @ia_id: Device ID of the IA, if the IRQ flows through this IA
1882 * @vint: Virtual interrupt to be used within the IA
1883 * @global_event: Global event number to be used for the requesting event
1884 * @vint_status_bit: Virtual interrupt status bit to be used for the event
1885 * @s_host:		Secondary host ID for which the irq/event is being
1886 *			requested.
1887 *
1888 * Return: 0 if all went fine, else return appropriate error.
1889 */
1890 static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
1891 u16 src_id, u16 src_index, u16 dst_id,
1892 u16 dst_host_irq, u16 ia_id, u16 vint,
1893 u16 global_event, u8 vint_status_bit, u8 s_host)
1894 {
1895 	pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
1896 __func__, valid_params, src_id, src_index,
1897 dst_id, dst_host_irq, ia_id, vint, global_event,
1898 vint_status_bit);
1899
1900 return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
1901 dst_id, dst_host_irq, ia_id, vint,
1902 global_event, vint_status_bit, s_host,
1903 TI_SCI_MSG_SET_IRQ);
1904 }
1905
1906 /**
1907 * ti_sci_free_irq() - Helper api to free the irq route between the
1908 * requested source and destination
1909 * @handle: Pointer to TISCI handle.
1910 * @valid_params: Bit fields defining the validity of certain params
1911 * @src_id: Device ID of the IRQ source
1912 * @src_index: IRQ source index within the source device
1913 * @dst_id: Device ID of the IRQ destination
1914 * @dst_host_irq: IRQ number of the destination device
1915 * @ia_id: Device ID of the IA, if the IRQ flows through this IA
1916 * @vint: Virtual interrupt to be used within the IA
1917 * @global_event: Global event number to be used for the requesting event
1918 * @vint_status_bit: Virtual interrupt status bit to be used for the event
1919 * @s_host:		Secondary host ID for which the irq/event is being
1920 *			requested.
1921 *
1922 * Return: 0 if all went fine, else return appropriate error.
1923 */
1924 static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
1925 u16 src_id, u16 src_index, u16 dst_id,
1926 u16 dst_host_irq, u16 ia_id, u16 vint,
1927 u16 global_event, u8 vint_status_bit, u8 s_host)
1928 {
1929 	pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
1930 __func__, valid_params, src_id, src_index,
1931 dst_id, dst_host_irq, ia_id, vint, global_event,
1932 vint_status_bit);
1933
1934 return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
1935 dst_id, dst_host_irq, ia_id, vint,
1936 global_event, vint_status_bit, s_host,
1937 TI_SCI_MSG_FREE_IRQ);
1938 }
1939
1940 /**
1941 * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
1942 * source and destination.
1943 * @handle: Pointer to TISCI handle.
1944 * @src_id: Device ID of the IRQ source
1945 * @src_index: IRQ source index within the source device
1946 * @dst_id: Device ID of the IRQ destination
1947 * @dst_host_irq: IRQ number of the destination device
1950 *
1951 * Return: 0 if all went fine, else return appropriate error.
1952 */
1953 static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
1954 u16 src_index, u16 dst_id, u16 dst_host_irq)
1955 {
1956 u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
1957
1958 return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
1959 dst_host_irq, 0, 0, 0, 0, 0);
1960 }
1961
1962 /**
1963 * ti_sci_cmd_set_event_map() - Configure an event based irq route between the
1964 * requested source and Interrupt Aggregator.
1965 * @handle: Pointer to TISCI handle.
1966 * @src_id: Device ID of the IRQ source
1967 * @src_index: IRQ source index within the source device
1968 * @ia_id: Device ID of the IA, if the IRQ flows through this IA
1969 * @vint: Virtual interrupt to be used within the IA
1970 * @global_event: Global event number to be used for the requesting event
1971 * @vint_status_bit: Virtual interrupt status bit to be used for the event
1972 *
1973 * Return: 0 if all went fine, else return appropriate error.
1974 */
1975 static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
1976 u16 src_id, u16 src_index, u16 ia_id,
1977 u16 vint, u16 global_event,
1978 u8 vint_status_bit)
1979 {
1980 u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
1981 MSG_FLAG_GLB_EVNT_VALID |
1982 MSG_FLAG_VINT_STS_BIT_VALID;
1983
1984 return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
1985 ia_id, vint, global_event, vint_status_bit, 0);
1986 }
1987
1988 /**
1989 * ti_sci_cmd_free_irq() - Free a host irq route between the
1990 *			   requested source and destination.
1991 * @handle: Pointer to TISCI handle.
1992 * @src_id: Device ID of the IRQ source
1993 * @src_index: IRQ source index within the source device
1994 * @dst_id: Device ID of the IRQ destination
1995 * @dst_host_irq: IRQ number of the destination device
1998 *
1999 * Return: 0 if all went fine, else return appropriate error.
2000 */
2001 static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
2002 u16 src_index, u16 dst_id, u16 dst_host_irq)
2003 {
2004 u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
2005
2006 return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
2007 dst_host_irq, 0, 0, 0, 0, 0);
2008 }
2009
2010 /**
2011 * ti_sci_cmd_free_event_map() - Free an event map between the requested source
2012 * and Interrupt Aggregator.
2013 * @handle: Pointer to TISCI handle.
2014 * @src_id: Device ID of the IRQ source
2015 * @src_index: IRQ source index within the source device
2016 * @ia_id: Device ID of the IA, if the IRQ flows through this IA
2017 * @vint: Virtual interrupt to be used within the IA
2018 * @global_event: Global event number to be used for the requesting event
2019 * @vint_status_bit: Virtual interrupt status bit to be used for the event
2020 *
2021 * Return: 0 if all went fine, else return appropriate error.
2022 */
2023 static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
2024 u16 src_id, u16 src_index, u16 ia_id,
2025 u16 vint, u16 global_event,
2026 u8 vint_status_bit)
2027 {
2028 u32 valid_params = MSG_FLAG_IA_ID_VALID |
2029 MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
2030 MSG_FLAG_VINT_STS_BIT_VALID;
2031
2032 return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
2033 ia_id, vint, global_event, vint_status_bit, 0);
2034 }
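/*
 * Hedged usage sketch (all IDs are placeholders): an interrupt client
 * programs an event route at request time and tears it down on release
 * with the same parameters:
 *
 *	ret = handle->ops.rm_irq_ops.set_event_map(handle, src_id, src_index,
 *						   ia_id, vint, global_event,
 *						   vint_status_bit);
 *	...
 *	handle->ops.rm_irq_ops.free_event_map(handle, src_id, src_index,
 *					      ia_id, vint, global_event,
 *					      vint_status_bit);
 */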
2035
2036 /**
2037 * ti_sci_cmd_ring_config() - configure RA ring
2038 * @handle: Pointer to TI SCI handle.
2039 * @valid_params: Bitfield defining validity of ring configuration
2040 * parameters
2041 * @nav_id: Device ID of Navigator Subsystem from which the ring is
2042 * allocated
2043 * @index: Ring index
2044 * @addr_lo: The ring base address lo 32 bits
2045 * @addr_hi: The ring base address hi 32 bits
2046 * @count: Number of ring elements
2047 * @mode: The mode of the ring
2048 * @size: The ring element size.
2049 * @order_id: Specifies the ring's bus order ID
2050 *
2051 * Return: 0 if all went well, else returns appropriate error value.
2052 *
2053 * See @ti_sci_msg_rm_ring_cfg_req for more info.
2054 */
2055 static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
2056 u32 valid_params, u16 nav_id, u16 index,
2057 u32 addr_lo, u32 addr_hi, u32 count,
2058 u8 mode, u8 size, u8 order_id)
2059 {
2060 struct ti_sci_msg_rm_ring_cfg_req *req;
2061 struct ti_sci_msg_hdr *resp;
2062 struct ti_sci_xfer *xfer;
2063 struct ti_sci_info *info;
2064 struct device *dev;
2065 int ret = 0;
2066
2067 if (IS_ERR_OR_NULL(handle))
2068 return -EINVAL;
2069
2070 info = handle_to_ti_sci_info(handle);
2071 dev = info->dev;
2072
2073 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2074 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2075 sizeof(*req), sizeof(*resp));
2076 if (IS_ERR(xfer)) {
2077 ret = PTR_ERR(xfer);
2078 dev_err(dev, "RM_RA:Message config failed(%d)\n", ret);
2079 return ret;
2080 }
2081 req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
2082 req->valid_params = valid_params;
2083 req->nav_id = nav_id;
2084 req->index = index;
2085 req->addr_lo = addr_lo;
2086 req->addr_hi = addr_hi;
2087 req->count = count;
2088 req->mode = mode;
2089 req->size = size;
2090 req->order_id = order_id;
2091
2092 ret = ti_sci_do_xfer(info, xfer);
2093 if (ret) {
2094 dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
2095 goto fail;
2096 }
2097
2098 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2099 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2100
2101 fail:
2102 ti_sci_put_one_xfer(&info->minfo, xfer);
2103 dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", index, ret);
2104 return ret;
2105 }
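/*
 * Note on valid_params (illustrative): each bit gates one request field
 * and firmware ignores fields whose bit is clear, so a caller can update
 * a single attribute. COUNT_VALID below stands in for the real *_VALID
 * macro from ti_sci.h; only the ring element count is changed here:
 *
 *	ret = handle->ops.rm_ring_ops.config(handle, COUNT_VALID, nav_id,
 *					     index, 0, 0, new_count, 0, 0, 0);
 */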
2106
2107 /**
2108 * ti_sci_cmd_ring_get_config() - get RA ring configuration
2109 * @handle: Pointer to TI SCI handle.
2110 * @nav_id: Device ID of Navigator Subsystem from which the ring is
2111 * allocated
2112 * @index: Ring index
2113 * @addr_lo: Returns ring's base address lo 32 bits
2114 * @addr_hi: Returns ring's base address hi 32 bits
2115 * @count: Returns number of ring elements
2116 * @mode: Returns mode of the ring
2117 * @size: Returns ring element size
2118 * @order_id: Returns ring's bus order ID
2119 *
2120 * Return: 0 if all went well, else returns appropriate error value.
2121 *
2122 * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
2123 */
2124 static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
2125 u32 nav_id, u32 index, u8 *mode,
2126 u32 *addr_lo, u32 *addr_hi,
2127 u32 *count, u8 *size, u8 *order_id)
2128 {
2129 struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
2130 struct ti_sci_msg_rm_ring_get_cfg_req *req;
2131 struct ti_sci_xfer *xfer;
2132 struct ti_sci_info *info;
2133 struct device *dev;
2134 int ret = 0;
2135
2136 if (IS_ERR_OR_NULL(handle))
2137 return -EINVAL;
2138
2139 info = handle_to_ti_sci_info(handle);
2140 dev = info->dev;
2141
2142 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
2143 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2144 sizeof(*req), sizeof(*resp));
2145 if (IS_ERR(xfer)) {
2146 ret = PTR_ERR(xfer);
2147 dev_err(dev,
2148 "RM_RA:Message get config failed(%d)\n", ret);
2149 return ret;
2150 }
2151 req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf;
2152 req->nav_id = nav_id;
2153 req->index = index;
2154
2155 ret = ti_sci_do_xfer(info, xfer);
2156 if (ret) {
2157 dev_err(dev, "RM_RA:Mbox get config send fail %d\n", ret);
2158 goto fail;
2159 }
2160
2161 resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf;
2162
2163 if (!ti_sci_is_response_ack(resp)) {
2164 ret = -ENODEV;
2165 } else {
2166 if (mode)
2167 *mode = resp->mode;
2168 if (addr_lo)
2169 *addr_lo = resp->addr_lo;
2170 if (addr_hi)
2171 *addr_hi = resp->addr_hi;
2172 if (count)
2173 *count = resp->count;
2174 if (size)
2175 *size = resp->size;
2176 if (order_id)
2177 *order_id = resp->order_id;
2178 }
2179
2180 fail:
2181 ti_sci_put_one_xfer(&info->minfo, xfer);
2182 dev_dbg(dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
2183 return ret;
2184 }
2185
2186 /**
2187 * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
2188 * @handle: Pointer to TI SCI handle.
2189 * @nav_id: Device ID of Navigator Subsystem which should be used for
2190 * pairing
2191 * @src_thread: Source PSI-L thread ID
2192 * @dst_thread: Destination PSI-L thread ID
2193 *
2194 * Return: 0 if all went well, else returns appropriate error value.
2195 */
2196 static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2197 u32 nav_id, u32 src_thread, u32 dst_thread)
2198 {
2199 struct ti_sci_msg_psil_pair *req;
2200 struct ti_sci_msg_hdr *resp;
2201 struct ti_sci_xfer *xfer;
2202 struct ti_sci_info *info;
2203 struct device *dev;
2204 int ret = 0;
2205
2206 if (IS_ERR(handle))
2207 return PTR_ERR(handle);
2208 if (!handle)
2209 return -EINVAL;
2210
2211 info = handle_to_ti_sci_info(handle);
2212 dev = info->dev;
2213
2214 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2215 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2216 sizeof(*req), sizeof(*resp));
2217 if (IS_ERR(xfer)) {
2218 ret = PTR_ERR(xfer);
2219 dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
2220 return ret;
2221 }
2222 req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
2223 req->nav_id = nav_id;
2224 req->src_thread = src_thread;
2225 req->dst_thread = dst_thread;
2226
2227 ret = ti_sci_do_xfer(info, xfer);
2228 if (ret) {
2229 dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2230 goto fail;
2231 }
2232
2233 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2234 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2235
2236 fail:
2237 ti_sci_put_one_xfer(&info->minfo, xfer);
2238
2239 return ret;
2240 }
2241
2242 /**
2243 * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
2244 * @handle: Pointer to TI SCI handle.
2245 * @nav_id: Device ID of Navigator Subsystem which should be used for
2246 * unpairing
2247 * @src_thread: Source PSI-L thread ID
2248 * @dst_thread: Destination PSI-L thread ID
2249 *
2250 * Return: 0 if all went well, else returns appropriate error value.
2251 */
2252 static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2253 u32 nav_id, u32 src_thread, u32 dst_thread)
2254 {
2255 struct ti_sci_msg_psil_unpair *req;
2256 struct ti_sci_msg_hdr *resp;
2257 struct ti_sci_xfer *xfer;
2258 struct ti_sci_info *info;
2259 struct device *dev;
2260 int ret = 0;
2261
2262 if (IS_ERR(handle))
2263 return PTR_ERR(handle);
2264 if (!handle)
2265 return -EINVAL;
2266
2267 info = handle_to_ti_sci_info(handle);
2268 dev = info->dev;
2269
2270 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2271 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2272 sizeof(*req), sizeof(*resp));
2273 if (IS_ERR(xfer)) {
2274 ret = PTR_ERR(xfer);
2275 dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
2276 return ret;
2277 }
2278 req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
2279 req->nav_id = nav_id;
2280 req->src_thread = src_thread;
2281 req->dst_thread = dst_thread;
2282
2283 ret = ti_sci_do_xfer(info, xfer);
2284 if (ret) {
2285 dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2286 goto fail;
2287 }
2288
2289 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2290 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2291
2292 fail:
2293 ti_sci_put_one_xfer(&info->minfo, xfer);
2294
2295 return ret;
2296 }
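/*
 * Usage sketch (thread IDs are placeholders): a DMA client pairs a PSI-L
 * source thread to a destination thread before starting transfers and
 * unpairs them on teardown:
 *
 *	ret = handle->ops.rm_psil_ops.pair(handle, nav_id, src_thread,
 *					   dst_thread);
 *	...
 *	handle->ops.rm_psil_ops.unpair(handle, nav_id, src_thread,
 *				       dst_thread);
 */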
2297
2298 /**
2299 * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
2300 * @handle: Pointer to TI SCI handle.
2301 * @params: Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
2302 * structure
2303 *
2304 * Return: 0 if all went well, else returns appropriate error value.
2305 *
2306 * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
2307 * more info.
2308 */
2309 static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
2310 const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2311 {
2312 struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
2313 struct ti_sci_msg_hdr *resp;
2314 struct ti_sci_xfer *xfer;
2315 struct ti_sci_info *info;
2316 struct device *dev;
2317 int ret = 0;
2318
2319 if (IS_ERR_OR_NULL(handle))
2320 return -EINVAL;
2321
2322 info = handle_to_ti_sci_info(handle);
2323 dev = info->dev;
2324
2325 xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2326 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2327 sizeof(*req), sizeof(*resp));
2328 if (IS_ERR(xfer)) {
2329 ret = PTR_ERR(xfer);
2330 dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2331 return ret;
2332 }
2333 req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
2334 req->valid_params = params->valid_params;
2335 req->nav_id = params->nav_id;
2336 req->index = params->index;
2337 req->tx_pause_on_err = params->tx_pause_on_err;
2338 req->tx_filt_einfo = params->tx_filt_einfo;
2339 req->tx_filt_pswords = params->tx_filt_pswords;
2340 req->tx_atype = params->tx_atype;
2341 req->tx_chan_type = params->tx_chan_type;
2342 req->tx_supr_tdpkt = params->tx_supr_tdpkt;
2343 req->tx_fetch_size = params->tx_fetch_size;
2344 req->tx_credit_count = params->tx_credit_count;
2345 req->txcq_qnum = params->txcq_qnum;
2346 req->tx_priority = params->tx_priority;
2347 req->tx_qos = params->tx_qos;
2348 req->tx_orderid = params->tx_orderid;
2349 req->fdepth = params->fdepth;
2350 req->tx_sched_priority = params->tx_sched_priority;
2351 req->tx_burst_size = params->tx_burst_size;
2352
2353 ret = ti_sci_do_xfer(info, xfer);
2354 if (ret) {
2355 dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2356 goto fail;
2357 }
2358
2359 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2360 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2361
2362 fail:
2363 ti_sci_put_one_xfer(&info->minfo, xfer);
2364 	dev_dbg(dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2365 return ret;
2366 }
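/*
 * Minimal sketch, assuming a caller-owned config (field values are
 * placeholders): since only fields whose valid_params bit is set are
 * applied, the struct can be zero-initialized and filled sparsely:
 *
 *	struct ti_sci_msg_rm_udmap_tx_ch_cfg cfg = { 0 };
 *
 *	cfg.nav_id = nav_id;
 *	cfg.index = ch_index;
 *	cfg.valid_params = <OR of the *_VALID bits for the fields set>;
 *	ret = handle->ops.rm_udmap_ops.tx_ch_cfg(handle, &cfg);
 */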
2367
2368 /**
2369 * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
2370 * @handle: Pointer to TI SCI handle.
2371 * @params: Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
2372 * structure
2373 *
2374 * Return: 0 if all went well, else returns appropriate error value.
2375 *
2376 * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
2377 * more info.
2378 */
2379 static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
2380 const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2381 {
2382 struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
2383 struct ti_sci_msg_hdr *resp;
2384 struct ti_sci_xfer *xfer;
2385 struct ti_sci_info *info;
2386 struct device *dev;
2387 int ret = 0;
2388
2389 if (IS_ERR_OR_NULL(handle))
2390 return -EINVAL;
2391
2392 info = handle_to_ti_sci_info(handle);
2393 dev = info->dev;
2394
2395 xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2396 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2397 sizeof(*req), sizeof(*resp));
2398 if (IS_ERR(xfer)) {
2399 ret = PTR_ERR(xfer);
2400 dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
2401 return ret;
2402 }
2403 req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
2404 req->valid_params = params->valid_params;
2405 req->nav_id = params->nav_id;
2406 req->index = params->index;
2407 req->rx_fetch_size = params->rx_fetch_size;
2408 req->rxcq_qnum = params->rxcq_qnum;
2409 req->rx_priority = params->rx_priority;
2410 req->rx_qos = params->rx_qos;
2411 req->rx_orderid = params->rx_orderid;
2412 req->rx_sched_priority = params->rx_sched_priority;
2413 req->flowid_start = params->flowid_start;
2414 req->flowid_cnt = params->flowid_cnt;
2415 req->rx_pause_on_err = params->rx_pause_on_err;
2416 req->rx_atype = params->rx_atype;
2417 req->rx_chan_type = params->rx_chan_type;
2418 req->rx_ignore_short = params->rx_ignore_short;
2419 req->rx_ignore_long = params->rx_ignore_long;
2420 req->rx_burst_size = params->rx_burst_size;
2421
2422 ret = ti_sci_do_xfer(info, xfer);
2423 if (ret) {
2424 dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
2425 goto fail;
2426 }
2427
2428 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2429 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2430
2431 fail:
2432 ti_sci_put_one_xfer(&info->minfo, xfer);
2433 dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2434 return ret;
2435 }
2436
2437 /**
2438 * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
2439 * @handle: Pointer to TI SCI handle.
2440 * @params: Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
2441 * structure
2442 *
2443 * Return: 0 if all went well, else returns appropriate error value.
2444 *
2445 * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
2446 * more info.
2447 */
2448 static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
2449 const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2450 {
2451 struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
2452 struct ti_sci_msg_hdr *resp;
2453 struct ti_sci_xfer *xfer;
2454 struct ti_sci_info *info;
2455 struct device *dev;
2456 int ret = 0;
2457
2458 if (IS_ERR_OR_NULL(handle))
2459 return -EINVAL;
2460
2461 info = handle_to_ti_sci_info(handle);
2462 dev = info->dev;
2463
2464 xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2465 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2466 sizeof(*req), sizeof(*resp));
2467 if (IS_ERR(xfer)) {
2468 ret = PTR_ERR(xfer);
2469 dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
2470 return ret;
2471 }
2472 req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
2473 req->valid_params = params->valid_params;
2474 req->nav_id = params->nav_id;
2475 req->flow_index = params->flow_index;
2476 req->rx_einfo_present = params->rx_einfo_present;
2477 req->rx_psinfo_present = params->rx_psinfo_present;
2478 req->rx_error_handling = params->rx_error_handling;
2479 req->rx_desc_type = params->rx_desc_type;
2480 req->rx_sop_offset = params->rx_sop_offset;
2481 req->rx_dest_qnum = params->rx_dest_qnum;
2482 req->rx_src_tag_hi = params->rx_src_tag_hi;
2483 req->rx_src_tag_lo = params->rx_src_tag_lo;
2484 req->rx_dest_tag_hi = params->rx_dest_tag_hi;
2485 req->rx_dest_tag_lo = params->rx_dest_tag_lo;
2486 req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2487 req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2488 req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2489 req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2490 req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2491 req->rx_fdq1_qnum = params->rx_fdq1_qnum;
2492 req->rx_fdq2_qnum = params->rx_fdq2_qnum;
2493 req->rx_fdq3_qnum = params->rx_fdq3_qnum;
2494 req->rx_ps_location = params->rx_ps_location;
2495
2496 ret = ti_sci_do_xfer(info, xfer);
2497 if (ret) {
2498 dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2499 goto fail;
2500 }
2501
2502 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2503 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2504
2505 fail:
2506 ti_sci_put_one_xfer(&info->minfo, xfer);
2507 dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2508 return ret;
2509 }
2510
2511 /**
2512 * ti_sci_cmd_proc_request() - Command to request a physical processor control
2513 * @handle: Pointer to TI SCI handle
2514 * @proc_id: Processor ID this request is for
2515 *
2516 * Return: 0 if all went well, else returns appropriate error value.
2517 */
2518 static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
2519 u8 proc_id)
2520 {
2521 struct ti_sci_msg_req_proc_request *req;
2522 struct ti_sci_msg_hdr *resp;
2523 struct ti_sci_info *info;
2524 struct ti_sci_xfer *xfer;
2525 struct device *dev;
2526 int ret = 0;
2527
2528 if (!handle)
2529 return -EINVAL;
2530 if (IS_ERR(handle))
2531 return PTR_ERR(handle);
2532
2533 info = handle_to_ti_sci_info(handle);
2534 dev = info->dev;
2535
2536 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
2537 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2538 sizeof(*req), sizeof(*resp));
2539 if (IS_ERR(xfer)) {
2540 ret = PTR_ERR(xfer);
2541 dev_err(dev, "Message alloc failed(%d)\n", ret);
2542 return ret;
2543 }
2544 req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
2545 req->processor_id = proc_id;
2546
2547 ret = ti_sci_do_xfer(info, xfer);
2548 if (ret) {
2549 dev_err(dev, "Mbox send fail %d\n", ret);
2550 goto fail;
2551 }
2552
2553 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2554
2555 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2556
2557 fail:
2558 ti_sci_put_one_xfer(&info->minfo, xfer);
2559
2560 return ret;
2561 }
2562
2563 /**
2564 * ti_sci_cmd_proc_release() - Command to release a physical processor control
2565 * @handle: Pointer to TI SCI handle
2566 * @proc_id: Processor ID this request is for
2567 *
2568 * Return: 0 if all went well, else returns appropriate error value.
2569 */
2570 static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
2571 u8 proc_id)
2572 {
2573 struct ti_sci_msg_req_proc_release *req;
2574 struct ti_sci_msg_hdr *resp;
2575 struct ti_sci_info *info;
2576 struct ti_sci_xfer *xfer;
2577 struct device *dev;
2578 int ret = 0;
2579
2580 if (!handle)
2581 return -EINVAL;
2582 if (IS_ERR(handle))
2583 return PTR_ERR(handle);
2584
2585 info = handle_to_ti_sci_info(handle);
2586 dev = info->dev;
2587
2588 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
2589 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2590 sizeof(*req), sizeof(*resp));
2591 if (IS_ERR(xfer)) {
2592 ret = PTR_ERR(xfer);
2593 dev_err(dev, "Message alloc failed(%d)\n", ret);
2594 return ret;
2595 }
2596 req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
2597 req->processor_id = proc_id;
2598
2599 ret = ti_sci_do_xfer(info, xfer);
2600 if (ret) {
2601 dev_err(dev, "Mbox send fail %d\n", ret);
2602 goto fail;
2603 }
2604
2605 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2606
2607 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2608
2609 fail:
2610 ti_sci_put_one_xfer(&info->minfo, xfer);
2611
2612 return ret;
2613 }
2614
2615 /**
2616 * ti_sci_cmd_proc_handover() - Command to handover a physical processor
2617 * control to a host in the processor's access
2618 * control list.
2619 * @handle: Pointer to TI SCI handle
2620 * @proc_id: Processor ID this request is for
2621 * @host_id: Host ID to get the control of the processor
2622 *
2623 * Return: 0 if all went well, else returns appropriate error value.
2624 */
2625 static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
2626 u8 proc_id, u8 host_id)
2627 {
2628 struct ti_sci_msg_req_proc_handover *req;
2629 struct ti_sci_msg_hdr *resp;
2630 struct ti_sci_info *info;
2631 struct ti_sci_xfer *xfer;
2632 struct device *dev;
2633 int ret = 0;
2634
2635 if (!handle)
2636 return -EINVAL;
2637 if (IS_ERR(handle))
2638 return PTR_ERR(handle);
2639
2640 info = handle_to_ti_sci_info(handle);
2641 dev = info->dev;
2642
2643 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
2644 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2645 sizeof(*req), sizeof(*resp));
2646 if (IS_ERR(xfer)) {
2647 ret = PTR_ERR(xfer);
2648 dev_err(dev, "Message alloc failed(%d)\n", ret);
2649 return ret;
2650 }
2651 req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
2652 req->processor_id = proc_id;
2653 req->host_id = host_id;
2654
2655 ret = ti_sci_do_xfer(info, xfer);
2656 if (ret) {
2657 dev_err(dev, "Mbox send fail %d\n", ret);
2658 goto fail;
2659 }
2660
2661 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2662
2663 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2664
2665 fail:
2666 ti_sci_put_one_xfer(&info->minfo, xfer);
2667
2668 return ret;
2669 }
2670
2671 /**
2672 * ti_sci_cmd_proc_set_config() - Command to set the processor boot
2673 * configuration flags
2674 * @handle: Pointer to TI SCI handle
2675 * @proc_id:		Processor ID this request is for
 * @bootvector:		Processor Boot vector (start address)
2676 * @config_flags_set: Configuration flags to be set
2677 * @config_flags_clear: Configuration flags to be cleared.
2678 *
2679 * Return: 0 if all went well, else returns appropriate error value.
2680 */
2681 static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
2682 u8 proc_id, u64 bootvector,
2683 u32 config_flags_set,
2684 u32 config_flags_clear)
2685 {
2686 struct ti_sci_msg_req_set_config *req;
2687 struct ti_sci_msg_hdr *resp;
2688 struct ti_sci_info *info;
2689 struct ti_sci_xfer *xfer;
2690 struct device *dev;
2691 int ret = 0;
2692
2693 if (!handle)
2694 return -EINVAL;
2695 if (IS_ERR(handle))
2696 return PTR_ERR(handle);
2697
2698 info = handle_to_ti_sci_info(handle);
2699 dev = info->dev;
2700
2701 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
2702 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2703 sizeof(*req), sizeof(*resp));
2704 if (IS_ERR(xfer)) {
2705 ret = PTR_ERR(xfer);
2706 dev_err(dev, "Message alloc failed(%d)\n", ret);
2707 return ret;
2708 }
2709 req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
2710 req->processor_id = proc_id;
2711 req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
2712 req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
2713 TI_SCI_ADDR_HIGH_SHIFT;
2714 req->config_flags_set = config_flags_set;
2715 req->config_flags_clear = config_flags_clear;
2716
2717 ret = ti_sci_do_xfer(info, xfer);
2718 if (ret) {
2719 dev_err(dev, "Mbox send fail %d\n", ret);
2720 goto fail;
2721 }
2722
2723 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2724
2725 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2726
2727 fail:
2728 ti_sci_put_one_xfer(&info->minfo, xfer);
2729
2730 return ret;
2731 }
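/*
 * Worked example of the boot vector split above (value illustrative):
 * assuming the usual 32-bit split encoded by TI_SCI_ADDR_LOW_MASK and
 * TI_SCI_ADDR_HIGH_SHIFT, a 64-bit vector of 0x0000000880000000 goes out
 * as bootvector_low = 0x80000000 and bootvector_high = 0x00000008.
 */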
2732
2733 /**
2734 * ti_sci_cmd_proc_set_control() - Command to set the processor boot
2735 * control flags
2736 * @handle: Pointer to TI SCI handle
2737 * @proc_id: Processor ID this request is for
2738 * @control_flags_set: Control flags to be set
2739 * @control_flags_clear: Control flags to be cleared
2740 *
2741 * Return: 0 if all went well, else returns appropriate error value.
2742 */
2743 static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
2744 u8 proc_id, u32 control_flags_set,
2745 u32 control_flags_clear)
2746 {
2747 struct ti_sci_msg_req_set_ctrl *req;
2748 struct ti_sci_msg_hdr *resp;
2749 struct ti_sci_info *info;
2750 struct ti_sci_xfer *xfer;
2751 struct device *dev;
2752 int ret = 0;
2753
2754 if (!handle)
2755 return -EINVAL;
2756 if (IS_ERR(handle))
2757 return PTR_ERR(handle);
2758
2759 info = handle_to_ti_sci_info(handle);
2760 dev = info->dev;
2761
2762 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
2763 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2764 sizeof(*req), sizeof(*resp));
2765 if (IS_ERR(xfer)) {
2766 ret = PTR_ERR(xfer);
2767 dev_err(dev, "Message alloc failed(%d)\n", ret);
2768 return ret;
2769 }
2770 req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
2771 req->processor_id = proc_id;
2772 req->control_flags_set = control_flags_set;
2773 req->control_flags_clear = control_flags_clear;
2774
2775 ret = ti_sci_do_xfer(info, xfer);
2776 if (ret) {
2777 dev_err(dev, "Mbox send fail %d\n", ret);
2778 goto fail;
2779 }
2780
2781 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2782
2783 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2784
2785 fail:
2786 ti_sci_put_one_xfer(&info->minfo, xfer);
2787
2788 return ret;
2789 }
2790
2791 /**
2792 * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
2793 * @handle:	Pointer to TI SCI handle
2794 * @proc_id:	Processor ID this request is for
 * @bv:		Returns the processor boot vector
 * @cfg_flags:	Returns the processor specific configuration flags
 * @ctrl_flags:	Returns the processor specific control flags
 * @sts_flags:	Returns the processor specific status flags
2795 *
2796 * Return: 0 if all went well, else returns appropriate error value.
2797 */
2798 static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
2799 u8 proc_id, u64 *bv, u32 *cfg_flags,
2800 u32 *ctrl_flags, u32 *sts_flags)
2801 {
2802 struct ti_sci_msg_resp_get_status *resp;
2803 struct ti_sci_msg_req_get_status *req;
2804 struct ti_sci_info *info;
2805 struct ti_sci_xfer *xfer;
2806 struct device *dev;
2807 int ret = 0;
2808
2809 if (!handle)
2810 return -EINVAL;
2811 if (IS_ERR(handle))
2812 return PTR_ERR(handle);
2813
2814 info = handle_to_ti_sci_info(handle);
2815 dev = info->dev;
2816
2817 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
2818 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2819 sizeof(*req), sizeof(*resp));
2820 if (IS_ERR(xfer)) {
2821 ret = PTR_ERR(xfer);
2822 dev_err(dev, "Message alloc failed(%d)\n", ret);
2823 return ret;
2824 }
2825 req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
2826 req->processor_id = proc_id;
2827
2828 ret = ti_sci_do_xfer(info, xfer);
2829 if (ret) {
2830 dev_err(dev, "Mbox send fail %d\n", ret);
2831 goto fail;
2832 }
2833
2834 resp = (struct ti_sci_msg_resp_get_status *)xfer->tx_message.buf;
2835
2836 if (!ti_sci_is_response_ack(resp)) {
2837 ret = -ENODEV;
2838 } else {
2839 *bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
2840 (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
2841 TI_SCI_ADDR_HIGH_MASK);
2842 *cfg_flags = resp->config_flags;
2843 *ctrl_flags = resp->control_flags;
2844 *sts_flags = resp->status_flags;
2845 }
2846
2847 fail:
2848 ti_sci_put_one_xfer(&info->minfo, xfer);
2849
2850 return ret;
2851 }
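/*
 * Usage sketch (proc_id is a placeholder): clients read boot state
 * through the proc ops table; the 64-bit boot vector handed back in bv
 * is reassembled from the two 32-bit halves exactly as done above:
 *
 *	u64 bv;
 *	u32 cfg, ctrl, sts;
 *
 *	ret = handle->ops.proc_ops.get_status(handle, proc_id, &bv, &cfg,
 *					      &ctrl, &sts);
 */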
2852
2853 /**
2854 * ti_sci_setup_ops() - Setup the operations structures
2855 * @info:	pointer to TISCI instance
2856 */
2857 static void ti_sci_setup_ops(struct ti_sci_info *info)
2858 {
2859 struct ti_sci_ops *ops = &info->handle.ops;
2860 struct ti_sci_core_ops *core_ops = &ops->core_ops;
2861 struct ti_sci_dev_ops *dops = &ops->dev_ops;
2862 struct ti_sci_clk_ops *cops = &ops->clk_ops;
2863 struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
2864 struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
2865 struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
2866 struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
2867 struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
2868 struct ti_sci_proc_ops *pops = &ops->proc_ops;
2869
2870 core_ops->reboot_device = ti_sci_cmd_core_reboot;
2871
2872 dops->get_device = ti_sci_cmd_get_device;
2873 dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
2874 dops->idle_device = ti_sci_cmd_idle_device;
2875 dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
2876 dops->put_device = ti_sci_cmd_put_device;
2877
2878 dops->is_valid = ti_sci_cmd_dev_is_valid;
2879 dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
2880 dops->is_idle = ti_sci_cmd_dev_is_idle;
2881 dops->is_stop = ti_sci_cmd_dev_is_stop;
2882 dops->is_on = ti_sci_cmd_dev_is_on;
2883 dops->is_transitioning = ti_sci_cmd_dev_is_trans;
2884 dops->set_device_resets = ti_sci_cmd_set_device_resets;
2885 dops->get_device_resets = ti_sci_cmd_get_device_resets;
2886
2887 cops->get_clock = ti_sci_cmd_get_clock;
2888 cops->idle_clock = ti_sci_cmd_idle_clock;
2889 cops->put_clock = ti_sci_cmd_put_clock;
2890 cops->is_auto = ti_sci_cmd_clk_is_auto;
2891 cops->is_on = ti_sci_cmd_clk_is_on;
2892 cops->is_off = ti_sci_cmd_clk_is_off;
2893
2894 cops->set_parent = ti_sci_cmd_clk_set_parent;
2895 cops->get_parent = ti_sci_cmd_clk_get_parent;
2896 cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
2897
2898 cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
2899 cops->set_freq = ti_sci_cmd_clk_set_freq;
2900 cops->get_freq = ti_sci_cmd_clk_get_freq;
2901
2902 rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
2903 rm_core_ops->get_range_from_shost =
2904 ti_sci_cmd_get_resource_range_from_shost;
2905
2906 iops->set_irq = ti_sci_cmd_set_irq;
2907 iops->set_event_map = ti_sci_cmd_set_event_map;
2908 iops->free_irq = ti_sci_cmd_free_irq;
2909 iops->free_event_map = ti_sci_cmd_free_event_map;
2910
2911 rops->config = ti_sci_cmd_ring_config;
2912 rops->get_config = ti_sci_cmd_ring_get_config;
2913
2914 psilops->pair = ti_sci_cmd_rm_psil_pair;
2915 psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2916
2917 udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2918 udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2919 udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
2920
2921 pops->request = ti_sci_cmd_proc_request;
2922 pops->release = ti_sci_cmd_proc_release;
2923 pops->handover = ti_sci_cmd_proc_handover;
2924 pops->set_config = ti_sci_cmd_proc_set_config;
2925 pops->set_control = ti_sci_cmd_proc_set_control;
2926 pops->get_status = ti_sci_cmd_proc_get_status;
2927 }
2928
2929 /**
2930 * ti_sci_get_handle() - Get the TI SCI handle for a device
2931 * @dev: Pointer to device for which we want SCI handle
2932 *
2933 * NOTE: The function does not track individual clients of the framework
2934 * and is expected to be maintained by caller of TI SCI protocol library.
2935 * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
2936 * Return: pointer to handle if successful, else:
2937 * -EPROBE_DEFER if the instance is not ready
2938 * -ENODEV if the required node handler is missing
2939 * -EINVAL if invalid conditions are encountered.
2940 */
2941 const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
2942 {
2943 struct device_node *ti_sci_np;
2944 struct list_head *p;
2945 struct ti_sci_handle *handle = NULL;
2946 struct ti_sci_info *info;
2947
2948 if (!dev) {
2949 pr_err("I need a device pointer\n");
2950 return ERR_PTR(-EINVAL);
2951 }
2952 ti_sci_np = of_get_parent(dev->of_node);
2953 if (!ti_sci_np) {
2954 dev_err(dev, "No OF information\n");
2955 return ERR_PTR(-EINVAL);
2956 }
2957
2958 mutex_lock(&ti_sci_list_mutex);
2959 list_for_each(p, &ti_sci_list) {
2960 info = list_entry(p, struct ti_sci_info, node);
2961 if (ti_sci_np == info->dev->of_node) {
2962 handle = &info->handle;
2963 info->users++;
2964 break;
2965 }
2966 }
2967 mutex_unlock(&ti_sci_list_mutex);
2968 of_node_put(ti_sci_np);
2969
2970 if (!handle)
2971 return ERR_PTR(-EPROBE_DEFER);
2972
2973 return handle;
2974 }
2975 EXPORT_SYMBOL_GPL(ti_sci_get_handle);
2976
2977 /**
2978 * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
2979 * @handle: Handle acquired by ti_sci_get_handle
2980 *
2981 * NOTE: The function does not track individual clients of the framework
2982 * and is expected to be maintained by caller of TI SCI protocol library.
2983 * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
2984 *
2985 * Return: 0 if successfully released
2986 * if an error pointer was passed, it returns the error value back,
2987 * if null was passed, it returns -EINVAL;
2988 */
2989 int ti_sci_put_handle(const struct ti_sci_handle *handle)
2990 {
2991 struct ti_sci_info *info;
2992
2993 if (IS_ERR(handle))
2994 return PTR_ERR(handle);
2995 if (!handle)
2996 return -EINVAL;
2997
2998 info = handle_to_ti_sci_info(handle);
2999 mutex_lock(&ti_sci_list_mutex);
3000 if (!WARN_ON(!info->users))
3001 info->users--;
3002 mutex_unlock(&ti_sci_list_mutex);
3003
3004 return 0;
3005 }
3006 EXPORT_SYMBOL_GPL(ti_sci_put_handle);
3007
3008 static void devm_ti_sci_release(struct device *dev, void *res)
3009 {
3010 const struct ti_sci_handle **ptr = res;
3011 const struct ti_sci_handle *handle = *ptr;
3012 int ret;
3013
3014 ret = ti_sci_put_handle(handle);
3015 if (ret)
3016 dev_err(dev, "failed to put handle %d\n", ret);
3017 }
3018
3019 /**
3020 * devm_ti_sci_get_handle() - Managed get handle
3021 * @dev: device for which we want SCI handle for.
3022 *
3023 * NOTE: This releases the handle once the device resources are
3024 * no longer needed. MUST NOT BE released with ti_sci_put_handle.
3025 * The function does not track individual clients of the framework
3026 * and is expected to be maintained by caller of TI SCI protocol library.
3027 *
3028 * Return: pointer to handle if successful, else corresponding error pointer.
3029 */
3030 const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
3031 {
3032 const struct ti_sci_handle **ptr;
3033 const struct ti_sci_handle *handle;
3034
3035 ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
3036 if (!ptr)
3037 return ERR_PTR(-ENOMEM);
3038 handle = ti_sci_get_handle(dev);
3039
3040 if (!IS_ERR(handle)) {
3041 *ptr = handle;
3042 devres_add(dev, ptr);
3043 } else {
3044 devres_free(ptr);
3045 }
3046
3047 return handle;
3048 }
3049 EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
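/*
 * Sketch of client usage in a probe path (error handling beyond the
 * handle check elided): the devres variant needs no explicit
 * ti_sci_put_handle() on remove:
 *
 *	static int client_probe(struct platform_device *pdev)
 *	{
 *		const struct ti_sci_handle *handle;
 *
 *		handle = devm_ti_sci_get_handle(&pdev->dev);
 *		if (IS_ERR(handle))
 *			return PTR_ERR(handle);
 *		...
 *	}
 */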
3050
3051 /**
3052 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
3053 * @np: device node
3054 * @property: property name containing phandle on TISCI node
3055 *
3056 * NOTE: The function does not track individual clients of the framework
3057 * and is expected to be maintained by caller of TI SCI protocol library.
3058 * ti_sci_put_handle must be balanced with successful ti_sci_get_by_phandle
3059 * Return: pointer to handle if successful, else:
3060 * -EPROBE_DEFER if the instance is not ready
3061 * -ENODEV if the required node handler is missing
3062 * -EINVAL if invalid conditions are encountered.
3063 */
3064 const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
3065 const char *property)
3066 {
3067 struct ti_sci_handle *handle = NULL;
3068 struct device_node *ti_sci_np;
3069 struct ti_sci_info *info;
3070 struct list_head *p;
3071
3072 if (!np) {
3073 		pr_err("I need a device node pointer\n");
3074 return ERR_PTR(-EINVAL);
3075 }
3076
3077 ti_sci_np = of_parse_phandle(np, property, 0);
3078 if (!ti_sci_np)
3079 return ERR_PTR(-ENODEV);
3080
3081 mutex_lock(&ti_sci_list_mutex);
3082 list_for_each(p, &ti_sci_list) {
3083 info = list_entry(p, struct ti_sci_info, node);
3084 if (ti_sci_np == info->dev->of_node) {
3085 handle = &info->handle;
3086 info->users++;
3087 break;
3088 }
3089 }
3090 mutex_unlock(&ti_sci_list_mutex);
3091 of_node_put(ti_sci_np);
3092
3093 if (!handle)
3094 return ERR_PTR(-EPROBE_DEFER);
3095
3096 return handle;
3097 }
3098 EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
3099
3100 /**
3101 * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
3102 * @dev: Device pointer requesting TISCI handle
3103 * @property: property name containing phandle on TISCI node
3104 *
3105 * NOTE: This releases the handle once the device resources are
3106 * no longer needed. MUST NOT BE released with ti_sci_put_handle.
3107 * The function does not track individual clients of the framework
3108 * and is expected to be maintained by caller of TI SCI protocol library.
3109 *
3110 * Return: pointer to handle if successful, else corresponding error pointer.
3111 */
3112 const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
3113 const char *property)
3114 {
3115 const struct ti_sci_handle *handle;
3116 const struct ti_sci_handle **ptr;
3117
3118 ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
3119 if (!ptr)
3120 return ERR_PTR(-ENOMEM);
3121 handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
3122
3123 if (!IS_ERR(handle)) {
3124 *ptr = handle;
3125 devres_add(dev, ptr);
3126 } else {
3127 devres_free(ptr);
3128 }
3129
3130 return handle;
3131 }
3132 EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
3133
3134 /**
3135 * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
3136 * @res: Pointer to the TISCI resource
3137 *
3138 * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
3139 */
3140 u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
3141 {
3142 unsigned long flags;
3143 u16 set, free_bit;
3144
3145 raw_spin_lock_irqsave(&res->lock, flags);
3146 for (set = 0; set < res->sets; set++) {
3147 free_bit = find_first_zero_bit(res->desc[set].res_map,
3148 res->desc[set].num);
3149 if (free_bit != res->desc[set].num) {
3150 set_bit(free_bit, res->desc[set].res_map);
3151 raw_spin_unlock_irqrestore(&res->lock, flags);
3152 return res->desc[set].start + free_bit;
3153 }
3154 }
3155 raw_spin_unlock_irqrestore(&res->lock, flags);
3156
3157 return TI_SCI_RESOURCE_NULL;
3158 }
3159 EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
3160
3161 /**
3162 * ti_sci_release_resource() - Release a resource from TISCI resource.
3163 * @res: Pointer to the TISCI resource
3164 * @id: Resource id to be released.
3165 */
3166 void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
3167 {
3168 unsigned long flags;
3169 u16 set;
3170
3171 raw_spin_lock_irqsave(&res->lock, flags);
3172 for (set = 0; set < res->sets; set++) {
3173 if (res->desc[set].start <= id &&
3174 (res->desc[set].num + res->desc[set].start) > id)
3175 clear_bit(id - res->desc[set].start,
3176 res->desc[set].res_map);
3177 }
3178 raw_spin_unlock_irqrestore(&res->lock, flags);
3179 }
3180 EXPORT_SYMBOL_GPL(ti_sci_release_resource);
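/*
 * Allocation sketch: ti_sci_get_free_resource() hands out one free index
 * across all sets and ti_sci_release_resource() returns it; callers must
 * check for the TI_SCI_RESOURCE_NULL sentinel rather than an error code:
 *
 *	u16 idx = ti_sci_get_free_resource(res);
 *
 *	if (idx == TI_SCI_RESOURCE_NULL)
 *		return -ENOSPC;
 *	...
 *	ti_sci_release_resource(res, idx);
 */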
3181
3182 /**
3183 * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
3184 * @res: Pointer to the TISCI resource
3185 *
3186 * Return: Total number of available resources.
3187 */
3188 u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
3189 {
3190 u32 set, count = 0;
3191
3192 for (set = 0; set < res->sets; set++)
3193 count += res->desc[set].num;
3194
3195 return count;
3196 }
3197 EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
3198
3199 /**
3200 * devm_ti_sci_get_resource_sets() - Get TISCI resources assigned to a device
3201 * @handle: TISCI handle
3202 * @dev: Device pointer to which the resource is assigned
3203 * @dev_id: TISCI device id to which the resource is assigned
3204 * @sub_types: Array of sub_types assigned corresponding to device
3205 * @sets: Number of sub_types
3206 *
3207 * Return: Pointer to ti_sci_resource if all went well else appropriate
3208 * error pointer.
3209 */
3210 static struct ti_sci_resource *
3211 devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
3212 struct device *dev, u32 dev_id, u32 *sub_types,
3213 u32 sets)
3214 {
3215 struct ti_sci_resource *res;
3216 bool valid_set = false;
3217 int i, ret;
3218
3219 res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
3220 if (!res)
3221 return ERR_PTR(-ENOMEM);
3222
3223 res->sets = sets;
3224 res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
3225 GFP_KERNEL);
3226 if (!res->desc)
3227 return ERR_PTR(-ENOMEM);
3228
3229 for (i = 0; i < res->sets; i++) {
3230 ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
3231 sub_types[i],
3232 &res->desc[i].start,
3233 &res->desc[i].num);
3234 if (ret) {
3235 dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
3236 dev_id, sub_types[i]);
3237 res->desc[i].start = 0;
3238 res->desc[i].num = 0;
3239 continue;
3240 }
3241
3242 dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
3243 dev_id, sub_types[i], res->desc[i].start,
3244 res->desc[i].num);
3245
3246 valid_set = true;
3247 res->desc[i].res_map =
3248 devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
3249 sizeof(*res->desc[i].res_map), GFP_KERNEL);
3250 if (!res->desc[i].res_map)
3251 return ERR_PTR(-ENOMEM);
3252 }
3253 raw_spin_lock_init(&res->lock);
3254
3255 if (valid_set)
3256 return res;
3257
3258 return ERR_PTR(-EINVAL);
3259 }
3260
3261 /**
3262 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
3263 * @handle: TISCI handle
3264 * @dev: Device pointer to which the resource is assigned
3265 * @dev_id: TISCI device id to which the resource is assigned
3266 * @of_prop:	property name by which the resources are represented
3267 *
3268 * Return: Pointer to ti_sci_resource if all went well else appropriate
3269 * error pointer.
3270 */
3271 struct ti_sci_resource *
3272 devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
3273 struct device *dev, u32 dev_id, char *of_prop)
3274 {
3275 struct ti_sci_resource *res;
3276 u32 *sub_types;
3277 int sets;
3278
3279 sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
3280 sizeof(u32));
3281 if (sets < 0) {
3282 dev_err(dev, "%s resource type ids not available\n", of_prop);
3283 return ERR_PTR(sets);
3284 }
3285
3286 sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL);
3287 if (!sub_types)
3288 return ERR_PTR(-ENOMEM);
3289
3290 of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets);
3291 res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types,
3292 sets);
3293
3294 kfree(sub_types);
3295 return res;
3296 }
3297 EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource);
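/*
 * Illustrative call (the property name is an example, not mandated by
 * this driver): a client whose DT node carries a u32 array of subtypes
 * in, say, "ti,sci-rm-range-vint" would request its resources with:
 *
 *	res = devm_ti_sci_get_of_resource(handle, dev, dev_id,
 *					  "ti,sci-rm-range-vint");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */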
3298
3299 /**
3300 * devm_ti_sci_get_resource() - Get a resource range assigned to the device
3301 * @handle: TISCI handle
3302 * @dev: Device pointer to which the resource is assigned
3303 * @dev_id: TISCI device id to which the resource is assigned
3304 * @sub_type:	TISCI resource subtype representing the resource.
3305 *
3306 * Return: Pointer to ti_sci_resource if all went well else appropriate
3307 * error pointer.
3308 */
3309 struct ti_sci_resource *
3310 devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
3311 u32 dev_id, u32 sub_type)
3312 {
3313 return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1);
3314 }
3315 EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
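
/*
 * Usage sketch (illustrative): unlike the OF variant above, the caller
 * supplies a single resource subtype directly rather than naming a DT
 * property. The subtype value (0x0a) is a made-up example.
 *
 *	res = devm_ti_sci_get_resource(handle, dev, dev_id, 0x0a);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */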

static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
                                void *cmd)
{
        struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
        const struct ti_sci_handle *handle = &info->handle;

        ti_sci_cmd_core_reboot(handle);

        /*
         * Whether the call fails or succeeds, the reboot should have taken
         * effect by now, so we should never reach this point.
         */
        return NOTIFY_BAD;
}

/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
        .default_host_id = 2,
        /* Conservative duration */
        .max_rx_timeout_ms = 1000,
        /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
        .max_msgs = 20,
        .max_msg_size = 64,
};

/* Description for AM654 */
static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
        .default_host_id = 12,
        /* Conservative duration */
        .max_rx_timeout_ms = 10000,
        /* Limited by MBOX_TX_QUEUE_LEN. */
        .max_msgs = 20,
        .max_msg_size = 60,
};

static const struct of_device_id ti_sci_of_match[] = {
        {.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
        {.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
        { /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_of_match);
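
/*
 * Sketch of a device tree node this table matches (illustrative only; see
 * the ti,sci devicetree binding for the authoritative format). The
 * mbox-names must be "rx" and "tx" to satisfy the channel lookups in
 * ti_sci_probe() below; "ti,host-id" and "ti,system-reboot-controller"
 * are optional. The mailbox specifiers are placeholders.
 *
 *	dmsc: system-controller {
 *		compatible = "ti,k2g-sci";
 *		ti,host-id = <2>;
 *		ti,system-reboot-controller;
 *		mbox-names = "rx", "tx";
 *		mboxes = <&msgmgr_proxy_rx>, <&msgmgr_proxy_tx>;
 *	};
 */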

static int ti_sci_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        const struct of_device_id *of_id;
        const struct ti_sci_desc *desc;
        struct ti_sci_xfer *xfer;
        struct ti_sci_info *info = NULL;
        struct ti_sci_xfers_info *minfo;
        struct mbox_client *cl;
        int ret = -EINVAL;
        int i;
        int reboot = 0;
        u32 h_id;

        of_id = of_match_device(ti_sci_of_match, dev);
        if (!of_id) {
                dev_err(dev, "OF data missing\n");
                return -EINVAL;
        }
        desc = of_id->data;

        info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        info->dev = dev;
        info->desc = desc;
        ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
        /* if the property is not present in DT, use a default from desc */
        if (ret < 0) {
                info->host_id = info->desc->default_host_id;
        } else {
                if (!h_id) {
                        dev_warn(dev, "Host ID 0 is reserved for firmware\n");
                        info->host_id = info->desc->default_host_id;
                } else {
                        info->host_id = h_id;
                }
        }

        reboot = of_property_read_bool(dev->of_node,
                                       "ti,system-reboot-controller");
        INIT_LIST_HEAD(&info->node);
        minfo = &info->minfo;

        /*
         * Pre-allocate messages. Never allocate more messages than the
         * sequence number field in the header (hdr.seq, a u8) can identify;
         * if the SoC description claims more, it is a data description bug,
         * so force a fix by refusing to probe.
         */
        if (WARN_ON(desc->max_msgs >=
                    1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
                return -EINVAL;

        minfo->xfer_block = devm_kcalloc(dev,
                                         desc->max_msgs,
                                         sizeof(*minfo->xfer_block),
                                         GFP_KERNEL);
        if (!minfo->xfer_block)
                return -ENOMEM;

        minfo->xfer_alloc_table = devm_kcalloc(dev,
                                               BITS_TO_LONGS(desc->max_msgs),
                                               sizeof(unsigned long),
                                               GFP_KERNEL);
        if (!minfo->xfer_alloc_table)
                return -ENOMEM;
        bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);

        /* Pre-initialize the buffer pointer to pre-allocated buffers */
        for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
                xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
                                              GFP_KERNEL);
                if (!xfer->xfer_buf)
                        return -ENOMEM;

                xfer->tx_message.buf = xfer->xfer_buf;
                init_completion(&xfer->done);
        }

        ret = ti_sci_debugfs_create(pdev, info);
        if (ret)
                dev_warn(dev, "Failed to create debug file\n");

        platform_set_drvdata(pdev, info);

        cl = &info->cl;
        cl->dev = dev;
        cl->tx_block = false;
        cl->rx_callback = ti_sci_rx_callback;
        cl->knows_txdone = true;

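        /*
         * The spinlock protects the sequence-number allocation bitmap; the
         * counting semaphore caps concurrent transfers at the number of
         * preallocated message slots, so senders wait rather than overrun
         * the pool.
         */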
        spin_lock_init(&minfo->xfer_lock);
        sema_init(&minfo->sem_xfer_count, desc->max_msgs);

        info->chan_rx = mbox_request_channel_byname(cl, "rx");
        if (IS_ERR(info->chan_rx)) {
                ret = PTR_ERR(info->chan_rx);
                goto out;
        }

        info->chan_tx = mbox_request_channel_byname(cl, "tx");
        if (IS_ERR(info->chan_tx)) {
                ret = PTR_ERR(info->chan_tx);
                goto out;
        }
        ret = ti_sci_cmd_get_revision(info);
        if (ret) {
                dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
                goto out;
        }

        ti_sci_setup_ops(info);

        if (reboot) {
                info->nb.notifier_call = tisci_reboot_handler;
                info->nb.priority = 128;

                ret = register_restart_handler(&info->nb);
                if (ret) {
                        dev_err(dev, "reboot registration failed (%d)\n", ret);
                        /* free the mailbox channels instead of leaking them */
                        goto out;
                }
        }

        dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
                 info->handle.version.abi_major, info->handle.version.abi_minor,
                 info->handle.version.firmware_revision,
                 info->handle.version.firmware_description);

        mutex_lock(&ti_sci_list_mutex);
        list_add_tail(&info->node, &ti_sci_list);
        mutex_unlock(&ti_sci_list_mutex);

        return of_platform_populate(dev->of_node, NULL, NULL, dev);
out:
        if (!IS_ERR(info->chan_tx))
                mbox_free_channel(info->chan_tx);
        if (!IS_ERR(info->chan_rx))
                mbox_free_channel(info->chan_rx);
        debugfs_remove(info->d);
        return ret;
}

static struct platform_driver ti_sci_driver = {
        .probe = ti_sci_probe,
        .driver = {
                   .name = "ti-sci",
                   .of_match_table = of_match_ptr(ti_sci_of_match),
                   .suppress_bind_attrs = true,
        },
};
module_platform_driver(ti_sci_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI System Control Interface (SCI) driver");
MODULE_AUTHOR("Nishanth Menon");
MODULE_ALIAS("platform:ti-sci");