1// SPDX-License-Identifier: GPL-2.0 2/* 3 * Texas Instruments' Message Manager Driver 4 * 5 * Copyright (C) 2015-2017 Texas Instruments Incorporated - https://www.ti.com/ 6 * Nishanth Menon 7 */ 8 9#define pr_fmt(fmt) "%s: " fmt, __func__ 10 11#include <linux/device.h> 12#include <linux/interrupt.h> 13#include <linux/io.h> 14#include <linux/kernel.h> 15#include <linux/mailbox_controller.h> 16#include <linux/module.h> 17#include <linux/of_device.h> 18#include <linux/of.h> 19#include <linux/of_irq.h> 20#include <linux/platform_device.h> 21#include <linux/soc/ti/ti-msgmgr.h> 22 23#define Q_DATA_OFFSET(proxy, queue, reg) \ 24 ((0x10000 * (proxy)) + (0x80 * (queue)) + ((reg) * 4)) 25#define Q_STATE_OFFSET(queue) ((queue) * 0x4) 26#define Q_STATE_ENTRY_COUNT_MASK (0xFFF000) 27 28#define SPROXY_THREAD_OFFSET(tid) (0x1000 * (tid)) 29#define SPROXY_THREAD_DATA_OFFSET(tid, reg) \ 30 (SPROXY_THREAD_OFFSET(tid) + ((reg) * 0x4) + 0x4) 31 32#define SPROXY_THREAD_STATUS_OFFSET(tid) (SPROXY_THREAD_OFFSET(tid)) 33 34#define SPROXY_THREAD_STATUS_COUNT_MASK (0xFF) 35 36#define SPROXY_THREAD_CTRL_OFFSET(tid) (0x1000 + SPROXY_THREAD_OFFSET(tid)) 37#define SPROXY_THREAD_CTRL_DIR_MASK (0x1 << 31) 38 39/** 40 * struct ti_msgmgr_valid_queue_desc - SoC valid queues meant for this processor 41 * @queue_id: Queue Number for this path 42 * @proxy_id: Proxy ID representing the processor in SoC 43 * @is_tx: Is this a receive path? 
44 */ 45struct ti_msgmgr_valid_queue_desc { 46 u8 queue_id; 47 u8 proxy_id; 48 bool is_tx; 49}; 50 51/** 52 * struct ti_msgmgr_desc - Description of message manager integration 53 * @queue_count: Number of Queues 54 * @max_message_size: Message size in bytes 55 * @max_messages: Number of messages 56 * @data_first_reg: First data register for proxy data region 57 * @data_last_reg: Last data register for proxy data region 58 * @status_cnt_mask: Mask for getting the status value 59 * @status_err_mask: Mask for getting the error value, if applicable 60 * @tx_polled: Do I need to use polled mechanism for tx 61 * @tx_poll_timeout_ms: Timeout in ms if polled 62 * @valid_queues: List of Valid queues that the processor can access 63 * @data_region_name: Name of the proxy data region 64 * @status_region_name: Name of the proxy status region 65 * @ctrl_region_name: Name of the proxy control region 66 * @num_valid_queues: Number of valid queues 67 * @is_sproxy: Is this an Secure Proxy instance? 68 * 69 * This structure is used in of match data to describe how integration 70 * for a specific compatible SoC is done. 
71 */ 72struct ti_msgmgr_desc { 73 u8 queue_count; 74 u8 max_message_size; 75 u8 max_messages; 76 u8 data_first_reg; 77 u8 data_last_reg; 78 u32 status_cnt_mask; 79 u32 status_err_mask; 80 bool tx_polled; 81 int tx_poll_timeout_ms; 82 const struct ti_msgmgr_valid_queue_desc *valid_queues; 83 const char *data_region_name; 84 const char *status_region_name; 85 const char *ctrl_region_name; 86 int num_valid_queues; 87 bool is_sproxy; 88}; 89 90/** 91 * struct ti_queue_inst - Description of a queue instance 92 * @name: Queue Name 93 * @queue_id: Queue Identifier as mapped on SoC 94 * @proxy_id: Proxy Identifier as mapped on SoC 95 * @irq: IRQ for Rx Queue 96 * @is_tx: 'true' if transmit queue, else, 'false' 97 * @queue_buff_start: First register of Data Buffer 98 * @queue_buff_end: Last (or confirmation) register of Data buffer 99 * @queue_state: Queue status register 100 * @queue_ctrl: Queue Control register 101 * @chan: Mailbox channel 102 * @rx_buff: Receive buffer pointer allocated at probe, max_message_size 103 */ 104struct ti_queue_inst { 105 char name[30]; 106 u8 queue_id; 107 u8 proxy_id; 108 int irq; 109 bool is_tx; 110 void __iomem *queue_buff_start; 111 void __iomem *queue_buff_end; 112 void __iomem *queue_state; 113 void __iomem *queue_ctrl; 114 struct mbox_chan *chan; 115 u32 *rx_buff; 116}; 117 118/** 119 * struct ti_msgmgr_inst - Description of a Message Manager Instance 120 * @dev: device pointer corresponding to the Message Manager instance 121 * @desc: Description of the SoC integration 122 * @queue_proxy_region: Queue proxy region where queue buffers are located 123 * @queue_state_debug_region: Queue status register regions 124 * @queue_ctrl_region: Queue Control register regions 125 * @num_valid_queues: Number of valid queues defined for the processor 126 * Note: other queues are probably reserved for other processors 127 * in the SoC. 
128 * @qinsts: Array of valid Queue Instances for the Processor 129 * @mbox: Mailbox Controller 130 * @chans: Array for channels corresponding to the Queue Instances. 131 */ 132struct ti_msgmgr_inst { 133 struct device *dev; 134 const struct ti_msgmgr_desc *desc; 135 void __iomem *queue_proxy_region; 136 void __iomem *queue_state_debug_region; 137 void __iomem *queue_ctrl_region; 138 u8 num_valid_queues; 139 struct ti_queue_inst *qinsts; 140 struct mbox_controller mbox; 141 struct mbox_chan *chans; 142}; 143 144/** 145 * ti_msgmgr_queue_get_num_messages() - Get the number of pending messages 146 * @d: Description of message manager 147 * @qinst: Queue instance for which we check the number of pending messages 148 * 149 * Return: number of messages pending in the queue (0 == no pending messages) 150 */ 151static inline int 152ti_msgmgr_queue_get_num_messages(const struct ti_msgmgr_desc *d, 153 struct ti_queue_inst *qinst) 154{ 155 u32 val; 156 u32 status_cnt_mask = d->status_cnt_mask; 157 158 /* 159 * We cannot use relaxed operation here - update may happen 160 * real-time. 161 */ 162 val = readl(qinst->queue_state) & status_cnt_mask; 163 val >>= __ffs(status_cnt_mask); 164 165 return val; 166} 167 168/** 169 * ti_msgmgr_queue_is_error() - Check to see if there is queue error 170 * @d: Description of message manager 171 * @qinst: Queue instance for which we check the number of pending messages 172 * 173 * Return: true if error, else false 174 */ 175static inline bool ti_msgmgr_queue_is_error(const struct ti_msgmgr_desc *d, 176 struct ti_queue_inst *qinst) 177{ 178 u32 val; 179 180 /* Msgmgr has no error detection */ 181 if (!d->is_sproxy) 182 return false; 183 184 /* 185 * We cannot use relaxed operation here - update may happen 186 * real-time. 187 */ 188 val = readl(qinst->queue_state) & d->status_err_mask; 189 190 return val ? 
true : false; 191} 192 193/** 194 * ti_msgmgr_queue_rx_interrupt() - Interrupt handler for receive Queue 195 * @irq: Interrupt number 196 * @p: Channel Pointer 197 * 198 * Return: -EINVAL if there is no instance 199 * IRQ_NONE if the interrupt is not ours. 200 * IRQ_HANDLED if the rx interrupt was successfully handled. 201 */ 202static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p) 203{ 204 struct mbox_chan *chan = p; 205 struct device *dev = chan->mbox->dev; 206 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); 207 struct ti_queue_inst *qinst = chan->con_priv; 208 const struct ti_msgmgr_desc *desc; 209 int msg_count, num_words; 210 struct ti_msgmgr_message message; 211 void __iomem *data_reg; 212 u32 *word_data; 213 214 if (WARN_ON(!inst)) { 215 dev_err(dev, "no platform drv data??\n"); 216 return -EINVAL; 217 } 218 219 /* Do I have an invalid interrupt source? */ 220 if (qinst->is_tx) { 221 dev_err(dev, "Cannot handle rx interrupt on tx channel %s\n", 222 qinst->name); 223 return IRQ_NONE; 224 } 225 226 desc = inst->desc; 227 if (ti_msgmgr_queue_is_error(desc, qinst)) { 228 dev_err(dev, "Error on Rx channel %s\n", qinst->name); 229 return IRQ_NONE; 230 } 231 232 /* Do I actually have messages to read? */ 233 msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst); 234 if (!msg_count) { 235 /* Shared IRQ? */ 236 dev_dbg(dev, "Spurious event - 0 pending data!\n"); 237 return IRQ_NONE; 238 } 239 240 /* 241 * I have no idea about the protocol being used to communicate with the 242 * remote producer - 0 could be valid data, so I wont make a judgement 243 * of how many bytes I should be reading. Let the client figure this 244 * out.. I just read the full message and pass it on.. 245 */ 246 message.len = desc->max_message_size; 247 message.buf = (u8 *)qinst->rx_buff; 248 249 /* 250 * NOTE about register access involved here: 251 * the hardware block is implemented with 32bit access operations and no 252 * support for data splitting. 
We don't want the hardware to misbehave 253 * with sub 32bit access - For example: if the last register read is 254 * split into byte wise access, it can result in the queue getting 255 * stuck or indeterminate behavior. An out of order read operation may 256 * result in weird data results as well. 257 * Hence, we do not use memcpy_fromio or __ioread32_copy here, instead 258 * we depend on readl for the purpose. 259 * 260 * Also note that the final register read automatically marks the 261 * queue message as read. 262 */ 263 for (data_reg = qinst->queue_buff_start, word_data = qinst->rx_buff, 264 num_words = (desc->max_message_size / sizeof(u32)); 265 num_words; num_words--, data_reg += sizeof(u32), word_data++) 266 *word_data = readl(data_reg); 267 268 /* 269 * Last register read automatically clears the IRQ if only 1 message 270 * is pending - so send the data up the stack.. 271 * NOTE: Client is expected to be as optimal as possible, since 272 * we invoke the handler in IRQ context. 273 */ 274 mbox_chan_received_data(chan, (void *)&message); 275 276 return IRQ_HANDLED; 277} 278 279/** 280 * ti_msgmgr_queue_peek_data() - Peek to see if there are any rx messages. 281 * @chan: Channel Pointer 282 * 283 * Return: 'true' if there is pending rx data, 'false' if there is none. 284 */ 285static bool ti_msgmgr_queue_peek_data(struct mbox_chan *chan) 286{ 287 struct ti_queue_inst *qinst = chan->con_priv; 288 struct device *dev = chan->mbox->dev; 289 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); 290 const struct ti_msgmgr_desc *desc = inst->desc; 291 int msg_count; 292 293 if (qinst->is_tx) 294 return false; 295 296 if (ti_msgmgr_queue_is_error(desc, qinst)) { 297 dev_err(dev, "Error on channel %s\n", qinst->name); 298 return false; 299 } 300 301 msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst); 302 303 return msg_count ? 
true : false; 304} 305 306/** 307 * ti_msgmgr_last_tx_done() - See if all the tx messages are sent 308 * @chan: Channel pointer 309 * 310 * Return: 'true' is no pending tx data, 'false' if there are any. 311 */ 312static bool ti_msgmgr_last_tx_done(struct mbox_chan *chan) 313{ 314 struct ti_queue_inst *qinst = chan->con_priv; 315 struct device *dev = chan->mbox->dev; 316 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); 317 const struct ti_msgmgr_desc *desc = inst->desc; 318 int msg_count; 319 320 if (!qinst->is_tx) 321 return false; 322 323 if (ti_msgmgr_queue_is_error(desc, qinst)) { 324 dev_err(dev, "Error on channel %s\n", qinst->name); 325 return false; 326 } 327 328 msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst); 329 330 if (desc->is_sproxy) { 331 /* In secure proxy, msg_count indicates how many we can send */ 332 return msg_count ? true : false; 333 } 334 335 /* if we have any messages pending.. */ 336 return msg_count ? false : true; 337} 338 339/** 340 * ti_msgmgr_send_data() - Send data 341 * @chan: Channel Pointer 342 * @data: ti_msgmgr_message * Message Pointer 343 * 344 * Return: 0 if all goes good, else appropriate error messages. 
345 */ 346static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data) 347{ 348 struct device *dev = chan->mbox->dev; 349 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); 350 const struct ti_msgmgr_desc *desc; 351 struct ti_queue_inst *qinst = chan->con_priv; 352 int num_words, trail_bytes; 353 struct ti_msgmgr_message *message = data; 354 void __iomem *data_reg; 355 u32 *word_data; 356 357 if (WARN_ON(!inst)) { 358 dev_err(dev, "no platform drv data??\n"); 359 return -EINVAL; 360 } 361 desc = inst->desc; 362 363 if (ti_msgmgr_queue_is_error(desc, qinst)) { 364 dev_err(dev, "Error on channel %s\n", qinst->name); 365 return false; 366 } 367 368 if (desc->max_message_size < message->len) { 369 dev_err(dev, "Queue %s message length %zu > max %d\n", 370 qinst->name, message->len, desc->max_message_size); 371 return -EINVAL; 372 } 373 374 /* NOTE: Constraints similar to rx path exists here as well */ 375 for (data_reg = qinst->queue_buff_start, 376 num_words = message->len / sizeof(u32), 377 word_data = (u32 *)message->buf; 378 num_words; num_words--, data_reg += sizeof(u32), word_data++) 379 writel(*word_data, data_reg); 380 381 trail_bytes = message->len % sizeof(u32); 382 if (trail_bytes) { 383 u32 data_trail = *word_data; 384 385 /* Ensure all unused data is 0 */ 386 data_trail &= 0xFFFFFFFF >> (8 * (sizeof(u32) - trail_bytes)); 387 writel(data_trail, data_reg); 388 data_reg += sizeof(u32); 389 } 390 391 /* 392 * 'data_reg' indicates next register to write. If we did not already 393 * write on tx complete reg(last reg), we must do so for transmit 394 * In addition, we also need to make sure all intermediate data 395 * registers(if any required), are reset to 0 for TISCI backward 396 * compatibility to be maintained. 
397 */ 398 while (data_reg <= qinst->queue_buff_end) { 399 writel(0, data_reg); 400 data_reg += sizeof(u32); 401 } 402 403 return 0; 404} 405 406/** 407 * ti_msgmgr_queue_rx_irq_req() - RX IRQ request 408 * @dev: device pointer 409 * @d: descriptor for ti_msgmgr 410 * @qinst: Queue instance 411 * @chan: Channel pointer 412 */ 413static int ti_msgmgr_queue_rx_irq_req(struct device *dev, 414 const struct ti_msgmgr_desc *d, 415 struct ti_queue_inst *qinst, 416 struct mbox_chan *chan) 417{ 418 int ret = 0; 419 char of_rx_irq_name[7]; 420 struct device_node *np; 421 422 snprintf(of_rx_irq_name, sizeof(of_rx_irq_name), 423 "rx_%03d", d->is_sproxy ? qinst->proxy_id : qinst->queue_id); 424 425 /* Get the IRQ if not found */ 426 if (qinst->irq < 0) { 427 np = of_node_get(dev->of_node); 428 if (!np) 429 return -ENODATA; 430 qinst->irq = of_irq_get_byname(np, of_rx_irq_name); 431 of_node_put(np); 432 433 if (qinst->irq < 0) { 434 dev_err(dev, 435 "QID %d PID %d:No IRQ[%s]: %d\n", 436 qinst->queue_id, qinst->proxy_id, 437 of_rx_irq_name, qinst->irq); 438 return qinst->irq; 439 } 440 } 441 442 /* With the expectation that the IRQ might be shared in SoC */ 443 ret = request_irq(qinst->irq, ti_msgmgr_queue_rx_interrupt, 444 IRQF_SHARED, qinst->name, chan); 445 if (ret) { 446 dev_err(dev, "Unable to get IRQ %d on %s(res=%d)\n", 447 qinst->irq, qinst->name, ret); 448 } 449 450 return ret; 451} 452 453/** 454 * ti_msgmgr_queue_startup() - Startup queue 455 * @chan: Channel pointer 456 * 457 * Return: 0 if all goes good, else return corresponding error message 458 */ 459static int ti_msgmgr_queue_startup(struct mbox_chan *chan) 460{ 461 struct device *dev = chan->mbox->dev; 462 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); 463 struct ti_queue_inst *qinst = chan->con_priv; 464 const struct ti_msgmgr_desc *d = inst->desc; 465 int ret; 466 int msg_count; 467 468 /* 469 * If sproxy is starting and can send messages, we are a Tx thread, 470 * else Rx 471 */ 472 if (d->is_sproxy) { 
473 qinst->is_tx = (readl(qinst->queue_ctrl) & 474 SPROXY_THREAD_CTRL_DIR_MASK) ? false : true; 475 476 msg_count = ti_msgmgr_queue_get_num_messages(d, qinst); 477 478 if (!msg_count && qinst->is_tx) { 479 dev_err(dev, "%s: Cannot transmit with 0 credits!\n", 480 qinst->name); 481 return -EINVAL; 482 } 483 } 484 485 if (!qinst->is_tx) { 486 /* Allocate usage buffer for rx */ 487 qinst->rx_buff = kzalloc(d->max_message_size, GFP_KERNEL); 488 if (!qinst->rx_buff) 489 return -ENOMEM; 490 /* Request IRQ */ 491 ret = ti_msgmgr_queue_rx_irq_req(dev, d, qinst, chan); 492 if (ret) { 493 kfree(qinst->rx_buff); 494 return ret; 495 } 496 } 497 498 return 0; 499} 500 501/** 502 * ti_msgmgr_queue_shutdown() - Shutdown the queue 503 * @chan: Channel pointer 504 */ 505static void ti_msgmgr_queue_shutdown(struct mbox_chan *chan) 506{ 507 struct ti_queue_inst *qinst = chan->con_priv; 508 509 if (!qinst->is_tx) { 510 free_irq(qinst->irq, chan); 511 kfree(qinst->rx_buff); 512 } 513} 514 515/** 516 * ti_msgmgr_of_xlate() - Translation of phandle to queue 517 * @mbox: Mailbox controller 518 * @p: phandle pointer 519 * 520 * Return: Mailbox channel corresponding to the queue, else return error 521 * pointer. 522 */ 523static struct mbox_chan *ti_msgmgr_of_xlate(struct mbox_controller *mbox, 524 const struct of_phandle_args *p) 525{ 526 struct ti_msgmgr_inst *inst; 527 int req_qid, req_pid; 528 struct ti_queue_inst *qinst; 529 const struct ti_msgmgr_desc *d; 530 int i, ncells; 531 532 inst = container_of(mbox, struct ti_msgmgr_inst, mbox); 533 if (WARN_ON(!inst)) 534 return ERR_PTR(-EINVAL); 535 536 d = inst->desc; 537 538 if (d->is_sproxy) 539 ncells = 1; 540 else 541 ncells = 2; 542 if (p->args_count != ncells) { 543 dev_err(inst->dev, "Invalid arguments in dt[%d]. 
Must be %d\n", 544 p->args_count, ncells); 545 return ERR_PTR(-EINVAL); 546 } 547 if (ncells == 1) { 548 req_qid = 0; 549 req_pid = p->args[0]; 550 } else { 551 req_qid = p->args[0]; 552 req_pid = p->args[1]; 553 } 554 555 if (d->is_sproxy) { 556 if (req_pid >= d->num_valid_queues) 557 goto err; 558 qinst = &inst->qinsts[req_pid]; 559 return qinst->chan; 560 } 561 562 for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; 563 i++, qinst++) { 564 if (req_qid == qinst->queue_id && req_pid == qinst->proxy_id) 565 return qinst->chan; 566 } 567 568err: 569 dev_err(inst->dev, "Queue ID %d, Proxy ID %d is wrong on %pOFn\n", 570 req_qid, req_pid, p->np); 571 return ERR_PTR(-ENOENT); 572} 573 574/** 575 * ti_msgmgr_queue_setup() - Setup data structures for each queue instance 576 * @idx: index of the queue 577 * @dev: pointer to the message manager device 578 * @np: pointer to the of node 579 * @inst: Queue instance pointer 580 * @d: Message Manager instance description data 581 * @qd: Queue description data 582 * @qinst: Queue instance pointer 583 * @chan: pointer to mailbox channel 584 * 585 * Return: 0 if all went well, else return corresponding error 586 */ 587static int ti_msgmgr_queue_setup(int idx, struct device *dev, 588 struct device_node *np, 589 struct ti_msgmgr_inst *inst, 590 const struct ti_msgmgr_desc *d, 591 const struct ti_msgmgr_valid_queue_desc *qd, 592 struct ti_queue_inst *qinst, 593 struct mbox_chan *chan) 594{ 595 char *dir; 596 597 qinst->proxy_id = qd->proxy_id; 598 qinst->queue_id = qd->queue_id; 599 600 if (qinst->queue_id > d->queue_count) { 601 dev_err(dev, "Queue Data [idx=%d] queuid %d > %d\n", 602 idx, qinst->queue_id, d->queue_count); 603 return -ERANGE; 604 } 605 606 if (d->is_sproxy) { 607 qinst->queue_buff_start = inst->queue_proxy_region + 608 SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id, 609 d->data_first_reg); 610 qinst->queue_buff_end = inst->queue_proxy_region + 611 SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id, 612 
d->data_last_reg); 613 qinst->queue_state = inst->queue_state_debug_region + 614 SPROXY_THREAD_STATUS_OFFSET(qinst->proxy_id); 615 qinst->queue_ctrl = inst->queue_ctrl_region + 616 SPROXY_THREAD_CTRL_OFFSET(qinst->proxy_id); 617 618 /* XXX: DONOT read registers here!.. Some may be unusable */ 619 dir = "thr"; 620 snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d", 621 dev_name(dev), dir, qinst->proxy_id); 622 } else { 623 qinst->queue_buff_start = inst->queue_proxy_region + 624 Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, 625 d->data_first_reg); 626 qinst->queue_buff_end = inst->queue_proxy_region + 627 Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, 628 d->data_last_reg); 629 qinst->queue_state = 630 inst->queue_state_debug_region + 631 Q_STATE_OFFSET(qinst->queue_id); 632 qinst->is_tx = qd->is_tx; 633 dir = qinst->is_tx ? "tx" : "rx"; 634 snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d", 635 dev_name(dev), dir, qinst->queue_id, qinst->proxy_id); 636 } 637 638 qinst->chan = chan; 639 640 /* Setup an error value for IRQ - Lazy allocation */ 641 qinst->irq = -EINVAL; 642 643 chan->con_priv = qinst; 644 645 dev_dbg(dev, "[%d] qidx=%d pidx=%d irq=%d q_s=%p q_e = %p\n", 646 idx, qinst->queue_id, qinst->proxy_id, qinst->irq, 647 qinst->queue_buff_start, qinst->queue_buff_end); 648 return 0; 649} 650 651/* Queue operations */ 652static const struct mbox_chan_ops ti_msgmgr_chan_ops = { 653 .startup = ti_msgmgr_queue_startup, 654 .shutdown = ti_msgmgr_queue_shutdown, 655 .peek_data = ti_msgmgr_queue_peek_data, 656 .last_tx_done = ti_msgmgr_last_tx_done, 657 .send_data = ti_msgmgr_send_data, 658}; 659 660/* Keystone K2G SoC integration details */ 661static const struct ti_msgmgr_valid_queue_desc k2g_valid_queues[] = { 662 {.queue_id = 0, .proxy_id = 0, .is_tx = true,}, 663 {.queue_id = 1, .proxy_id = 0, .is_tx = true,}, 664 {.queue_id = 2, .proxy_id = 0, .is_tx = true,}, 665 {.queue_id = 3, .proxy_id = 0, .is_tx = true,}, 666 {.queue_id = 5, .proxy_id = 
2, .is_tx = false,},
	{.queue_id = 56, .proxy_id = 1, .is_tx = true,},
	{.queue_id = 57, .proxy_id = 2, .is_tx = false,},
	{.queue_id = 58, .proxy_id = 3, .is_tx = true,},
	{.queue_id = 59, .proxy_id = 4, .is_tx = true,},
	{.queue_id = 60, .proxy_id = 5, .is_tx = true,},
	{.queue_id = 61, .proxy_id = 6, .is_tx = true,},
};

static const struct ti_msgmgr_desc k2g_desc = {
	.queue_count = 64,
	.max_message_size = 64,
	.max_messages = 128,
	.data_region_name = "queue_proxy_region",
	.status_region_name = "queue_state_debug_region",
	.data_first_reg = 16,
	.data_last_reg = 31,
	.status_cnt_mask = Q_STATE_ENTRY_COUNT_MASK,
	.tx_polled = false,
	.valid_queues = k2g_valid_queues,
	.num_valid_queues = ARRAY_SIZE(k2g_valid_queues),
	.is_sproxy = false,
};

/* AM654 Secure Proxy: every thread is potentially valid, hence no table */
static const struct ti_msgmgr_desc am654_desc = {
	.queue_count = 190,
	.num_valid_queues = 190,
	.max_message_size = 60,
	.data_region_name = "target_data",
	.status_region_name = "rt",
	.ctrl_region_name = "scfg",
	.data_first_reg = 0,
	.data_last_reg = 14,
	.status_cnt_mask = SPROXY_THREAD_STATUS_COUNT_MASK,
	.tx_polled = false,
	.is_sproxy = true,
};

static const struct of_device_id ti_msgmgr_of_match[] = {
	{.compatible = "ti,k2g-message-manager", .data = &k2g_desc},
	{.compatible = "ti,am654-secure-proxy", .data = &am654_desc},
	{ /* Sentinel */ }
};

MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match);

/**
 * ti_msgmgr_probe() - Probe a message manager / secure proxy instance
 * @pdev: corresponding platform device
 *
 * Maps the regions named by the matched descriptor, sets up one queue
 * instance and mailbox channel per valid queue, and registers the
 * mailbox controller. All allocations/mappings are devm-managed, so
 * early-return error paths need no explicit cleanup.
 *
 * Return: 0 on success, else a negative error code.
 */
static int ti_msgmgr_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *of_id;
	struct device_node *np;
	struct resource *res;
	const struct ti_msgmgr_desc *desc;
	struct ti_msgmgr_inst *inst;
	struct ti_queue_inst *qinst;
	struct mbox_controller *mbox;
	struct mbox_chan *chans;
	int queue_count;
	int i;
	int ret = -EINVAL;
	const struct ti_msgmgr_valid_queue_desc *queue_desc;

	if (!dev->of_node) {
		dev_err(dev, "no OF information\n");
		return -EINVAL;
	}
	np = dev->of_node;

	of_id = of_match_device(ti_msgmgr_of_match, dev);
	if (!of_id) {
		dev_err(dev, "OF data missing\n");
		return -EINVAL;
	}
	desc = of_id->data;

	inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	inst->dev = dev;
	inst->desc = desc;

	/* Region names come from the per-SoC descriptor (see k2g/am654 above) */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   desc->data_region_name);
	inst->queue_proxy_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(inst->queue_proxy_region))
		return PTR_ERR(inst->queue_proxy_region);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   desc->status_region_name);
	inst->queue_state_debug_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(inst->queue_state_debug_region))
		return PTR_ERR(inst->queue_state_debug_region);

	/* Only Secure Proxy instances have a separate control region */
	if (desc->is_sproxy) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   desc->ctrl_region_name);
		inst->queue_ctrl_region = devm_ioremap_resource(dev, res);
		if (IS_ERR(inst->queue_ctrl_region))
			return PTR_ERR(inst->queue_ctrl_region);
	}

	dev_dbg(dev, "proxy region=%p, queue_state=%p\n",
		inst->queue_proxy_region, inst->queue_state_debug_region);

	queue_count = desc->num_valid_queues;
	if (!queue_count || queue_count > desc->queue_count) {
		dev_crit(dev, "Invalid Number of queues %d. Max %d\n",
			 queue_count, desc->queue_count);
		return -ERANGE;
	}
	inst->num_valid_queues = queue_count;

	qinst = devm_kcalloc(dev, queue_count, sizeof(*qinst), GFP_KERNEL);
	if (!qinst)
		return -ENOMEM;
	inst->qinsts = qinst;

	chans = devm_kcalloc(dev, queue_count, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return -ENOMEM;
	inst->chans = chans;

	if (desc->is_sproxy) {
		struct ti_msgmgr_valid_queue_desc sproxy_desc;

		/* All proxies may be valid in Secure Proxy instance */
		for (i = 0; i < queue_count; i++, qinst++, chans++) {
			sproxy_desc.queue_id = 0;
			sproxy_desc.proxy_id = i;
			ret = ti_msgmgr_queue_setup(i, dev, np, inst,
						    desc, &sproxy_desc, qinst,
						    chans);
			if (ret)
				return ret;
		}
	} else {
		/* Only Some proxies are valid in Message Manager */
		for (i = 0, queue_desc = desc->valid_queues;
		     i < queue_count; i++, qinst++, chans++, queue_desc++) {
			ret = ti_msgmgr_queue_setup(i, dev, np, inst,
						    desc, queue_desc, qinst,
						    chans);
			if (ret)
				return ret;
		}
	}

	mbox = &inst->mbox;
	mbox->dev = dev;
	mbox->ops = &ti_msgmgr_chan_ops;
	mbox->chans = inst->chans;
	mbox->num_chans = inst->num_valid_queues;
	mbox->txdone_irq = false;
	mbox->txdone_poll = desc->tx_polled;
	if (desc->tx_polled)
		mbox->txpoll_period = desc->tx_poll_timeout_ms;
	mbox->of_xlate = ti_msgmgr_of_xlate;

	/* drvdata must be set before registering: IRQ/ops paths read it */
	platform_set_drvdata(pdev, inst);
	ret = devm_mbox_controller_register(dev, mbox);
	if (ret)
		dev_err(dev, "Failed to register mbox_controller(%d)\n", ret);

	return ret;
}

static struct platform_driver ti_msgmgr_driver = {
	.probe = ti_msgmgr_probe,
	.driver = {
		   .name = "ti-msgmgr",
		   .of_match_table = of_match_ptr(ti_msgmgr_of_match),
	},
};
module_platform_driver(ti_msgmgr_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI message manager driver");
MODULE_AUTHOR("Nishanth Menon");
MODULE_ALIAS("platform:ti-msgmgr");