// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Linaro Ltd
 */
#include <dt-bindings/power/qcom-aoss-qmp.h>
#include <linux/clk-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/thermal.h>
#include <linux/slab.h>

#define QMP_DESC_MAGIC			0x0
#define QMP_DESC_VERSION		0x4
#define QMP_DESC_FEATURES		0x8

/* AOP-side offsets */
#define QMP_DESC_UCORE_LINK_STATE	0xc
#define QMP_DESC_UCORE_LINK_STATE_ACK	0x10
#define QMP_DESC_UCORE_CH_STATE		0x14
#define QMP_DESC_UCORE_CH_STATE_ACK	0x18
#define QMP_DESC_UCORE_MBOX_SIZE	0x1c
#define QMP_DESC_UCORE_MBOX_OFFSET	0x20

/* Linux-side offsets */
#define QMP_DESC_MCORE_LINK_STATE	0x24
#define QMP_DESC_MCORE_LINK_STATE_ACK	0x28
#define QMP_DESC_MCORE_CH_STATE		0x2c
#define QMP_DESC_MCORE_CH_STATE_ACK	0x30
#define QMP_DESC_MCORE_MBOX_SIZE	0x34
#define QMP_DESC_MCORE_MBOX_OFFSET	0x38

#define QMP_STATE_UP			GENMASK(15, 0)
#define QMP_STATE_DOWN			GENMASK(31, 16)

#define QMP_MAGIC			0x4d41494c /* mail */
#define QMP_VERSION			1

/* 64 bytes is enough to store the requests and provides padding to 4 bytes */
#define QMP_MSG_LEN			64

#define QMP_NUM_COOLING_RESOURCES	2

static bool qmp_cdev_max_state = 1;

struct qmp_cooling_device {
	struct thermal_cooling_device *cdev;
	struct qmp *qmp;
	char *name;
	bool state;
};

/**
 * struct qmp - driver state for QMP implementation
 * @msgram: iomem referencing the message RAM used for communication
 * @dev: reference to QMP device
 * @mbox_client: mailbox client used to ring the doorbell on transmit
 * @mbox_chan: mailbox channel used to ring the doorbell on transmit
 * @offset: offset within @msgram where messages should be written
 * @size: maximum size of the messages to be transmitted
 * @event: wait_queue for synchronization with the IRQ
 * @tx_lock: provides synchronization between multiple callers of qmp_send()
 * @qdss_clk: QDSS clock hw struct
 * @pd_data: genpd data
 * @cooling_devs: cooling devices registered with the thermal framework
 */
struct qmp {
	void __iomem *msgram;
	struct device *dev;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	size_t offset;
	size_t size;

	wait_queue_head_t event;

	struct mutex tx_lock;

	struct clk_hw qdss_clk;
	struct genpd_onecell_data pd_data;
	struct qmp_cooling_device *cooling_devs;
};

struct qmp_pd {
	struct qmp *qmp;
	struct generic_pm_domain pd;
};

#define to_qmp_pd_resource(res) container_of(res, struct qmp_pd, pd)

static void qmp_kick(struct qmp *qmp)
{
	mbox_send_message(qmp->mbox_chan, NULL);
	mbox_client_txdone(qmp->mbox_chan, 0);
}

static bool qmp_magic_valid(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MAGIC) == QMP_MAGIC;
}

static bool qmp_link_acked(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MCORE_LINK_STATE_ACK) == QMP_STATE_UP;
}

static bool qmp_mcore_channel_acked(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_MCORE_CH_STATE_ACK) == QMP_STATE_UP;
}

static bool qmp_ucore_channel_up(struct qmp *qmp)
{
	return readl(qmp->msgram + QMP_DESC_UCORE_CH_STATE) == QMP_STATE_UP;
}

static int qmp_open(struct qmp *qmp)
{
	int ret;
	u32 val;

	if (!qmp_magic_valid(qmp)) {
		dev_err(qmp->dev, "QMP magic doesn't match\n");
		return -EINVAL;
	}

	val = readl(qmp->msgram + QMP_DESC_VERSION);
	if (val != QMP_VERSION) {
		dev_err(qmp->dev, "unsupported QMP version %d\n", val);
		return -EINVAL;
	}

	qmp->offset = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_OFFSET);
	qmp->size = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_SIZE);
	if (!qmp->size) {
		dev_err(qmp->dev, "invalid mailbox size\n");
		return -EINVAL;
	}

	/* Ack remote core's link state */
	val = readl(qmp->msgram + QMP_DESC_UCORE_LINK_STATE);
	writel(val, qmp->msgram + QMP_DESC_UCORE_LINK_STATE_ACK);

	/* Set local core's link state to up */
	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't ack link\n");
		goto timeout_close_link;
	}

	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_CH_STATE);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't open channel\n");
		goto timeout_close_channel;
	}

	/* Ack remote core's channel state */
	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_UCORE_CH_STATE_ACK);

	qmp_kick(qmp);

	ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ);
	if (!ret) {
		dev_err(qmp->dev, "ucore didn't ack channel\n");
		goto timeout_close_channel;
	}

	return 0;

timeout_close_channel:
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);

timeout_close_link:
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
	qmp_kick(qmp);

	return -ETIMEDOUT;
}

static void qmp_close(struct qmp *qmp)
{
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
	qmp_kick(qmp);
}

static irqreturn_t qmp_intr(int irq, void *data)
{
	struct qmp *qmp = data;

	wake_up_all(&qmp->event);

	return IRQ_HANDLED;
}

static bool qmp_message_empty(struct qmp *qmp)
{
	return readl(qmp->msgram + qmp->offset) == 0;
}

/**
 * qmp_send() - send a message to the AOSS
 * @qmp: qmp context
 * @data: message to be sent
 * @len: length of the message
 *
 * Transmit @data to AOSS and wait for the AOSS to acknowledge the message.
 * @len must be a multiple of 4 and not longer than the mailbox size. Access is
 * synchronized by this implementation.
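 *
 * A typical request mirrors qmp_qdss_clk_prepare() below: the message is a
 * key/value string held in a buffer padded out to QMP_MSG_LEN (illustrative
 * only; the accepted classes and resource names are defined by the AOSS
 * firmware):
 *
 *   char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 1}";
 *   ret = qmp_send(qmp, buf, sizeof(buf));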
 *
 * Return: 0 on success, negative errno on failure
 */
static int qmp_send(struct qmp *qmp, const void *data, size_t len)
{
	long time_left;
	size_t tlen;
	int ret;

	if (WARN_ON(len + sizeof(u32) > qmp->size))
		return -EINVAL;

	if (WARN_ON(len % sizeof(u32)))
		return -EINVAL;

	mutex_lock(&qmp->tx_lock);

	/* The message RAM only implements 32-bit accesses */
	__iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
			 data, len / sizeof(u32));
	writel(len, qmp->msgram + qmp->offset);

	/* Read back len to confirm data written in message RAM */
	tlen = readl(qmp->msgram + qmp->offset);
	qmp_kick(qmp);

	time_left = wait_event_interruptible_timeout(qmp->event,
						     qmp_message_empty(qmp), HZ);
	if (!time_left) {
		dev_err(qmp->dev, "ucore did not ack channel\n");
		ret = -ETIMEDOUT;

		/* Clear message from buffer */
		writel(0, qmp->msgram + qmp->offset);
	} else {
		ret = 0;
	}

	mutex_unlock(&qmp->tx_lock);

	return ret;
}

static int qmp_qdss_clk_prepare(struct clk_hw *hw)
{
	static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 1}";
	struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);

	return qmp_send(qmp, buf, sizeof(buf));
}

static void qmp_qdss_clk_unprepare(struct clk_hw *hw)
{
	static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 0}";
	struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);

	qmp_send(qmp, buf, sizeof(buf));
}

static const struct clk_ops qmp_qdss_clk_ops = {
	.prepare = qmp_qdss_clk_prepare,
	.unprepare = qmp_qdss_clk_unprepare,
};

static int qmp_qdss_clk_add(struct qmp *qmp)
{
	static const struct clk_init_data qdss_init = {
		.ops = &qmp_qdss_clk_ops,
		.name = "qdss",
	};
	int ret;

	qmp->qdss_clk.init = &qdss_init;
	ret = clk_hw_register(qmp->dev, &qmp->qdss_clk);
	if (ret < 0) {
		dev_err(qmp->dev, "failed to register qdss clock\n");
		return ret;
	}

	ret = of_clk_add_hw_provider(qmp->dev->of_node, of_clk_hw_simple_get,
				     &qmp->qdss_clk);
	if (ret < 0) {
		dev_err(qmp->dev, "unable to register of clk hw provider\n");
		clk_hw_unregister(&qmp->qdss_clk);
	}

	return ret;
}

static void qmp_qdss_clk_remove(struct qmp *qmp)
{
	of_clk_del_provider(qmp->dev->of_node);
	clk_hw_unregister(&qmp->qdss_clk);
}

static int qmp_pd_power_toggle(struct qmp_pd *res, bool enable)
{
	char buf[QMP_MSG_LEN] = {};

	snprintf(buf, sizeof(buf),
		 "{class: image, res: load_state, name: %s, val: %s}",
		 res->pd.name, enable ? "on" : "off");
"on" : "off"); 324 return qmp_send(res->qmp, buf, sizeof(buf)); 325} 326 327static int qmp_pd_power_on(struct generic_pm_domain *domain) 328{ 329 return qmp_pd_power_toggle(to_qmp_pd_resource(domain), true); 330} 331 332static int qmp_pd_power_off(struct generic_pm_domain *domain) 333{ 334 return qmp_pd_power_toggle(to_qmp_pd_resource(domain), false); 335} 336 337static const char * const sdm845_resources[] = { 338 [AOSS_QMP_LS_CDSP] = "cdsp", 339 [AOSS_QMP_LS_LPASS] = "adsp", 340 [AOSS_QMP_LS_MODEM] = "modem", 341 [AOSS_QMP_LS_SLPI] = "slpi", 342 [AOSS_QMP_LS_SPSS] = "spss", 343 [AOSS_QMP_LS_VENUS] = "venus", 344}; 345 346static int qmp_pd_add(struct qmp *qmp) 347{ 348 struct genpd_onecell_data *data = &qmp->pd_data; 349 struct device *dev = qmp->dev; 350 struct qmp_pd *res; 351 size_t num = ARRAY_SIZE(sdm845_resources); 352 int ret; 353 int i; 354 355 res = devm_kcalloc(dev, num, sizeof(*res), GFP_KERNEL); 356 if (!res) 357 return -ENOMEM; 358 359 data->domains = devm_kcalloc(dev, num, sizeof(*data->domains), 360 GFP_KERNEL); 361 if (!data->domains) 362 return -ENOMEM; 363 364 for (i = 0; i < num; i++) { 365 res[i].qmp = qmp; 366 res[i].pd.name = sdm845_resources[i]; 367 res[i].pd.power_on = qmp_pd_power_on; 368 res[i].pd.power_off = qmp_pd_power_off; 369 370 ret = pm_genpd_init(&res[i].pd, NULL, true); 371 if (ret < 0) { 372 dev_err(dev, "failed to init genpd\n"); 373 goto unroll_genpds; 374 } 375 376 data->domains[i] = &res[i].pd; 377 } 378 379 data->num_domains = i; 380 381 ret = of_genpd_add_provider_onecell(dev->of_node, data); 382 if (ret < 0) 383 goto unroll_genpds; 384 385 return 0; 386 387unroll_genpds: 388 for (i--; i >= 0; i--) 389 pm_genpd_remove(data->domains[i]); 390 391 return ret; 392} 393 394static void qmp_pd_remove(struct qmp *qmp) 395{ 396 struct genpd_onecell_data *data = &qmp->pd_data; 397 struct device *dev = qmp->dev; 398 int i; 399 400 of_genpd_del_provider(dev->of_node); 401 402 for (i = 0; i < data->num_domains; i++) 403 pm_genpd_remove(data->domains[i]); 404} 405 406static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev, 407 unsigned long *state) 408{ 409 *state = qmp_cdev_max_state; 410 return 0; 411} 412 413static int qmp_cdev_get_cur_state(struct thermal_cooling_device *cdev, 414 unsigned long *state) 415{ 416 struct qmp_cooling_device *qmp_cdev = cdev->devdata; 417 418 *state = qmp_cdev->state; 419 return 0; 420} 421 422static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev, 423 unsigned long state) 424{ 425 struct qmp_cooling_device *qmp_cdev = cdev->devdata; 426 char buf[QMP_MSG_LEN] = {}; 427 bool cdev_state; 428 int ret; 429 430 /* Normalize state */ 431 cdev_state = !!state; 432 433 if (qmp_cdev->state == state) 434 return 0; 435 436 snprintf(buf, sizeof(buf), 437 "{class: volt_flr, event:zero_temp, res:%s, value:%s}", 438 qmp_cdev->name, 439 cdev_state ? 
"on" : "off"); 440 441 ret = qmp_send(qmp_cdev->qmp, buf, sizeof(buf)); 442 443 if (!ret) 444 qmp_cdev->state = cdev_state; 445 446 return ret; 447} 448 449static struct thermal_cooling_device_ops qmp_cooling_device_ops = { 450 .get_max_state = qmp_cdev_get_max_state, 451 .get_cur_state = qmp_cdev_get_cur_state, 452 .set_cur_state = qmp_cdev_set_cur_state, 453}; 454 455static int qmp_cooling_device_add(struct qmp *qmp, 456 struct qmp_cooling_device *qmp_cdev, 457 struct device_node *node) 458{ 459 char *cdev_name = (char *)node->name; 460 461 qmp_cdev->qmp = qmp; 462 qmp_cdev->state = !qmp_cdev_max_state; 463 qmp_cdev->name = cdev_name; 464 qmp_cdev->cdev = devm_thermal_of_cooling_device_register 465 (qmp->dev, node, 466 cdev_name, 467 qmp_cdev, &qmp_cooling_device_ops); 468 469 if (IS_ERR(qmp_cdev->cdev)) 470 dev_err(qmp->dev, "unable to register %s cooling device\n", 471 cdev_name); 472 473 return PTR_ERR_OR_ZERO(qmp_cdev->cdev); 474} 475 476static int qmp_cooling_devices_register(struct qmp *qmp) 477{ 478 struct device_node *np, *child; 479 int count = 0; 480 int ret; 481 482 np = qmp->dev->of_node; 483 484 qmp->cooling_devs = devm_kcalloc(qmp->dev, QMP_NUM_COOLING_RESOURCES, 485 sizeof(*qmp->cooling_devs), 486 GFP_KERNEL); 487 488 if (!qmp->cooling_devs) 489 return -ENOMEM; 490 491 for_each_available_child_of_node(np, child) { 492 if (!of_find_property(child, "#cooling-cells", NULL)) 493 continue; 494 ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++], 495 child); 496 if (ret) { 497 of_node_put(child); 498 goto unroll; 499 } 500 } 501 502 if (!count) 503 devm_kfree(qmp->dev, qmp->cooling_devs); 504 505 return 0; 506 507unroll: 508 while (--count >= 0) 509 thermal_cooling_device_unregister 510 (qmp->cooling_devs[count].cdev); 511 devm_kfree(qmp->dev, qmp->cooling_devs); 512 513 return ret; 514} 515 516static void qmp_cooling_devices_remove(struct qmp *qmp) 517{ 518 int i; 519 520 for (i = 0; i < QMP_NUM_COOLING_RESOURCES; i++) 521 thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev); 522} 523 524static int qmp_probe(struct platform_device *pdev) 525{ 526 struct resource *res; 527 struct qmp *qmp; 528 int irq; 529 int ret; 530 531 qmp = devm_kzalloc(&pdev->dev, sizeof(*qmp), GFP_KERNEL); 532 if (!qmp) 533 return -ENOMEM; 534 535 qmp->dev = &pdev->dev; 536 init_waitqueue_head(&qmp->event); 537 mutex_init(&qmp->tx_lock); 538 539 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 540 qmp->msgram = devm_ioremap_resource(&pdev->dev, res); 541 if (IS_ERR(qmp->msgram)) 542 return PTR_ERR(qmp->msgram); 543 544 qmp->mbox_client.dev = &pdev->dev; 545 qmp->mbox_client.knows_txdone = true; 546 qmp->mbox_chan = mbox_request_channel(&qmp->mbox_client, 0); 547 if (IS_ERR(qmp->mbox_chan)) { 548 dev_err(&pdev->dev, "failed to acquire ipc mailbox\n"); 549 return PTR_ERR(qmp->mbox_chan); 550 } 551 552 irq = platform_get_irq(pdev, 0); 553 ret = devm_request_irq(&pdev->dev, irq, qmp_intr, 0, 554 "aoss-qmp", qmp); 555 if (ret < 0) { 556 dev_err(&pdev->dev, "failed to request interrupt\n"); 557 goto err_free_mbox; 558 } 559 560 ret = qmp_open(qmp); 561 if (ret < 0) 562 goto err_free_mbox; 563 564 ret = qmp_qdss_clk_add(qmp); 565 if (ret) 566 goto err_close_qmp; 567 568 ret = qmp_pd_add(qmp); 569 if (ret) 570 goto err_remove_qdss_clk; 571 572 ret = qmp_cooling_devices_register(qmp); 573 if (ret) 574 dev_err(&pdev->dev, "failed to register aoss cooling devices\n"); 575 576 platform_set_drvdata(pdev, qmp); 577 578 return 0; 579 580err_remove_qdss_clk: 581 qmp_qdss_clk_remove(qmp); 
err_close_qmp:
	qmp_close(qmp);
err_free_mbox:
	mbox_free_channel(qmp->mbox_chan);

	return ret;
}

static int qmp_remove(struct platform_device *pdev)
{
	struct qmp *qmp = platform_get_drvdata(pdev);

	qmp_qdss_clk_remove(qmp);
	qmp_pd_remove(qmp);
	qmp_cooling_devices_remove(qmp);

	qmp_close(qmp);
	mbox_free_channel(qmp->mbox_chan);

	return 0;
}

static const struct of_device_id qmp_dt_match[] = {
	{ .compatible = "qcom,sc7180-aoss-qmp", },
	{ .compatible = "qcom,sdm845-aoss-qmp", },
	{ .compatible = "qcom,sm8150-aoss-qmp", },
	{ .compatible = "qcom,sm8250-aoss-qmp", },
	{}
};
MODULE_DEVICE_TABLE(of, qmp_dt_match);

static struct platform_driver qmp_driver = {
	.driver = {
		.name		= "qcom_aoss_qmp",
		.of_match_table	= qmp_dt_match,
	},
	.probe = qmp_probe,
	.remove = qmp_remove,
};
module_platform_driver(qmp_driver);

MODULE_DESCRIPTION("Qualcomm AOSS QMP driver");
MODULE_LICENSE("GPL v2");