Lines matching refs: ctrl (Qualcomm SLIMbus controller driver)

103 	struct slim_controller  ctrl;
120 static void qcom_slim_queue_tx(struct qcom_slim_ctrl *ctrl, void *buf,
125 __iowrite32_copy(ctrl->base + tx_reg, buf, count);
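
The two hits above are from qcom_slim_queue_tx(), which copies an already-formatted message into the controller's memory-mapped TX window one 32-bit word at a time. A minimal sketch of that idiom, assuming a hypothetical register offset and function name (only __iowrite32_copy() itself is taken from the fragments):

#include <linux/io.h>

/*
 * Sketch: push 'count' 32-bit words of a prepared message into an MMIO
 * TX window.  'base' and 'tx_reg' are assumed names, not the driver's.
 */
static void example_queue_tx(void __iomem *base, u32 tx_reg,
			     const void *buf, u8 count)
{
	/* __iowrite32_copy() takes the length in 32-bit words, not bytes */
	__iowrite32_copy(base + tx_reg, buf, count);

	/* make sure the FIFO writes are posted before any trigger write */
	wmb();
}
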
131 static void *slim_alloc_rxbuf(struct qcom_slim_ctrl *ctrl)
136 spin_lock_irqsave(&ctrl->rx.lock, flags);
137 if ((ctrl->rx.tail + 1) % ctrl->rx.n == ctrl->rx.head) {
138 spin_unlock_irqrestore(&ctrl->rx.lock, flags);
139 dev_err(ctrl->dev, "RX QUEUE full!");
142 idx = ctrl->rx.tail;
143 ctrl->rx.tail = (ctrl->rx.tail + 1) % ctrl->rx.n;
144 spin_unlock_irqrestore(&ctrl->rx.lock, flags);
146 return ctrl->rx.base + (idx * ctrl->rx.sl_sz);
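
slim_alloc_rxbuf() above is the producer half of a fixed-size circular message queue: under rx.lock it hands out the slot at tail unless advancing tail would run into head, i.e. the ring is full. A kernel-style sketch of that discipline; the type and function names (msg_ring, ring_reserve) are illustrative, not the driver's:

#include <linux/spinlock.h>

struct msg_ring {
	spinlock_t lock;
	int head;		/* next slot to consume */
	int tail;		/* next slot to fill */
	int n;			/* number of slots */
	int sl_sz;		/* bytes per slot */
	void *base;		/* n * sl_sz bytes of backing storage */
};

/* Reserve the next free slot, or return NULL if the ring is full. */
static void *ring_reserve(struct msg_ring *r)
{
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&r->lock, flags);
	if ((r->tail + 1) % r->n == r->head) {
		/* one slot stays empty so "full" and "empty" are distinct */
		spin_unlock_irqrestore(&r->lock, flags);
		return NULL;
	}
	idx = r->tail;
	r->tail = (r->tail + 1) % r->n;
	spin_unlock_irqrestore(&r->lock, flags);

	return r->base + idx * r->sl_sz;
}
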
149 static void slim_ack_txn(struct qcom_slim_ctrl *ctrl, int err)
155 spin_lock_irqsave(&ctrl->tx.lock, flags);
156 idx = ctrl->tx.head;
157 ctrl->tx.head = (ctrl->tx.head + 1) % ctrl->tx.n;
158 spin_unlock_irqrestore(&ctrl->tx.lock, flags);
160 comp = ctrl->wr_comp[idx];
161 ctrl->wr_comp[idx] = NULL;
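
slim_ack_txn() is the interrupt-side retire path: it advances head past the oldest in-flight TX slot and signals whatever completion was stored for that slot when the message was queued. A sketch of the ack side, reusing the msg_ring type from the previous sketch (the wr_comp indexing follows the fragments; the function name is hypothetical):

#include <linux/completion.h>

/* Retire the oldest TX slot and wake the sender waiting on it, if any. */
static void example_ack_txn(struct msg_ring *tx, struct completion **wr_comp)
{
	struct completion *comp;
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&tx->lock, flags);
	idx = tx->head;
	tx->head = (tx->head + 1) % tx->n;
	comp = wr_comp[idx];
	wr_comp[idx] = NULL;
	spin_unlock_irqrestore(&tx->lock, flags);

	if (comp)
		complete(comp);
}
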
166 static irqreturn_t qcom_slim_handle_tx_irq(struct qcom_slim_ctrl *ctrl,
173 ctrl->base + MGR_INT_CLR);
176 u32 mgr_stat = readl_relaxed(ctrl->base + MGR_STATUS);
177 u32 mgr_ie_stat = readl_relaxed(ctrl->base + MGR_IE_STAT);
178 u32 frm_stat = readl_relaxed(ctrl->base + FRM_STAT);
179 u32 frm_cfg = readl_relaxed(ctrl->base + FRM_CFG);
180 u32 frm_intr_stat = readl_relaxed(ctrl->base + FRM_INT_STAT);
181 u32 frm_ie_stat = readl_relaxed(ctrl->base + FRM_IE_STAT);
182 u32 intf_stat = readl_relaxed(ctrl->base + INTF_STAT);
183 u32 intf_intr_stat = readl_relaxed(ctrl->base + INTF_INT_STAT);
184 u32 intf_ie_stat = readl_relaxed(ctrl->base + INTF_IE_STAT);
186 writel_relaxed(MGR_INT_TX_NACKED_2, ctrl->base + MGR_INT_CLR);
188 dev_err(ctrl->dev, "TX Nack MGR:int:0x%x, stat:0x%x\n",
190 dev_err(ctrl->dev, "TX Nack MGR:ie:0x%x\n", mgr_ie_stat);
191 dev_err(ctrl->dev, "TX Nack FRM:int:0x%x, stat:0x%x\n",
193 dev_err(ctrl->dev, "TX Nack FRM:cfg:0x%x, ie:0x%x\n",
195 dev_err(ctrl->dev, "TX Nack INTF:intr:0x%x, stat:0x%x\n",
197 dev_err(ctrl->dev, "TX Nack INTF:ie:0x%x\n",
202 slim_ack_txn(ctrl, err);
207 static irqreturn_t qcom_slim_handle_rx_irq(struct qcom_slim_ctrl *ctrl,
214 pkt[0] = readl_relaxed(ctrl->base + MGR_RX_MSG);
224 rx_buf = (u32 *)slim_alloc_rxbuf(ctrl);
226 dev_err(ctrl->dev, "dropping RX:0x%x due to RX full\n",
236 __ioread32_copy(rx_buf + 1, ctrl->base + MGR_RX_MSG + 4,
246 slim_msg_response(&ctrl->ctrl, (u8 *)(rx_buf + 1),
250 dev_err(ctrl->dev, "unsupported MC,%x MT:%x\n",
255 writel(MGR_INT_RX_MSG_RCVD, ctrl->base +
258 queue_work(ctrl->rxwq, &ctrl->wd);
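
qcom_slim_handle_rx_irq() above shows the hard-IRQ half of message reception: read the first word from MGR_RX_MSG to learn what arrived, reserve a slot in the RX ring, copy the remaining words out of the FIFO with __ioread32_copy(), ack the interrupt, and defer parsing to a workqueue. A sketch under those assumptions; the register offsets, bit names, and header layout below are invented for illustration:

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

#define EX_RX_MSG	0x100		/* assumed FIFO offset */
#define EX_INT_CLR	0x104		/* assumed interrupt-clear offset */
#define EX_INT_RX_RCVD	BIT(1)		/* assumed status bit */

struct example_rx {
	void __iomem *base;
	struct msg_ring ring;		/* msg_ring from the earlier sketch */
	struct workqueue_struct *wq;
	struct work_struct wd;
};

static irqreturn_t example_handle_rx_irq(struct example_rx *rx)
{
	u32 hdr = readl_relaxed(rx->base + EX_RX_MSG);
	u8 len = hdr & 0x1f;		/* assumed: total length in bytes */
	u32 *buf = ring_reserve(&rx->ring);

	if (buf) {
		size_t words = DIV_ROUND_UP(len, 4);

		buf[0] = hdr;
		/* copy the words after the header out of the FIFO */
		if (words > 1)
			__ioread32_copy(buf + 1, rx->base + EX_RX_MSG + 4,
					words - 1);
	}
	/* else: ring full; the real handler logs and drops the message */

	/* ack the interrupt, then let process context do the parsing */
	writel(EX_INT_RX_RCVD, rx->base + EX_INT_CLR);
	queue_work(rx->wq, &rx->wd);

	return IRQ_HANDLED;
}
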
265 struct qcom_slim_ctrl *ctrl = d;
266 u32 stat = readl_relaxed(ctrl->base + MGR_INT_STAT);
270 ret = qcom_slim_handle_tx_irq(ctrl, stat);
273 ret = qcom_slim_handle_rx_irq(ctrl, stat);
280 struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
282 clk_prepare_enable(ctrl->hclk);
283 clk_prepare_enable(ctrl->rclk);
284 enable_irq(ctrl->irq);
286 writel_relaxed(1, ctrl->base + FRM_WAKEUP);
302 static void *slim_alloc_txbuf(struct qcom_slim_ctrl *ctrl,
309 spin_lock_irqsave(&ctrl->tx.lock, flags);
310 if (((ctrl->tx.head + 1) % ctrl->tx.n) == ctrl->tx.tail) {
311 spin_unlock_irqrestore(&ctrl->tx.lock, flags);
312 dev_err(ctrl->dev, "controller TX buf unavailable");
315 idx = ctrl->tx.tail;
316 ctrl->wr_comp[idx] = done;
317 ctrl->tx.tail = (ctrl->tx.tail + 1) % ctrl->tx.n;
319 spin_unlock_irqrestore(&ctrl->tx.lock, flags);
321 return ctrl->tx.base + (idx * ctrl->tx.sl_sz);
328 struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
330 void *pbuf = slim_alloc_txbuf(ctrl, txn, &done);
343 pbuf = slim_alloc_txbuf(ctrl, txn, &done);
376 qcom_slim_queue_tx(ctrl, head, txn->rl, MGR_TX_MSG);
380 dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
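
slim_alloc_txbuf() mirrors the RX reservation but also records the caller's struct completion in wr_comp[] for the chosen slot, so the ack path sketched earlier can wake the sender from interrupt context; qcom_xfer_msg() then fills the slot, queues it to the hardware, and waits with a timeout (the "TX timed out" message at line 380). A sketch of the sender side, again reusing msg_ring; the names, the conventional ring-full test, and the 100 ms timeout are assumptions, and the sketch omits the cleanup of wr_comp[] that a real timeout path needs:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/string.h>

#define EX_TX_TIMEOUT	msecs_to_jiffies(100)	/* assumed timeout */

/* Reserve a TX slot and remember which completion to signal on ack. */
static void *example_alloc_txbuf(struct msg_ring *tx,
				 struct completion **wr_comp,
				 struct completion *done)
{
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&tx->lock, flags);
	if ((tx->tail + 1) % tx->n == tx->head) {
		spin_unlock_irqrestore(&tx->lock, flags);
		return NULL;			/* no free TX descriptor */
	}
	idx = tx->tail;
	wr_comp[idx] = done;
	tx->tail = (tx->tail + 1) % tx->n;
	spin_unlock_irqrestore(&tx->lock, flags);

	return tx->base + idx * tx->sl_sz;
}

/* Queue one message and block until it is acked or the timeout expires. */
static int example_send(struct msg_ring *tx, struct completion **wr_comp,
			void __iomem *base, const void *msg, u8 words)
{
	DECLARE_COMPLETION_ONSTACK(done);
	void *pbuf = example_alloc_txbuf(tx, wr_comp, &done);

	if (!pbuf)
		return -ENOMEM;

	memcpy(pbuf, msg, words * 4);
	example_queue_tx(base, 0x0 /* assumed TX offset */, pbuf, words);

	if (!wait_for_completion_timeout(&done, EX_TX_TIMEOUT))
		return -ETIMEDOUT;	/* real code must also clear wr_comp[] */

	return 0;
}
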
392 struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
413 ret = slim_do_transfer(&ctrl->ctrl, &txn);
416 dev_err(ctrl->dev, "set LA:0x%x failed:ret:%d\n",
421 static int slim_get_current_rxbuf(struct qcom_slim_ctrl *ctrl, void *buf)
425 spin_lock_irqsave(&ctrl->rx.lock, flags);
426 if (ctrl->rx.tail == ctrl->rx.head) {
427 spin_unlock_irqrestore(&ctrl->rx.lock, flags);
430 memcpy(buf, ctrl->rx.base + (ctrl->rx.head * ctrl->rx.sl_sz),
431 ctrl->rx.sl_sz);
433 ctrl->rx.head = (ctrl->rx.head + 1) % ctrl->rx.n;
434 spin_unlock_irqrestore(&ctrl->rx.lock, flags);
444 struct qcom_slim_ctrl *ctrl = container_of(work, struct qcom_slim_ctrl,
447 while ((slim_get_current_rxbuf(ctrl, buf)) != -ENODATA) {
460 ret = slim_device_report_present(&ctrl->ctrl, &ea,
463 dev_err(ctrl->dev, "assign laddr failed:%d\n",
466 dev_err(ctrl->dev, "unexpected message:mc:%x, mt:%x\n",
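
qcom_slim_rxwq() is the process-context half: it repeatedly pulls messages out of the RX ring via slim_get_current_rxbuf() until that returns -ENODATA, and dispatches each one (for example, reporting a newly announced device so the core can assign it a logical address). A sketch of the consumer and the drain loop, reusing msg_ring and example_rx from the earlier sketches; the 64-byte buffer is an assumption:

#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/workqueue.h>

/* Copy out the message at head, or return -ENODATA if the ring is empty. */
static int ring_consume(struct msg_ring *r, void *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&r->lock, flags);
	if (r->tail == r->head) {
		spin_unlock_irqrestore(&r->lock, flags);
		return -ENODATA;
	}
	memcpy(buf, r->base + r->head * r->sl_sz, r->sl_sz);
	r->head = (r->head + 1) % r->n;
	spin_unlock_irqrestore(&r->lock, flags);

	return 0;
}

/* Work item: drain everything the interrupt handler has queued so far. */
static void example_rx_work(struct work_struct *work)
{
	struct example_rx *rx = container_of(work, struct example_rx, wd);
	u8 buf[64];			/* assumes sl_sz <= 64 */

	while (ring_consume(&rx->ring, buf) != -ENODATA) {
		/* decode buf and hand it to the upper layer here */
	}
}
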
473 struct qcom_slim_ctrl *ctrl)
475 if (!ctrl->slew_reg) {
477 ctrl->slew_reg = devm_platform_ioremap_resource_byname(pdev, "slew");
478 if (IS_ERR(ctrl->slew_reg))
482 writel_relaxed(1, ctrl->slew_reg);
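
qcom_slim_prg_slew() maps an optional, named MMIO resource the first time it is needed and writes its enable bit; if the "slew" region is absent, the programming is simply skipped. A standalone sketch of that lazy-map pattern (the single-bit layout and the decision to clear the pointer on failure are assumptions):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Map the optional "slew" region on first use, then set its enable bit. */
static void example_prg_slew(struct platform_device *pdev, void __iomem **slew)
{
	if (!*slew) {
		*slew = devm_platform_ioremap_resource_byname(pdev, "slew");
		if (IS_ERR(*slew)) {
			*slew = NULL;	/* treat a missing region as nothing to do */
			return;
		}
	}

	writel_relaxed(1, *slew);
}
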
489 struct qcom_slim_ctrl *ctrl;
494 ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
495 if (!ctrl)
498 ctrl->hclk = devm_clk_get(&pdev->dev, "iface");
499 if (IS_ERR(ctrl->hclk))
500 return PTR_ERR(ctrl->hclk);
502 ctrl->rclk = devm_clk_get(&pdev->dev, "core");
503 if (IS_ERR(ctrl->rclk))
504 return PTR_ERR(ctrl->rclk);
506 ret = clk_set_rate(ctrl->rclk, SLIM_ROOT_FREQ);
512 ctrl->irq = platform_get_irq(pdev, 0);
513 if (ctrl->irq < 0) {
515 return ctrl->irq;
518 sctrl = &ctrl->ctrl;
520 ctrl->dev = &pdev->dev;
521 platform_set_drvdata(pdev, ctrl);
522 dev_set_drvdata(ctrl->dev, ctrl);
524 slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
525 ctrl->base = devm_ioremap_resource(ctrl->dev, slim_mem);
526 if (IS_ERR(ctrl->base))
527 return PTR_ERR(ctrl->base);
532 ctrl->tx.n = QCOM_TX_MSGS;
533 ctrl->tx.sl_sz = SLIM_MSGQ_BUF_LEN;
534 ctrl->rx.n = QCOM_RX_MSGS;
535 ctrl->rx.sl_sz = SLIM_MSGQ_BUF_LEN;
536 ctrl->wr_comp = kcalloc(QCOM_TX_MSGS, sizeof(struct completion *),
538 if (!ctrl->wr_comp)
541 spin_lock_init(&ctrl->rx.lock);
542 spin_lock_init(&ctrl->tx.lock);
543 INIT_WORK(&ctrl->wd, qcom_slim_rxwq);
544 ctrl->rxwq = create_singlethread_workqueue("qcom_slim_rx");
545 if (!ctrl->rxwq) {
546 dev_err(ctrl->dev, "Failed to start Rx WQ\n");
550 ctrl->framer.rootfreq = SLIM_ROOT_FREQ / 8;
551 ctrl->framer.superfreq =
552 ctrl->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
553 sctrl->a_framer = &ctrl->framer;
556 qcom_slim_prg_slew(pdev, ctrl);
558 ret = devm_request_irq(&pdev->dev, ctrl->irq, qcom_slim_interrupt,
559 IRQF_TRIGGER_HIGH, "qcom_slim_irq", ctrl);
565 ret = clk_prepare_enable(ctrl->hclk);
569 ret = clk_prepare_enable(ctrl->rclk);
573 ctrl->tx.base = devm_kcalloc(&pdev->dev, ctrl->tx.n, ctrl->tx.sl_sz,
575 if (!ctrl->tx.base) {
580 ctrl->rx.base = devm_kcalloc(&pdev->dev, ctrl->rx.n, ctrl->rx.sl_sz,
582 if (!ctrl->rx.base) {
588 ret = slim_register_controller(&ctrl->ctrl);
590 dev_err(ctrl->dev, "error adding controller\n");
594 ver = readl_relaxed(ctrl->base);
598 writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
600 ctrl->base + CFG_PORT(COMP_TRUST_CFG, ver));
604 MGR_INT_TX_MSG_SENT), ctrl->base + MGR_INT_EN);
605 writel(1, ctrl->base + MGR_CFG);
609 ctrl->base + FRM_CFG);
610 writel(MGR_CFG_ENABLE, ctrl->base + MGR_CFG);
611 writel(1, ctrl->base + INTF_CFG);
612 writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
620 dev_dbg(ctrl->dev, "QCOM SB controller is up:ver:0x%x!\n", ver);
624 clk_disable_unprepare(ctrl->rclk);
626 clk_disable_unprepare(ctrl->hclk);
629 destroy_workqueue(ctrl->rxwq);
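
The probe hits above follow the usual platform-driver shape: allocate the per-controller state with devm_kzalloc(), look up clocks and the IRQ, map the "ctrl" registers, size the TX/RX rings, start the RX workqueue, enable clocks, register with the SLIMbus core, and finally program the hardware; the labels quoted at lines 624-629 undo clk_prepare_enable() and the workqueue in reverse order when a later step fails. A compressed sketch of that ordering and its goto-based unwind; example_register() and the struct layout are hypothetical, while "iface" and the single-threaded workqueue follow the fragments:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "slimbus.h"		/* struct slim_controller, the driver's local header */

struct example_ctrl {
	struct slim_controller ctrl;
	struct clk *hclk;
	struct clk *rclk;
	int irq;
	struct workqueue_struct *wq;
};

static int example_register(struct example_ctrl *ec)
{
	/* stand-in for slim_register_controller() + hardware bring-up */
	return 0;
}

static int example_probe(struct platform_device *pdev)
{
	struct example_ctrl *ec;
	int ret;

	ec = devm_kzalloc(&pdev->dev, sizeof(*ec), GFP_KERNEL);
	if (!ec)
		return -ENOMEM;

	ec->hclk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(ec->hclk))
		return PTR_ERR(ec->hclk);

	ec->wq = create_singlethread_workqueue("example_rx");
	if (!ec->wq)
		return -ENOMEM;

	ret = clk_prepare_enable(ec->hclk);
	if (ret)
		goto err_wq;

	ret = example_register(ec);
	if (ret)
		goto err_clk;

	return 0;

err_clk:
	clk_disable_unprepare(ec->hclk);	/* undo in reverse order of setup */
err_wq:
	destroy_workqueue(ec->wq);
	return ret;
}
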
635 struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);
638 slim_unregister_controller(&ctrl->ctrl);
639 clk_disable_unprepare(ctrl->rclk);
640 clk_disable_unprepare(ctrl->hclk);
641 destroy_workqueue(ctrl->rxwq);
652 struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
656 ret = slim_ctrl_clk_pause(&ctrl->ctrl, false, SLIM_CLK_UNSPECIFIED);
660 disable_irq(ctrl->irq);
661 clk_disable_unprepare(ctrl->hclk);
662 clk_disable_unprepare(ctrl->rclk);
669 struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
673 ret = slim_ctrl_clk_pause(&ctrl->ctrl, true, 0);
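
The runtime-PM hits show the suspend path asking the SLIMbus core to enter clock pause (slim_ctrl_clk_pause(..., false, ...)) before disabling the IRQ and both clocks, with resume doing the inverse and waking the bus (compare the FRM_WAKEUP write at line 286). A sketch of the pair, reusing the example_ctrl layout from the probe sketch; only slim_ctrl_clk_pause() and SLIM_CLK_UNSPECIFIED come from the fragments:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interrupt.h>

static int example_runtime_suspend(struct device *dev)
{
	struct example_ctrl *ec = dev_get_drvdata(dev);
	int ret;

	/* ask the core to put the bus into clock pause first */
	ret = slim_ctrl_clk_pause(&ec->ctrl, false, SLIM_CLK_UNSPECIFIED);
	if (ret)
		return ret;

	disable_irq(ec->irq);
	clk_disable_unprepare(ec->hclk);
	clk_disable_unprepare(ec->rclk);

	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	struct example_ctrl *ec = dev_get_drvdata(dev);

	clk_prepare_enable(ec->hclk);
	clk_prepare_enable(ec->rclk);
	enable_irq(ec->irq);

	/* bring the bus back out of clock pause */
	return slim_ctrl_clk_pause(&ec->ctrl, true, 0);
}
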