Lines matching references to qca in drivers/bluetooth/hci_qca.c, the Qualcomm (QCA) Bluetooth UART (hci_uart) driver. Each line below carries its source line number; lines that do not reference qca are elided.
281 struct qca_data *qca = hu->priv;
284 bool old_vote = (qca->tx_vote | qca->rx_vote);
289 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
292 qca->vote_off_ms += diff;
294 qca->vote_on_ms += diff;
298 qca->tx_vote = true;
299 qca->tx_votes_on++;
303 qca->rx_vote = true;
304 qca->rx_votes_on++;
308 qca->tx_vote = false;
309 qca->tx_votes_off++;
313 qca->rx_vote = false;
314 qca->rx_votes_off++;
322 new_vote = qca->rx_vote | qca->tx_vote;
333 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
336 qca->votes_on++;
337 qca->vote_off_ms += diff;
339 qca->votes_off++;
340 qca->vote_on_ms += diff;
342 qca->vote_last_jif = jiffies;
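
Lines 281-342 are the body of the driver's clock-vote helper (serial_clock_vote() in hci_qca.c): the TX and RX paths each cast an independent vote, the UART clock must stay on while either vote is set, and whenever the combined vote changes edge the elapsed interval is charged to the state just left, converted from jiffies to milliseconds. Below is a minimal userspace model of that accounting, keeping the qca_data field names but replacing jiffies with a plain millisecond clock; the stats-update branch at lines 289-294 is omitted. Illustrative sketch only, not the driver code.

/* build: cc vote_model.c */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vote_state {
    bool tx_vote, rx_vote;
    uint64_t tx_votes_on, tx_votes_off;
    uint64_t rx_votes_on, rx_votes_off;
    uint64_t votes_on, votes_off;
    uint32_t vote_on_ms, vote_off_ms;
    uint64_t vote_last_ms;              /* stands in for vote_last_jif */
};

enum vote_event { TX_VOTE_ON, RX_VOTE_ON, TX_VOTE_OFF, RX_VOTE_OFF };

static void serial_clock_vote(struct vote_state *v, enum vote_event e,
                              uint64_t now_ms)
{
    bool old_vote = v->tx_vote | v->rx_vote;
    bool new_vote;

    switch (e) {
    case TX_VOTE_ON:  v->tx_vote = true;  v->tx_votes_on++;  break;
    case RX_VOTE_ON:  v->rx_vote = true;  v->rx_votes_on++;  break;
    case TX_VOTE_OFF: v->tx_vote = false; v->tx_votes_off++; break;
    case RX_VOTE_OFF: v->rx_vote = false; v->rx_votes_off++; break;
    }

    new_vote = v->rx_vote | v->tx_vote;
    if (new_vote != old_vote) {
        /* The interval that just ended was spent in the *old* state,
         * so an off->on edge charges it to vote_off_ms and vice versa. */
        uint64_t diff = now_ms - v->vote_last_ms;

        if (new_vote) {
            v->votes_on++;
            v->vote_off_ms += diff;
        } else {
            v->votes_off++;
            v->vote_on_ms += diff;
        }
        v->vote_last_ms = now_ms;
    }
}

int main(void)
{
    struct vote_state v = { .vote_last_ms = 0 };

    serial_clock_vote(&v, TX_VOTE_ON, 10);  /* clock turns on at t=10 */
    serial_clock_vote(&v, RX_VOTE_ON, 15);  /* no edge: already on */
    serial_clock_vote(&v, TX_VOTE_OFF, 40); /* still on: rx holds a vote */
    serial_clock_vote(&v, RX_VOTE_OFF, 70); /* clock turns off at t=70 */
    printf("on=%u ms off=%u ms\n",          /* prints: on=60 ms off=10 ms */
           (unsigned)v.vote_on_ms, (unsigned)v.vote_off_ms);
    return 0;
}
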
353 struct qca_data *qca = hu->priv;
366 skb_queue_tail(&qca->txq, skb);
373 struct qca_data *qca = container_of(work, struct qca_data,
375 struct hci_uart *hu = qca->hu;
384 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
390 qca->ibs_sent_wakes++;
393 retrans_delay = msecs_to_jiffies(qca->wake_retrans);
394 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
396 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
404 struct qca_data *qca = container_of(work, struct qca_data,
406 struct hci_uart *hu = qca->hu;
413 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
414 qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
422 qca->ibs_sent_wacks++;
424 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
432 struct qca_data *qca = container_of(work, struct qca_data,
434 struct hci_uart *hu = qca->hu;
443 struct qca_data *qca = container_of(work, struct qca_data,
445 struct hci_uart *hu = qca->hu;
460 struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
461 struct hci_uart *hu = qca->hu;
464 BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);
466 spin_lock_irqsave_nested(&qca->hci_ibs_lock,
469 switch (qca->tx_ibs_state) {
476 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
477 qca->ibs_sent_slps++;
478 queue_work(qca->workqueue, &qca->ws_tx_vote_off);
484 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
488 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
493 struct qca_data *qca = from_timer(qca, t, wake_retrans_timer);
494 struct hci_uart *hu = qca->hu;
499 hu, qca->tx_ibs_state);
501 spin_lock_irqsave_nested(&qca->hci_ibs_lock,
505 if (test_bit(QCA_SUSPENDING, &qca->flags)) {
506 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
510 switch (qca->tx_ibs_state) {
518 qca->ibs_sent_wakes++;
519 retrans_delay = msecs_to_jiffies(qca->wake_retrans);
520 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
526 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
530 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
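
Lines 460-530 are the two TX-side timers. hci_ibs_tx_idle_timeout() fires after tx_idle_delay ms without TX traffic: in the awake state it sends a sleep indication, drops to TX_ASLEEP and queues the TX clock-vote-off work; any other state is logged as a spurious timeout. hci_ibs_wake_retrans_timeout() re-sends the wake indication every wake_retrans ms for as long as the state stays TX_WAKING, i.e. until the controller acknowledges (the suspend check at line 505 aborts the retry loop). A compact model of the two callbacks on the TX state; locking, skb transmission and the suspend check are elided:

/* build: cc tx_timers_model.c */
#include <stdio.h>

enum tx_ibs_state { TX_ASLEEP, TX_WAKING, TX_AWAKE };

struct tx_ctx {
    enum tx_ibs_state state;
    unsigned ibs_sent_slps, ibs_sent_wakes;
};

/* Model of hci_ibs_tx_idle_timeout(): only meaningful in TX_AWAKE. */
static void tx_idle_timeout(struct tx_ctx *c)
{
    switch (c->state) {
    case TX_AWAKE:
        puts("send sleep indication, queue ws_tx_vote_off");
        c->state = TX_ASLEEP;
        c->ibs_sent_slps++;
        break;
    default:
        fprintf(stderr, "spurious idle timeout in state %d\n", c->state);
        break;
    }
}

/* Model of hci_ibs_wake_retrans_timeout(): keep nudging until acked. */
static void wake_retrans_timeout(struct tx_ctx *c, int *rearm)
{
    *rearm = 0;
    switch (c->state) {
    case TX_WAKING:
        puts("re-send wake indication");
        c->ibs_sent_wakes++;
        *rearm = 1;   /* mod_timer(jiffies + msecs_to_jiffies(wake_retrans)) */
        break;
    default:
        fprintf(stderr, "spurious retrans timeout in state %d\n", c->state);
        break;
    }
}

int main(void)
{
    struct tx_ctx c = { .state = TX_WAKING };
    int rearm;

    wake_retrans_timeout(&c, &rearm); /* no ack yet: re-send and re-arm */
    c.state = TX_AWAKE;               /* controller acked (device_woke_up) */
    tx_idle_timeout(&c);              /* went idle: back to sleep */
    printf("wakes=%u slps=%u\n", c.ibs_sent_wakes, c.ibs_sent_slps);
    return 0;
}
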
539 struct qca_data *qca = container_of(work, struct qca_data,
541 struct hci_uart *hu = qca->hu;
543 mutex_lock(&qca->hci_memdump_lock);
544 if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) {
545 qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
546 if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
554 mutex_unlock(&qca->hci_memdump_lock);
562 struct qca_data *qca;
569 qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
570 if (!qca)
573 skb_queue_head_init(&qca->txq);
574 skb_queue_head_init(&qca->tx_wait_q);
575 skb_queue_head_init(&qca->rx_memdump_q);
576 spin_lock_init(&qca->hci_ibs_lock);
577 mutex_init(&qca->hci_memdump_lock);
578 qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
579 if (!qca->workqueue) {
581 kfree(qca);
585 INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
586 INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
587 INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
588 INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
589 INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump);
590 INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout,
592 init_waitqueue_head(&qca->suspend_wait_q);
594 qca->hu = hu;
595 init_completion(&qca->drop_ev_comp);
598 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
599 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
601 qca->vote_last_jif = jiffies;
603 hu->priv = qca;
615 timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
616 qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
618 timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
619 qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;
622 qca->tx_idle_delay, qca->wake_retrans);
630 struct qca_data *qca = hu->priv;
637 if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags))
644 debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
645 debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
647 &qca->ibs_sent_slps);
649 &qca->ibs_sent_wakes);
651 &qca->ibs_sent_wacks);
653 &qca->ibs_recv_slps);
655 &qca->ibs_recv_wakes);
657 &qca->ibs_recv_wacks);
658 debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
659 debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
660 debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
661 debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
662 debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
663 debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
664 debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
665 debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
666 debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
667 debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);
671 debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
673 &qca->tx_idle_delay);
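
Lines 630-673 (qca_debugfs_init()) expose the IBS state, vote counters and timing tunables as debugfs files, guarded by the QCA_DEBUGFS_CREATED test_and_set_bit at line 637 so a repeated hdev open cannot create duplicate entries. A schematic of the same pattern, assuming the modern void-returning debugfs_create_* helpers; the directory name and counters here are made up for illustration:

#include <linux/debugfs.h>

struct example_stats {
	u8  state;
	u32 wake_retrans;
	u64 wakes;
	bool vote;
};

static struct dentry *example_dir;
static struct example_stats stats;

static void example_debugfs_init(void)
{
	const umode_t mode = 0444;	/* read-only; a writable tunable
					 * would use 0644 instead */

	example_dir = debugfs_create_dir("example_ibs", NULL);

	/* Each helper binds a file to the variable's address, so the
	 * variable must outlive the debugfs entry. */
	debugfs_create_u8("state", mode, example_dir, &stats.state);
	debugfs_create_u32("wake_retrans", mode, example_dir,
			   &stats.wake_retrans);
	debugfs_create_u64("wakes", mode, example_dir, &stats.wakes);
	debugfs_create_bool("vote", mode, example_dir, &stats.vote);
}

static void example_debugfs_exit(void)
{
	debugfs_remove_recursive(example_dir);
}
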
679 struct qca_data *qca = hu->priv;
681 BT_DBG("hu %p qca flush", hu);
683 skb_queue_purge(&qca->tx_wait_q);
684 skb_queue_purge(&qca->txq);
692 struct qca_data *qca = hu->priv;
694 BT_DBG("hu %p qca close", hu);
698 skb_queue_purge(&qca->tx_wait_q);
699 skb_queue_purge(&qca->txq);
700 skb_queue_purge(&qca->rx_memdump_q);
701 destroy_workqueue(qca->workqueue);
702 del_timer_sync(&qca->tx_idle_timer);
703 del_timer_sync(&qca->wake_retrans_timer);
704 qca->hu = NULL;
706 kfree_skb(qca->rx_skb);
710 kfree(qca);
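
qca_close() (lines 692-710) unwinds qca_open() (lines 562-622) in reverse dependency order: purge the three packet queues, destroy the ordered workqueue (destroy_workqueue() drains pending work first), synchronously kill both timers, and only then free the in-flight rx skb and the qca_data itself. The same lifecycle pairing as a self-contained module sketch with hypothetical example_* names; note the ordered workqueue, which serializes work items exactly as qca_wq does for the IBS and memdump works. In this sketch the timer is stopped before the workqueue because its callback queues work:

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct example_data {
	struct workqueue_struct *wq;
	struct timer_list idle_timer;
	struct work_struct idle_work;
};

static struct example_data *ed;

static void example_idle_work(struct work_struct *work)
{
	pr_info("example: idle work ran\n");
}

static void example_idle_timeout(struct timer_list *t)
{
	struct example_data *d = from_timer(d, t, idle_timer);

	/* Defer real processing to process context, as hci_qca does. */
	queue_work(d->wq, &d->idle_work);
}

static int __init example_init(void)
{
	ed = kzalloc(sizeof(*ed), GFP_KERNEL);
	if (!ed)
		return -ENOMEM;

	/* Ordered workqueue: at most one work item runs at a time. */
	ed->wq = alloc_ordered_workqueue("example_wq", 0);
	if (!ed->wq) {
		kfree(ed);
		return -ENOMEM;
	}

	INIT_WORK(&ed->idle_work, example_idle_work);
	timer_setup(&ed->idle_timer, example_idle_timeout, 0);
	mod_timer(&ed->idle_timer, jiffies + msecs_to_jiffies(2000));
	return 0;
}

static void __exit example_exit(void)
{
	del_timer_sync(&ed->idle_timer);  /* timer queues work: stop it first */
	destroy_workqueue(ed->wq);        /* drains, then destroys */
	kfree(ed);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
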
720 struct qca_data *qca = hu->priv;
724 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
726 qca->ibs_recv_wakes++;
729 if (test_bit(QCA_SUSPENDING, &qca->flags)) {
730 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
734 switch (qca->rx_ibs_state) {
739 queue_work(qca->workqueue, &qca->ws_awake_rx);
740 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
751 qca->ibs_sent_wacks++;
757 qca->rx_ibs_state);
761 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
772 struct qca_data *qca = hu->priv;
774 BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state);
776 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
778 qca->ibs_recv_slps++;
780 switch (qca->rx_ibs_state) {
783 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
785 queue_work(qca->workqueue, &qca->ws_rx_vote_off);
794 qca->rx_ibs_state);
798 wake_up_interruptible(&qca->suspend_wait_q);
800 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
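
Lines 720-800 are the two RX-side IBS handlers (device_want_to_wakeup() and device_want_to_sleep() in the driver). On a wake indication from the controller: if RX is asleep, the actual wake-up (RX clock vote plus the wake ack) is deferred to the ws_awake_rx worker (lines 404-424); if RX is already awake, the ack is sent directly. On a sleep indication: RX_AWAKE drops to RX_ASLEEP and the RX clock vote is released via ws_rx_vote_off, and either way any waiter parked on suspend_wait_q (line 798) is woken so qca_suspend() can re-check the state. A minimal model of the RX transitions, with the worker's effect inlined and no locking or real I/O:

/* build: cc rx_model.c */
#include <stdio.h>

enum rx_ibs_state { RX_ASLEEP, RX_AWAKE };

struct rx_ctx {
    enum rx_ibs_state state;
    unsigned ibs_recv_wakes, ibs_recv_slps, ibs_sent_wacks;
};

/* Model of device_want_to_wakeup(): controller sent a wake indication. */
static void want_to_wakeup(struct rx_ctx *c)
{
    c->ibs_recv_wakes++;
    switch (c->state) {
    case RX_ASLEEP:
        /* The driver defers this to ws_awake_rx. */
        puts("vote RX clock on, send wake ack");
        c->state = RX_AWAKE;
        c->ibs_sent_wacks++;
        break;
    case RX_AWAKE:
        puts("already awake: send wake ack directly");
        c->ibs_sent_wacks++;
        break;
    }
}

/* Model of device_want_to_sleep(): controller sent a sleep indication. */
static void want_to_sleep(struct rx_ctx *c)
{
    c->ibs_recv_slps++;
    if (c->state == RX_AWAKE) {
        c->state = RX_ASLEEP;
        puts("queue ws_rx_vote_off: release RX clock vote");
    }
    puts("wake_up_interruptible(suspend_wait_q)");
}

int main(void)
{
    struct rx_ctx c = { .state = RX_ASLEEP };

    want_to_wakeup(&c);  /* asleep -> awake, ack sent */
    want_to_sleep(&c);   /* awake -> asleep, vote released */
    printf("wakes=%u slps=%u wacks=%u\n",
           c.ibs_recv_wakes, c.ibs_recv_slps, c.ibs_sent_wacks);
    return 0;
}
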
808 struct qca_data *qca = hu->priv;
813 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
815 qca->ibs_recv_wacks++;
818 if (test_bit(QCA_SUSPENDING, &qca->flags)) {
819 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
823 switch (qca->tx_ibs_state) {
827 qca->tx_ibs_state);
832 while ((skb = skb_dequeue(&qca->tx_wait_q)))
833 skb_queue_tail(&qca->txq, skb);
836 del_timer(&qca->wake_retrans_timer);
837 idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
838 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
839 qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
845 qca->tx_ibs_state);
849 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
861 struct qca_data *qca = hu->priv;
863 BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
864 qca->tx_ibs_state);
866 if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
876 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
882 if (test_bit(QCA_IBS_DISABLED, &qca->flags) ||
883 test_bit(QCA_SUSPENDING, &qca->flags)) {
884 skb_queue_tail(&qca->txq, skb);
885 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
890 switch (qca->tx_ibs_state) {
893 skb_queue_tail(&qca->txq, skb);
894 idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
895 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
901 skb_queue_tail(&qca->tx_wait_q, skb);
903 qca->tx_ibs_state = HCI_IBS_TX_WAKING;
905 queue_work(qca->workqueue, &qca->ws_awake_device);
911 skb_queue_tail(&qca->tx_wait_q, skb);
916 qca->tx_ibs_state);
921 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
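
Lines 861-921 (qca_enqueue()) together with device_woke_up() at 808-849 complete the TX state machine. With IBS disabled or a suspend in flight (lines 882-885), packets bypass IBS and go straight to txq. Otherwise: TX_AWAKE queues to txq immediately and re-arms the idle timer; TX_ASLEEP parks the packet on tx_wait_q, moves to TX_WAKING and queues ws_awake_device to send the wake indication; TX_WAKING just parks the packet, since a wake is already in flight. When the controller's ack arrives, device_woke_up() flushes tx_wait_q into txq, swaps the retransmit timer for the idle timer and lands in TX_AWAKE. The enqueue/ack halves as one small model; queues are plain counters here, locking and skbs elided:

/* build: cc tx_enqueue_model.c */
#include <stdio.h>

enum tx_state { TX_ASLEEP, TX_WAKING, TX_AWAKE };

struct tx_ctx {
    enum tx_state state;
    unsigned txq_len;        /* packets ready for the UART */
    unsigned tx_wait_q_len;  /* packets parked until the controller wakes */
};

/* Model of qca_enqueue() after the IBS-disabled/suspending fast path. */
static void enqueue(struct tx_ctx *c)
{
    switch (c->state) {
    case TX_AWAKE:
        c->txq_len++;           /* send now, re-arm tx_idle_timer */
        break;
    case TX_ASLEEP:
        c->tx_wait_q_len++;     /* park the packet */
        c->state = TX_WAKING;
        puts("queue ws_awake_device: send wake indication");
        break;
    case TX_WAKING:
        c->tx_wait_q_len++;     /* wake already in flight: just park */
        break;
    }
}

/* Model of device_woke_up(): controller acknowledged our wake. */
static void woke_up(struct tx_ctx *c)
{
    c->txq_len += c->tx_wait_q_len;  /* flush everything parked */
    c->tx_wait_q_len = 0;
    c->state = TX_AWAKE;
    puts("del wake_retrans_timer, arm tx_idle_timer");
}

int main(void)
{
    struct tx_ctx c = { .state = TX_ASLEEP };

    enqueue(&c);   /* asleep: park + start waking */
    enqueue(&c);   /* waking: park */
    woke_up(&c);   /* ack: both packets move to txq */
    printf("state=%d txq=%u wait=%u\n", c.state, c.txq_len, c.tx_wait_q_len);
    return 0;
}
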
977 struct qca_data *qca = container_of(work, struct qca_data,
979 struct hci_uart *hu = qca->hu;
982 struct qca_memdump_data *qca_memdump = qca->qca_memdump;
991 while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
993 mutex_lock(&qca->hci_memdump_lock);
997 if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
998 qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
999 mutex_unlock(&qca->hci_memdump_lock);
1007 mutex_unlock(&qca->hci_memdump_lock);
1011 qca->qca_memdump = qca_memdump;
1014 qca->memdump_state = QCA_MEMDUMP_COLLECTING;
1027 set_bit(QCA_IBS_DISABLED, &qca->flags);
1028 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1035 qca->qca_memdump = NULL;
1036 mutex_unlock(&qca->hci_memdump_lock);
1042 queue_delayed_work(qca->workqueue,
1043 &qca->ctrl_memdump_timeout,
1063 qca->qca_memdump = NULL;
1064 mutex_unlock(&qca->hci_memdump_lock);
1116 qca->qca_memdump = qca_memdump;
1126 cancel_delayed_work(&qca->ctrl_memdump_timeout);
1127 kfree(qca->qca_memdump);
1128 qca->qca_memdump = NULL;
1129 qca->memdump_state = QCA_MEMDUMP_COLLECTED;
1130 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1133 mutex_unlock(&qca->hci_memdump_lock);
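
Lines 977-1133 (qca_controller_memdump()) drain rx_memdump_q under hci_memdump_lock and drive the crash dump through QCA_MEMDUMP_IDLE -> COLLECTING -> COLLECTED, or TIMEOUT if the watchdog at lines 539-554 fires first. On the first fragment the driver disables IBS, sets QCA_MEMDUMP_COLLECTION and arms ctrl_memdump_timeout; stale fragments arriving after a timeout or completed dump are dropped (lines 997-999); on the final fragment the watchdog is cancelled, the qca_memdump bookkeeping is freed and the dump is marked collected. A small model of just the guarded state transitions, with a pthread mutex standing in for hci_memdump_lock and all buffer handling elided:

/* build: cc -pthread memdump_model.c */
#include <pthread.h>
#include <stdio.h>

enum memdump_state { MEMDUMP_IDLE, MEMDUMP_COLLECTING,
                     MEMDUMP_COLLECTED, MEMDUMP_TIMEOUT };

struct memdump_ctx {
    pthread_mutex_t lock;   /* stands in for qca->hci_memdump_lock */
    enum memdump_state state;
};

/* First dump fragment: start collecting and arm the watchdog. */
static int memdump_start(struct memdump_ctx *c)
{
    pthread_mutex_lock(&c->lock);
    /* A timeout/collected state means this fragment is stale: drop it,
     * as the driver does before touching any buffers. */
    if (c->state == MEMDUMP_TIMEOUT || c->state == MEMDUMP_COLLECTED) {
        pthread_mutex_unlock(&c->lock);
        return -1;
    }
    c->state = MEMDUMP_COLLECTING;
    puts("disable IBS, arm ctrl_memdump_timeout watchdog");
    pthread_mutex_unlock(&c->lock);
    return 0;
}

/* Last fragment arrived: cancel the watchdog, mark collected. */
static void memdump_finish(struct memdump_ctx *c)
{
    pthread_mutex_lock(&c->lock);
    puts("cancel ctrl_memdump_timeout, free qca_memdump");
    c->state = MEMDUMP_COLLECTED;
    pthread_mutex_unlock(&c->lock);
}

/* Watchdog fired before the last fragment: give up on the dump. */
static void memdump_timeout(struct memdump_ctx *c)
{
    pthread_mutex_lock(&c->lock);
    if (c->state == MEMDUMP_COLLECTING)
        c->state = MEMDUMP_TIMEOUT;
    pthread_mutex_unlock(&c->lock);
}

int main(void)
{
    struct memdump_ctx c = { PTHREAD_MUTEX_INITIALIZER, MEMDUMP_IDLE };

    memdump_start(&c);
    memdump_finish(&c);
    memdump_timeout(&c);                  /* late watchdog: no-op now */
    printf("final state=%d\n", c.state);  /* 2 == MEMDUMP_COLLECTED */
    return 0;
}
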
1142 struct qca_data *qca = hu->priv;
1144 set_bit(QCA_SSR_TRIGGERED, &qca->flags);
1145 skb_queue_tail(&qca->rx_memdump_q, skb);
1146 queue_work(qca->workqueue, &qca->ctrl_memdump_evt);
1154 struct qca_data *qca = hu->priv;
1156 if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) {
1170 complete(&qca->drop_ev_comp);
1220 struct qca_data *qca = hu->priv;
1225 qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
1227 if (IS_ERR(qca->rx_skb)) {
1228 int err = PTR_ERR(qca->rx_skb);
1230 qca->rx_skb = NULL;
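
Lines 1220-1230 are the receive path: raw UART bytes go to h4_recv_buf(), which returns either the (possibly still partial) reassembly skb to keep for the next chunk, or an error encoded in the pointer itself, in which case the partial skb must be forgotten. The ERR_PTR/IS_ERR idiom this relies on, modeled in userspace C under the kernel's convention of reserving the top 4095 address values for negative errno codes:

/* build: cc err_ptr_model.c */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Userspace copies of the kernel helpers behind IS_ERR()/PTR_ERR(). */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for h4_recv_buf(): returns the buffer, or an encoded error. */
static void *fake_recv_buf(void *cur, int fail)
{
    if (fail)
        return ERR_PTR(-ENOMEM);
    return cur;    /* keep accumulating into the same buffer */
}

int main(void)
{
    static char buf[32];
    void *rx = buf;

    /* Mirrors qca_recv(): on error, record it and drop the partial skb. */
    rx = fake_recv_buf(rx, 1);
    if (IS_ERR(rx)) {
        int err = (int)PTR_ERR(rx);

        fprintf(stderr, "recv failed: %d\n", err);
        rx = NULL;
    }
    return 0;
}
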
1239 struct qca_data *qca = hu->priv;
1241 return skb_dequeue(&qca->txq);
1283 struct qca_data *qca = hu->priv;
1302 skb_queue_tail(&qca->txq, skb);
1307 while (!skb_queue_empty(&qca->txq))
1407 struct qca_data *qca = hu->priv;
1428 reinit_completion(&qca->drop_ev_comp);
1429 set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
1448 if (!wait_for_completion_timeout(&qca->drop_ev_comp,
1455 clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
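
Lines 1407-1455 surround the vendor baudrate change: on some controllers the command completes with an extra vendor event that must not reach the HCI core, so the driver raises QCA_DROP_VENDOR_EVENT and blocks on the drop_ev_comp completion; the RX path (lines 1154-1170) swallows the event and signals the completion instead of delivering it. A schematic of that handshake using the kernel completion API; the helper names and the 500 ms timeout here are illustrative, not the driver's exact values:

#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static DECLARE_COMPLETION(drop_ev_comp);
static unsigned long flags;
#define DROP_VENDOR_EVENT 0

static int change_baudrate_blocking(void)
{
	reinit_completion(&drop_ev_comp);
	set_bit(DROP_VENDOR_EVENT, &flags);

	/* ... send the vendor baudrate command, retune the host UART ... */

	/* Wait for the RX path to see (and swallow) the vendor event. */
	if (!wait_for_completion_timeout(&drop_ev_comp,
					 msecs_to_jiffies(500)))
		return -ETIMEDOUT;

	clear_bit(DROP_VENDOR_EVENT, &flags);
	return 0;
}

/* RX side, cf. lines 1154-1170: drop the event, signal the waiter. */
static void rx_saw_vendor_event(void)
{
	if (test_bit(DROP_VENDOR_EVENT, &flags))
		complete(&drop_ev_comp);
}
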
1464 struct qca_data *qca = hu->priv;
1481 skb_queue_tail(&qca->txq, skb);
1490 struct qca_data *qca = hu->priv;
1492 wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION,
1495 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1501 struct qca_data *qca = hu->priv;
1503 set_bit(QCA_SSR_TRIGGERED, &qca->flags);
1504 set_bit(QCA_HW_ERROR_EVENT, &qca->flags);
1505 bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state);
1507 if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
1514 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1517 } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
1525 mutex_lock(&qca->hci_memdump_lock);
1526 if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
1528 if (qca->qca_memdump) {
1529 vfree(qca->qca_memdump->memdump_buf_head);
1530 kfree(qca->qca_memdump);
1531 qca->qca_memdump = NULL;
1533 qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
1534 cancel_delayed_work(&qca->ctrl_memdump_timeout);
1536 mutex_unlock(&qca->hci_memdump_lock);
1538 if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
1539 qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
1540 cancel_work_sync(&qca->ctrl_memdump_evt);
1541 skb_queue_purge(&qca->rx_memdump_q);
1544 clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
1550 struct qca_data *qca = hu->priv;
1552 set_bit(QCA_SSR_TRIGGERED, &qca->flags);
1553 if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
1554 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1557 } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
1565 mutex_lock(&qca->hci_memdump_lock);
1566 if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
1567 qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
1568 if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
1575 mutex_unlock(&qca->hci_memdump_lock);
1633 struct qca_data *qca = hu->priv;
1653 clear_bit(QCA_BT_OFF, &qca->flags);
1660 struct qca_data *qca = hu->priv;
1672 clear_bit(QCA_ROM_FW, &qca->flags);
1674 set_bit(QCA_IBS_DISABLED, &qca->flags);
1684 qca->memdump_state = QCA_MEMDUMP_IDLE;
1691 clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
1725 clear_bit(QCA_IBS_DISABLED, &qca->flags);
1731 set_bit(QCA_ROM_FW, &qca->flags);
1738 set_bit(QCA_ROM_FW, &qca->flags);
1823 struct qca_data *qca = hu->priv;
1831 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
1832 set_bit(QCA_IBS_DISABLED, &qca->flags);
1834 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
1852 set_bit(QCA_BT_OFF, &qca->flags);
1858 struct qca_data *qca = hu->priv;
1864 del_timer_sync(&qca->wake_retrans_timer);
1865 del_timer_sync(&qca->tx_idle_timer);
1869 && qca->memdump_state == QCA_MEMDUMP_IDLE) {
1921 static int qca_init_regulators(struct qca_power *qca,
1928 bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL);
1935 ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk);
1945 qca->vreg_bulk = bulk;
1946 qca->num_vregs = num_vregs;
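
qca_init_regulators() (lines 1921-1946) uses the device-managed bulk-regulator API: one devm_kcalloc'd array of regulator_bulk_data, one devm_regulator_bulk_get() call, and nothing to free on the error path because devm ties both allocations to the device's lifetime. A schematic of that pattern; the supply names are hypothetical stand-ins for the driver's per-SoC qca_vreg table:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>

static int example_init_regulators(struct device *dev,
				   struct regulator_bulk_data **out,
				   int *out_num)
{
	static const char * const names[] = { "vddio", "vddch0" };
	struct regulator_bulk_data *bulk;
	int i, ret;

	bulk = devm_kcalloc(dev, ARRAY_SIZE(names), sizeof(*bulk),
			    GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(names); i++)
		bulk[i].supply = names[i];

	/* devm: the regulators are put automatically on driver detach,
	 * so the error path needs no cleanup. */
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(names), bulk);
	if (ret < 0)
		return ret;

	*out = bulk;
	*out_num = ARRAY_SIZE(names);
	return 0;
}
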
2079 struct qca_data *qca = hu->priv;
2084 if (test_bit(QCA_BT_OFF, &qca->flags) ||
2115 struct qca_data *qca = hu->priv;
2122 set_bit(QCA_SUSPENDING, &qca->flags);
2127 if (test_bit(QCA_ROM_FW, &qca->flags))
2134 if (test_bit(QCA_BT_OFF, &qca->flags) &&
2135 !test_bit(QCA_SSR_TRIGGERED, &qca->flags))
2138 if (test_bit(QCA_IBS_DISABLED, &qca->flags) ||
2139 test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
2140 wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ?
2148 wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED,
2151 if (test_bit(QCA_IBS_DISABLED, &qca->flags)) {
2158 cancel_work_sync(&qca->ws_awake_device);
2159 cancel_work_sync(&qca->ws_awake_rx);
2161 spin_lock_irqsave_nested(&qca->hci_ibs_lock,
2164 switch (qca->tx_ibs_state) {
2166 del_timer(&qca->wake_retrans_timer);
2169 del_timer(&qca->tx_idle_timer);
2180 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
2181 qca->ibs_sent_slps++;
2189 BT_ERR("Spurious tx state %d", qca->tx_ibs_state);
2194 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
2208 ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
2209 qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
2219 clear_bit(QCA_SUSPENDING, &qca->flags);
2229 struct qca_data *qca = hu->priv;
2231 clear_bit(QCA_SUSPENDING, &qca->flags);
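
Lines 2079-2231 are the PM hooks (qca_suspend()/qca_resume() in the driver). The suspend ordering matters: QCA_SUSPENDING is set first so every IBS path that tests it (lines 505, 729, 818, 883) short-circuits; early exits cover ROM firmware and an already-off controller; an in-flight SSR/memdump is waited out via wait_on_bit_timeout(); then the wake workers are cancelled, the TX side is forced into TX_ASLEEP with a final sleep indication, and the code blocks on suspend_wait_q until the controller has also let the RX side sleep. qca_resume() only clears the flag. The ordering as a checklist-style model, where each step stands in for the corresponding driver calls (timeouts and error paths elided); a sketch of the flow read off the fragments above, not the driver itself:

/* build: cc suspend_model.c */
#include <stdbool.h>
#include <stdio.h>

enum tx_state { TX_ASLEEP, TX_WAKING, TX_AWAKE };
enum rx_state { RX_ASLEEP, RX_AWAKE };

struct pm_ctx {
    bool suspending;   /* QCA_SUSPENDING: freezes the IBS machinery */
    enum tx_state tx;
    enum rx_state rx;
};

static int suspend(struct pm_ctx *c)
{
    /* 1. Raise the flag first: from here on, enqueue/wake paths park
     *    packets instead of driving the IBS state machine. */
    c->suspending = true;

    /* 2. cancel_work_sync(ws_awake_device/ws_awake_rx): no wake can be
     *    queued behind our back while we force the TX state. */
    puts("cancel wake workers");

    /* 3. Force TX asleep, sending one final sleep indication. */
    if (c->tx != TX_ASLEEP) {
        puts("send sleep indication");
        c->tx = TX_ASLEEP;
    }

    /* 4. Stands in for wait_event_interruptible_timeout(suspend_wait_q,
     *    rx_ibs_state == RX_ASLEEP, ...): device_want_to_sleep() wakes
     *    the queue when the controller lets RX sleep. */
    c->rx = RX_ASLEEP;

    return 0;
}

static int resume(struct pm_ctx *c)
{
    c->suspending = false;   /* clear_bit(QCA_SUSPENDING, &qca->flags) */
    return 0;
}

int main(void)
{
    struct pm_ctx c = { false, TX_AWAKE, RX_AWAKE };

    suspend(&c);
    resume(&c);
    printf("tx=%d rx=%d suspending=%d\n", c.tx, c.rx, c.suspending);
    return 0;
}
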