Lines Matching refs:qca
284 struct qca_data *qca = hu->priv;
287 bool old_vote = (qca->tx_vote | qca->rx_vote);
292 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
295 qca->vote_off_ms += diff;
297 qca->vote_on_ms += diff;
301 qca->tx_vote = true;
302 qca->tx_votes_on++;
306 qca->rx_vote = true;
307 qca->rx_votes_on++;
311 qca->tx_vote = false;
312 qca->tx_votes_off++;
316 qca->rx_vote = false;
317 qca->rx_votes_off++;
325 new_vote = qca->rx_vote | qca->tx_vote;
336 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
339 qca->votes_on++;
340 qca->vote_off_ms += diff;
342 qca->votes_off++;
343 qca->vote_on_ms += diff;
345 qca->vote_last_jif = jiffies;
356 struct qca_data *qca = hu->priv;
369 skb_queue_tail(&qca->txq, skb);
376 struct qca_data *qca = container_of(work, struct qca_data,
378 struct hci_uart *hu = qca->hu;
387 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
393 qca->ibs_sent_wakes++;
396 retrans_delay = msecs_to_jiffies(qca->wake_retrans);
397 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
399 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
407 struct qca_data *qca = container_of(work, struct qca_data,
409 struct hci_uart *hu = qca->hu;
416 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
417 qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
425 qca->ibs_sent_wacks++;
427 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
435 struct qca_data *qca = container_of(work, struct qca_data,
437 struct hci_uart *hu = qca->hu;
446 struct qca_data *qca = container_of(work, struct qca_data,
448 struct hci_uart *hu = qca->hu;
463 struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
464 struct hci_uart *hu = qca->hu;
467 BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);
469 spin_lock_irqsave_nested(&qca->hci_ibs_lock,
472 switch (qca->tx_ibs_state) {
479 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
480 qca->ibs_sent_slps++;
481 queue_work(qca->workqueue, &qca->ws_tx_vote_off);
487 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
491 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
496 struct qca_data *qca = from_timer(qca, t, wake_retrans_timer);
497 struct hci_uart *hu = qca->hu;
502 hu, qca->tx_ibs_state);
504 spin_lock_irqsave_nested(&qca->hci_ibs_lock,
508 if (test_bit(QCA_SUSPENDING, &qca->flags)) {
509 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
513 switch (qca->tx_ibs_state) {
521 qca->ibs_sent_wakes++;
522 retrans_delay = msecs_to_jiffies(qca->wake_retrans);
523 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
529 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
533 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
542 struct qca_data *qca = container_of(work, struct qca_data,
544 struct hci_uart *hu = qca->hu;
546 mutex_lock(&qca->hci_memdump_lock);
547 if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) {
548 qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
549 if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
557 mutex_unlock(&qca->hci_memdump_lock);
565 struct qca_data *qca;
572 qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
573 if (!qca)
576 skb_queue_head_init(&qca->txq);
577 skb_queue_head_init(&qca->tx_wait_q);
578 skb_queue_head_init(&qca->rx_memdump_q);
579 spin_lock_init(&qca->hci_ibs_lock);
580 mutex_init(&qca->hci_memdump_lock);
581 qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
582 if (!qca->workqueue) {
584 kfree(qca);
588 INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
589 INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
590 INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
591 INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
592 INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump);
593 INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout,
595 init_waitqueue_head(&qca->suspend_wait_q);
597 qca->hu = hu;
598 init_completion(&qca->drop_ev_comp);
601 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
602 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
604 qca->vote_last_jif = jiffies;
606 hu->priv = qca;
628 timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
629 qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
631 timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
632 qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;
635 qca->tx_idle_delay, qca->wake_retrans);
643 struct qca_data *qca = hu->priv;
650 if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags))
657 debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
658 debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
660 &qca->ibs_sent_slps);
662 &qca->ibs_sent_wakes);
664 &qca->ibs_sent_wacks);
666 &qca->ibs_recv_slps);
668 &qca->ibs_recv_wakes);
670 &qca->ibs_recv_wacks);
671 debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
672 debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
673 debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
674 debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
675 debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
676 debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
677 debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
678 debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
679 debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
680 debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);
684 debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
686 &qca->tx_idle_delay);
692 struct qca_data *qca = hu->priv;
694 BT_DBG("hu %p qca flush", hu);
696 skb_queue_purge(&qca->tx_wait_q);
697 skb_queue_purge(&qca->txq);
705 struct qca_data *qca = hu->priv;
707 BT_DBG("hu %p qca close", hu);
711 skb_queue_purge(&qca->tx_wait_q);
712 skb_queue_purge(&qca->txq);
713 skb_queue_purge(&qca->rx_memdump_q);
720 timer_shutdown_sync(&qca->tx_idle_timer);
721 timer_shutdown_sync(&qca->wake_retrans_timer);
722 destroy_workqueue(qca->workqueue);
723 qca->hu = NULL;
725 kfree_skb(qca->rx_skb);
729 kfree(qca);
739 struct qca_data *qca = hu->priv;
743 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
745 qca->ibs_recv_wakes++;
748 if (test_bit(QCA_SUSPENDING, &qca->flags)) {
749 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
753 switch (qca->rx_ibs_state) {
758 queue_work(qca->workqueue, &qca->ws_awake_rx);
759 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
770 qca->ibs_sent_wacks++;
776 qca->rx_ibs_state);
780 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
791 struct qca_data *qca = hu->priv;
793 BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state);
795 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
797 qca->ibs_recv_slps++;
799 switch (qca->rx_ibs_state) {
802 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
804 queue_work(qca->workqueue, &qca->ws_rx_vote_off);
813 qca->rx_ibs_state);
817 wake_up_interruptible(&qca->suspend_wait_q);
819 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
827 struct qca_data *qca = hu->priv;
832 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
834 qca->ibs_recv_wacks++;
837 if (test_bit(QCA_SUSPENDING, &qca->flags)) {
838 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
842 switch (qca->tx_ibs_state) {
846 qca->tx_ibs_state);
851 while ((skb = skb_dequeue(&qca->tx_wait_q)))
852 skb_queue_tail(&qca->txq, skb);
855 del_timer(&qca->wake_retrans_timer);
856 idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
857 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
858 qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
864 qca->tx_ibs_state);
868 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
880 struct qca_data *qca = hu->priv;
882 BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
883 qca->tx_ibs_state);
885 if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
895 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
901 if (test_bit(QCA_IBS_DISABLED, &qca->flags) ||
902 test_bit(QCA_SUSPENDING, &qca->flags)) {
903 skb_queue_tail(&qca->txq, skb);
904 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
909 switch (qca->tx_ibs_state) {
912 skb_queue_tail(&qca->txq, skb);
913 idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
914 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
920 skb_queue_tail(&qca->tx_wait_q, skb);
922 qca->tx_ibs_state = HCI_IBS_TX_WAKING;
924 queue_work(qca->workqueue, &qca->ws_awake_device);
930 skb_queue_tail(&qca->tx_wait_q, skb);
935 qca->tx_ibs_state);
940 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
997 struct qca_data *qca = hu->priv;
1001 qca->controller_id);
1005 qca->fw_version);
1018 struct qca_data *qca = container_of(work, struct qca_data,
1020 struct hci_uart *hu = qca->hu;
1023 struct qca_memdump_info *qca_memdump = qca->qca_memdump;
1030 while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
1032 mutex_lock(&qca->hci_memdump_lock);
1036 if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
1037 qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
1038 mutex_unlock(&qca->hci_memdump_lock);
1046 mutex_unlock(&qca->hci_memdump_lock);
1050 qca->qca_memdump = qca_memdump;
1053 qca->memdump_state = QCA_MEMDUMP_COLLECTING;
1066 set_bit(QCA_IBS_DISABLED, &qca->flags);
1067 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1074 mutex_unlock(&qca->hci_memdump_lock);
1078 queue_delayed_work(qca->workqueue,
1079 &qca->ctrl_memdump_timeout,
1088 kfree(qca->qca_memdump);
1089 qca->qca_memdump = NULL;
1090 qca->memdump_state = QCA_MEMDUMP_COLLECTED;
1091 cancel_delayed_work(&qca->ctrl_memdump_timeout);
1092 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1093 mutex_unlock(&qca->hci_memdump_lock);
1105 if (!test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) {
1109 mutex_unlock(&qca->hci_memdump_lock);
1165 cancel_delayed_work(&qca->ctrl_memdump_timeout);
1166 kfree(qca->qca_memdump);
1167 qca->qca_memdump = NULL;
1168 qca->memdump_state = QCA_MEMDUMP_COLLECTED;
1169 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1172 mutex_unlock(&qca->hci_memdump_lock);
1181 struct qca_data *qca = hu->priv;
1183 set_bit(QCA_SSR_TRIGGERED, &qca->flags);
1184 skb_queue_tail(&qca->rx_memdump_q, skb);
1185 queue_work(qca->workqueue, &qca->ctrl_memdump_evt);
1193 struct qca_data *qca = hu->priv;
1195 if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) {
1209 complete(&qca->drop_ev_comp);
1259 struct qca_data *qca = hu->priv;
1264 qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
1266 if (IS_ERR(qca->rx_skb)) {
1267 int err = PTR_ERR(qca->rx_skb);
1269 qca->rx_skb = NULL;
1278 struct qca_data *qca = hu->priv;
1280 return skb_dequeue(&qca->txq);
1322 struct qca_data *qca = hu->priv;
1341 skb_queue_tail(&qca->txq, skb);
1346 while (!skb_queue_empty(&qca->txq))
1465 struct qca_data *qca = hu->priv;
1499 reinit_completion(&qca->drop_ev_comp);
1500 set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
1536 if (!wait_for_completion_timeout(&qca->drop_ev_comp,
1543 clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
1556 struct qca_data *qca = hu->priv;
1573 skb_queue_tail(&qca->txq, skb);
1582 struct qca_data *qca = hu->priv;
1584 wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION,
1587 clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1593 struct qca_data *qca = hu->priv;
1595 set_bit(QCA_SSR_TRIGGERED, &qca->flags);
1596 set_bit(QCA_HW_ERROR_EVENT, &qca->flags);
1597 bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state);
1599 if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
1606 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1609 } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
1617 mutex_lock(&qca->hci_memdump_lock);
1618 if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
1621 if (qca->qca_memdump) {
1622 kfree(qca->qca_memdump);
1623 qca->qca_memdump = NULL;
1625 qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
1626 cancel_delayed_work(&qca->ctrl_memdump_timeout);
1628 mutex_unlock(&qca->hci_memdump_lock);
1630 if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
1631 qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
1632 cancel_work_sync(&qca->ctrl_memdump_evt);
1633 skb_queue_purge(&qca->rx_memdump_q);
1636 clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
1642 struct qca_data *qca = hu->priv;
1644 set_bit(QCA_SSR_TRIGGERED, &qca->flags);
1645 if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
1646 set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1649 } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
1657 mutex_lock(&qca->hci_memdump_lock);
1658 if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
1659 qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
1660 if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
1667 mutex_unlock(&qca->hci_memdump_lock);
1775 struct qca_data *qca = hu->priv;
1804 clear_bit(QCA_BT_OFF, &qca->flags);
1821 struct qca_data *qca = hu->priv;
1834 clear_bit(QCA_ROM_FW, &qca->flags);
1836 set_bit(QCA_IBS_DISABLED, &qca->flags);
1868 qca->memdump_state = QCA_MEMDUMP_IDLE;
1875 clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
1938 clear_bit(QCA_IBS_DISABLED, &qca->flags);
1946 set_bit(QCA_ROM_FW, &qca->flags);
1953 set_bit(QCA_ROM_FW, &qca->flags);
1978 qca->fw_version = le16_to_cpu(ver.patch_ver);
1979 qca->controller_id = le16_to_cpu(ver.rom_ver);
2099 struct qca_data *qca = hu->priv;
2108 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
2109 set_bit(QCA_IBS_DISABLED, &qca->flags);
2111 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
2146 set_bit(QCA_BT_OFF, &qca->flags);
2152 struct qca_data *qca = hu->priv;
2158 del_timer_sync(&qca->wake_retrans_timer);
2159 del_timer_sync(&qca->tx_idle_timer);
2163 && qca->memdump_state == QCA_MEMDUMP_IDLE) {
2215 static int qca_init_regulators(struct qca_power *qca,
2222 bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL);
2229 ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk);
2239 qca->vreg_bulk = bulk;
2240 qca->num_vregs = num_vregs;
2412 struct qca_data *qca = hu->priv;
2417 if (test_bit(QCA_BT_OFF, &qca->flags) ||
2448 struct qca_data *qca = hu->priv;
2455 set_bit(QCA_SUSPENDING, &qca->flags);
2460 if (test_bit(QCA_ROM_FW, &qca->flags))
2467 if (test_bit(QCA_BT_OFF, &qca->flags) &&
2468 !test_bit(QCA_SSR_TRIGGERED, &qca->flags))
2471 if (test_bit(QCA_IBS_DISABLED, &qca->flags) ||
2472 test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
2473 wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ?
2481 wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED,
2484 if (test_bit(QCA_IBS_DISABLED, &qca->flags)) {
2491 cancel_work_sync(&qca->ws_awake_device);
2492 cancel_work_sync(&qca->ws_awake_rx);
2494 spin_lock_irqsave_nested(&qca->hci_ibs_lock,
2497 switch (qca->tx_ibs_state) {
2499 del_timer(&qca->wake_retrans_timer);
2502 del_timer(&qca->tx_idle_timer);
2513 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
2514 qca->ibs_sent_slps++;
2522 BT_ERR("Spurious tx state %d", qca->tx_ibs_state);
2527 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
2541 ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
2542 qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
2552 clear_bit(QCA_SUSPENDING, &qca->flags);
2562 struct qca_data *qca = hu->priv;
2564 clear_bit(QCA_SUSPENDING, &qca->flags);
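
The references above cluster around the driver's in-band-sleep (IBS) handling: the tx_ibs_state/rx_ibs_state transitions, the wake and sleep indications counted in ibs_sent_wakes, ibs_sent_slps and ibs_sent_wacks, and the tx idle and wake-retransmit timers. The stand-alone C program below is a simplified sketch of only the tx-side state machine those references participate in; it is an illustrative approximation, not the driver code — the real driver guards these transitions with qca->hci_ibs_lock, defers the wake indication to a workqueue, and parks packets on tx_wait_q until the controller acknowledges the wake.

/*
 * Minimal model of the tx-side in-band-sleep (IBS) state machine
 * referenced by the tx_ibs_state lines above. Illustrative only.
 */
#include <stdio.h>

enum tx_ibs_state { TX_ASLEEP, TX_WAKING, TX_AWAKE };

struct ibs_model {
	enum tx_ibs_state tx_ibs_state;
	unsigned long ibs_sent_wakes;	/* HCI_IBS_WAKE_IND sent  */
	unsigned long ibs_sent_slps;	/* HCI_IBS_SLEEP_IND sent */
};

/* Host has a packet to send (mirrors the switch in qca_enqueue()). */
static void host_enqueue(struct ibs_model *m)
{
	switch (m->tx_ibs_state) {
	case TX_ASLEEP:
		/* Park the packet and start waking the controller. */
		m->tx_ibs_state = TX_WAKING;
		m->ibs_sent_wakes++;		/* wake indication goes out */
		break;
	case TX_WAKING:
		/* Wake already in flight; keep the packet parked. */
		break;
	case TX_AWAKE:
		/* Transmit immediately and re-arm the tx idle timer. */
		break;
	}
}

/* Controller acknowledged the wake (mirrors qca_device_woke_up()). */
static void device_woke_up(struct ibs_model *m)
{
	if (m->tx_ibs_state == TX_WAKING)
		m->tx_ibs_state = TX_AWAKE;	/* flush parked packets to txq */
}

/* Tx idle timer fired (mirrors hci_ibs_tx_idle_timeout()). */
static void tx_idle_timeout(struct ibs_model *m)
{
	if (m->tx_ibs_state == TX_AWAKE) {
		m->tx_ibs_state = TX_ASLEEP;
		m->ibs_sent_slps++;		/* sleep indication goes out */
	}
}

int main(void)
{
	struct ibs_model m = { .tx_ibs_state = TX_ASLEEP };

	host_enqueue(&m);	/* ASLEEP -> WAKING, wake indication sent  */
	device_woke_up(&m);	/* WAKING -> AWAKE                         */
	tx_idle_timeout(&m);	/* AWAKE  -> ASLEEP, sleep indication sent */

	printf("wakes=%lu slps=%lu state=%d\n",
	       m.ibs_sent_wakes, m.ibs_sent_slps, m.tx_ibs_state);
	return 0;
}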