Lines matching refs: bt (sound/soc/mediatek/common/mtk-btcvsd.c)

130 spinlock_t tx_lock; /* spinlock for bt tx stream control */
131 spinlock_t rx_lock; /* spinlock for bt rx stream control */
193 static void mtk_btcvsd_snd_irq_enable(struct mtk_btcvsd_snd *bt)
195 regmap_update_bits(bt->infra, bt->infra_misc_offset,
196 bt->conn_bt_cvsd_mask, 0);
199 static void mtk_btcvsd_snd_irq_disable(struct mtk_btcvsd_snd *bt)
201 regmap_update_bits(bt->infra, bt->infra_misc_offset,
202 bt->conn_bt_cvsd_mask, bt->conn_bt_cvsd_mask);
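
The pair above gates the CVSD interrupt at the connectivity-subsystem level: setting conn_bt_cvsd_mask in the infra misc register masks the line, clearing it unmasks it. A minimal sketch of the same regmap pattern (the wrapper name is hypothetical; the offset and mask come from the driver's platform data):

    #include <linux/regmap.h>

    /* Sketch: read-modify-write the infracfg misc register. val == mask
     * disables the BT CVSD interrupt, val == 0 enables it, matching the
     * irq_disable()/irq_enable() helpers above.
     */
    static void btcvsd_set_infra_mask(struct regmap *infra,
                                      unsigned int misc_offset,
                                      unsigned int mask, bool disable)
    {
            regmap_update_bits(infra, misc_offset, mask, disable ? mask : 0);
    }
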
205 static void mtk_btcvsd_snd_set_state(struct mtk_btcvsd_snd *bt,
209 dev_dbg(bt->dev, "%s(), stream %d, state %d, tx->state %d, rx->state %d, irq_disabled %d\n",
212 bt->tx->state, bt->rx->state, bt->irq_disabled);
216 if (bt->tx->state == BT_SCO_STATE_IDLE &&
217 bt->rx->state == BT_SCO_STATE_IDLE) {
218 if (!bt->irq_disabled) {
219 disable_irq(bt->irq_id);
220 mtk_btcvsd_snd_irq_disable(bt);
221 bt->irq_disabled = 1;
224 if (bt->irq_disabled) {
225 enable_irq(bt->irq_id);
226 mtk_btcvsd_snd_irq_enable(bt);
227 bt->irq_disabled = 0;
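
mtk_btcvsd_snd_set_state() (lines 205-227) only gates the shared IRQ once both directions are idle, and tracks that in irq_disabled so disable_irq()/enable_irq() stay strictly paired. A condensed sketch of the balancing rule (helper name hypothetical):

    /* Sketch: at most one disable_irq() is ever outstanding; the
     * irq_disabled flag keeps the enable/disable calls balanced.
     */
    static void btcvsd_gate_irq(struct mtk_btcvsd_snd *bt)
    {
            bool idle = bt->tx->state == BT_SCO_STATE_IDLE &&
                        bt->rx->state == BT_SCO_STATE_IDLE;

            if (idle && !bt->irq_disabled) {
                    disable_irq(bt->irq_id);        /* waits for handlers */
                    mtk_btcvsd_snd_irq_disable(bt); /* mask at infracfg too */
                    bt->irq_disabled = 1;
            } else if (!idle && bt->irq_disabled) {
                    enable_irq(bt->irq_id);
                    mtk_btcvsd_snd_irq_enable(bt);
                    bt->irq_disabled = 0;
            }
    }
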
232 static int mtk_btcvsd_snd_tx_init(struct mtk_btcvsd_snd *bt)
234 memset(bt->tx, 0, sizeof(*bt->tx));
235 memset(bt->tx_packet_buf, 0, sizeof(bt->tx_packet_buf));
237 bt->tx->packet_size = BTCVSD_TX_PACKET_SIZE;
238 bt->tx->buf_size = BTCVSD_TX_BUF_SIZE;
239 bt->tx->timeout = 0;
240 bt->tx->rw_cnt = 0;
241 bt->tx->stream = SNDRV_PCM_STREAM_PLAYBACK;
245 static int mtk_btcvsd_snd_rx_init(struct mtk_btcvsd_snd *bt)
247 memset(bt->rx, 0, sizeof(*bt->rx));
248 memset(bt->rx_packet_buf, 0, sizeof(bt->rx_packet_buf));
250 bt->rx->packet_size = BTCVSD_RX_PACKET_SIZE;
251 bt->rx->buf_size = BTCVSD_RX_BUF_SIZE;
252 bt->rx->timeout = 0;
253 bt->rx->rw_cnt = 0;
254 bt->rx->stream = SNDRV_PCM_STREAM_CAPTURE;
258 static void get_tx_time_stamp(struct mtk_btcvsd_snd *bt,
261 ts->time_stamp_us = bt->tx->time_stamp;
262 ts->data_count_equi_time = bt->tx->buf_data_equivalent_time;
265 static void get_rx_time_stamp(struct mtk_btcvsd_snd *bt,
268 ts->time_stamp_us = bt->rx->time_stamp;
269 ts->data_count_equi_time = bt->rx->buf_data_equivalent_time;
317 /* write encoded mute data to bt sram */
318 static int btcvsd_tx_clean_buffer(struct mtk_btcvsd_snd *bt)
323 enum BT_SCO_BAND band = bt->band;
327 memset(bt->tx->temp_packet_buf, 170, SCO_PACKET_180);
329 memcpy(bt->tx->temp_packet_buf,
332 /* write mute data to bt tx sram buffer */
333 spin_lock_irqsave(&bt->tx_lock, flags);
334 num_valid_addr = bt->tx->buffer_info.num_valid_addr;
336 dev_info(bt->dev, "%s(), band %d, num_valid_addr %u\n",
342 dev_info(bt->dev, "%s(), clean addr 0x%lx\n", __func__,
343 bt->tx->buffer_info.bt_sram_addr[i]);
345 dst = (void *)bt->tx->buffer_info.bt_sram_addr[i];
348 bt->tx->temp_packet_buf, dst,
349 bt->tx->buffer_info.packet_length,
350 bt->tx->buffer_info.packet_num);
352 spin_unlock_irqrestore(&bt->tx_lock, flags);
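
btcvsd_tx_clean_buffer() (lines 317-352) paints the TX path with silence: 170 is 0xAA, an alternating bit pattern that a CVSD decoder (1-bit delta modulation at 64 kbit/s) renders as near-silence; for the wideband case the elided memcpy at line 329 instead copies a precomputed mSBC silence frame. The loop then rewrites every SRAM slot the ISR has recorded, holding tx_lock because the ISR grows num_valid_addr concurrently. Sketch of the loop body (the transfer helper is mtk_btcvsd_snd_data_transfer() in the full source):

    /* Sketch: overwrite each BT SRAM slot with the mute packet while
     * tx_lock protects buffer_info against the ISR.
     */
    spin_lock_irqsave(&bt->tx_lock, flags);
    for (i = 0; i < bt->tx->buffer_info.num_valid_addr; i++) {
            void *dst = (void *)bt->tx->buffer_info.bt_sram_addr[i];

            mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_ARM2BT,
                                         bt->tx->temp_packet_buf, dst,
                                         bt->tx->buffer_info.packet_length,
                                         bt->tx->buffer_info.packet_num);
    }
    spin_unlock_irqrestore(&bt->tx_lock, flags);
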
357 static int mtk_btcvsd_read_from_bt(struct mtk_btcvsd_snd *bt,
371 connsys_addr_rx = *bt->bt_reg_pkt_r;
372 ap_addr_rx = (unsigned long)bt->bt_sram_bank2_base +
376 /* bt returns 0xdeadfeed if a register is read during bt sleep */
377 dev_warn(bt->dev, "%s(), connsys_addr_rx == 0xdeadfeed\n",
385 bt->rx->temp_packet_buf, packet_length,
388 spin_lock_irqsave(&bt->rx_lock, flags);
390 packet_buf_ofs = (bt->rx->packet_w & SCO_RX_PACKET_MASK) *
391 bt->rx->packet_size;
392 memcpy(bt->rx_packet_buf + packet_buf_ofs,
393 bt->rx->temp_packet_buf + (SCO_RX_PLC_SIZE * i),
402 memcpy(bt->rx_packet_buf + packet_buf_ofs, (void *)&pv,
404 bt->rx->packet_w++;
406 spin_unlock_irqrestore(&bt->rx_lock, flags);
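
mtk_btcvsd_read_from_bt() (lines 357-406) produces into a software ring addressed by free-running counters: packet_w only ever increments, packet_w & SCO_RX_PACKET_MASK maps it onto a power-of-two packet buffer, and packet_w - packet_r stays correct across unsigned wrap-around. A runnable user-space demonstration of the same index scheme (BUF_NUM is an assumed size):

    #include <stdio.h>

    #define BUF_NUM 16u             /* assumed packet count, power of two */
    #define MASK    (BUF_NUM - 1)

    int main(void)
    {
            unsigned int w = 0xfffffffe, r = 0xfffffffc; /* near wrap */

            printf("packets queued: %u\n", w - r);  /* 2, wrap-safe */
            printf("write slot:     %u\n", w & MASK);
            printf("read slot:      %u\n", r & MASK);
            return 0;
    }
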
410 static int mtk_btcvsd_write_to_bt(struct mtk_btcvsd_snd *bt,
422 connsys_addr_tx = *bt->bt_reg_pkt_w;
423 ap_addr_tx = (unsigned long)bt->bt_sram_bank2_base +
427 /* bt returns 0xdeadfeed if a register is read during bt sleep */
428 dev_warn(bt->dev, "%s(), connsys_addr_tx == 0xdeadfeed\n",
433 spin_lock_irqsave(&bt->tx_lock, flags);
435 memcpy(bt->tx->temp_packet_buf + (bt->tx->packet_size * i),
436 (bt->tx_packet_buf +
437 (bt->tx->packet_r % SCO_TX_PACKER_BUF_NUM) *
438 bt->tx->packet_size),
439 bt->tx->packet_size);
441 bt->tx->packet_r++;
443 spin_unlock_irqrestore(&bt->tx_lock, flags);
447 if (!bt->tx->mute) {
449 bt->tx->temp_packet_buf, dst,
453 /* store bt tx buffer sram info */
454 bt->tx->buffer_info.packet_length = packet_length;
455 bt->tx->buffer_info.packet_num = packet_num;
456 for (i = 0; i < bt->tx->buffer_info.num_valid_addr; i++) {
457 if (bt->tx->buffer_info.bt_sram_addr[i] == ap_addr_tx) {
465 spin_lock_irqsave(&bt->tx_lock, flags);
466 bt->tx->buffer_info.num_valid_addr++;
467 next_idx = bt->tx->buffer_info.num_valid_addr - 1;
468 bt->tx->buffer_info.bt_sram_addr[next_idx] = ap_addr_tx;
469 spin_unlock_irqrestore(&bt->tx_lock, flags);
470 dev_info(bt->dev, "%s(), new ap_addr_tx = 0x%lx, num_valid_addr %d\n",
472 bt->tx->buffer_info.num_valid_addr);
475 if (bt->tx->mute)
476 btcvsd_tx_clean_buffer(bt);
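
mtk_btcvsd_write_to_bt() (lines 410-476) drains the software ring under tx_lock, copies to BT SRAM after dropping the lock, and also learns every distinct SRAM address the controller hands out so btcvsd_tx_clean_buffer() can mute them all later. A condensed sketch of that learning step (the known flag is a hypothetical stand-in for the fragment's control flow):

    /* Sketch: record each BT SRAM address exactly once. */
    bool known = false;

    for (i = 0; i < bt->tx->buffer_info.num_valid_addr; i++) {
            if (bt->tx->buffer_info.bt_sram_addr[i] == ap_addr_tx) {
                    known = true;
                    break;
            }
    }

    if (!known) {
            spin_lock_irqsave(&bt->tx_lock, flags);
            next_idx = bt->tx->buffer_info.num_valid_addr++;
            bt->tx->buffer_info.bt_sram_addr[next_idx] = ap_addr_tx;
            spin_unlock_irqrestore(&bt->tx_lock, flags);
    }
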
483 struct mtk_btcvsd_snd *bt = dev;
487 if (bt->rx->state != BT_SCO_STATE_RUNNING &&
488 bt->rx->state != BT_SCO_STATE_ENDING &&
489 bt->tx->state != BT_SCO_STATE_RUNNING &&
490 bt->tx->state != BT_SCO_STATE_ENDING &&
491 bt->tx->state != BT_SCO_STATE_LOOPBACK) {
492 dev_warn(bt->dev, "%s(), in idle state: rx->state: %d, tx->state: %d\n",
493 __func__, bt->rx->state, bt->tx->state);
497 control = *bt->bt_reg_ctl;
501 dev_warn(bt->dev, "%s(), ((control >> 31) & 1) == 0, control 0x%x\n",
507 dev_warn(bt->dev, "%s(), invalid packet_type %u, exit\n",
517 if (bt->tx->state == BT_SCO_STATE_LOOPBACK) {
522 connsys_addr_rx = *bt->bt_reg_pkt_r;
523 ap_addr_rx = (unsigned long)bt->bt_sram_bank2_base +
526 connsys_addr_tx = *bt->bt_reg_pkt_w;
527 ap_addr_tx = (unsigned long)bt->bt_sram_bank2_base +
532 /* bt returns 0xdeadfeed if a reg is read during bt sleep */
533 dev_warn(bt->dev, "%s(), connsys_addr_tx == 0xdeadfeed\n",
542 bt->tx->temp_packet_buf,
546 bt->tx->temp_packet_buf, dst,
549 bt->rx->rw_cnt++;
550 bt->tx->rw_cnt++;
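
In loopback mode the ISR short-circuits the whole stack: the packets BT just wrote into RX SRAM are pulled into temp_packet_buf and immediately written back to the TX SRAM address, after which both rw_cnt counters advance. In the full source the copies go through mtk_btcvsd_snd_data_transfer(); roughly (src and dst stand for the mapped ap_addr_rx and ap_addr_tx):

    /* Sketch: bounce RX straight back to TX inside the ISR. */
    mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_BT2ARM, src,
                                 bt->tx->temp_packet_buf,
                                 packet_length, packet_num);
    mtk_btcvsd_snd_data_transfer(BT_SCO_DIRECT_ARM2BT,
                                 bt->tx->temp_packet_buf, dst,
                                 packet_length, packet_num);
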
553 if (bt->rx->state == BT_SCO_STATE_RUNNING ||
554 bt->rx->state == BT_SCO_STATE_ENDING) {
555 if (bt->rx->xrun) {
556 if (bt->rx->packet_w - bt->rx->packet_r <=
562 bt->rx->xrun = 0;
563 dev_warn(bt->dev, "%s(), rx->xrun 0!\n",
568 if (!bt->rx->xrun &&
569 (bt->rx->packet_w - bt->rx->packet_r <=
571 mtk_btcvsd_read_from_bt(bt,
577 bt->rx->rw_cnt++;
579 bt->rx->xrun = 1;
580 dev_warn(bt->dev, "%s(), rx->xrun 1\n", __func__);
585 bt->tx->timeout = 0;
586 if ((bt->tx->state == BT_SCO_STATE_RUNNING ||
587 bt->tx->state == BT_SCO_STATE_ENDING) &&
588 bt->tx->trigger_start) {
589 if (bt->tx->xrun) {
593 if (bt->tx->packet_w - bt->tx->packet_r >=
595 bt->tx->xrun = 0;
596 dev_warn(bt->dev, "%s(), tx->xrun 0\n",
601 if ((!bt->tx->xrun &&
602 (bt->tx->packet_w - bt->tx->packet_r >= buf_cnt_tx)) ||
603 bt->tx->state == BT_SCO_STATE_ENDING) {
604 mtk_btcvsd_write_to_bt(bt,
609 bt->tx->rw_cnt++;
611 bt->tx->xrun = 1;
612 dev_warn(bt->dev, "%s(), tx->xrun 1\n", __func__);
616 *bt->bt_reg_ctl &= ~BT_CVSD_CLEAR;
618 if (bt->rx->state == BT_SCO_STATE_RUNNING ||
619 bt->rx->state == BT_SCO_STATE_ENDING) {
620 bt->rx->wait_flag = 1;
621 wake_up_interruptible(&bt->rx_wait);
622 snd_pcm_period_elapsed(bt->rx->substream);
624 if (bt->tx->state == BT_SCO_STATE_RUNNING ||
625 bt->tx->state == BT_SCO_STATE_ENDING) {
626 bt->tx->wait_flag = 1;
627 wake_up_interruptible(&bt->tx_wait);
628 snd_pcm_period_elapsed(bt->tx->substream);
633 *bt->bt_reg_ctl &= ~BT_CVSD_CLEAR;
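
The ISR acknowledges the interrupt by clearing BT_CVSD_CLEAR in the shared control word, then hands control back to any blocked reader or writer and to ALSA's period accounting. The per-direction wake-up pattern, with generic names (the real code uses bt->rx/bt->tx and rx_wait/tx_wait):

    /* Sketch: wake the blocked read()/write() and report progress to
     * ALSA for one stream direction.
     */
    if (stream->state == BT_SCO_STATE_RUNNING ||
        stream->state == BT_SCO_STATE_ENDING) {
            stream->wait_flag = 1;             /* waiter's wake condition */
            wake_up_interruptible(wq);         /* see wait_for_bt_irq() */
            snd_pcm_period_elapsed(stream->substream);
    }
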
637 static int wait_for_bt_irq(struct mtk_btcvsd_snd *bt,
651 ret = wait_event_interruptible_timeout(bt->tx_wait,
655 ret = wait_event_interruptible_timeout(bt->rx_wait,
664 dev_warn(bt->dev, "%s(), stream %d, timeout %llu, limit %llu, ret %d, flag %d\n",
675 dev_warn(bt->dev, "%s(), stream %d, error, trial left %d\n",
684 dev_warn(bt->dev, "%s(), stream %d, error, timeout, condition is false, trial left %d\n",
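
wait_for_bt_irq() (lines 637-684) parks the caller on the per-direction wait queue until the ISR sets wait_flag. wait_event_interruptible_timeout() returns the remaining jiffies (> 0) when the condition came true, 0 on timeout, and -ERESTARTSYS when a signal arrived; the fragment above retries a bounded number of times in the latter two cases. One wait round for TX, sketched (timeout_ms is a hypothetical local):

    long ret = wait_event_interruptible_timeout(bt->tx_wait,
                                                bt->tx->wait_flag,
                                                msecs_to_jiffies(timeout_ms));

    if (ret > 0) {
            bt->tx->wait_flag = 0;  /* event consumed, proceed */
    } else if (ret == 0) {
            /* timed out with wait_flag still clear: retry or give up */
    } else {
            /* -ERESTARTSYS: interrupted by a signal */
    }
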
698 static ssize_t mtk_btcvsd_snd_read(struct mtk_btcvsd_snd *bt,
705 unsigned int packet_size = bt->rx->packet_size;
708 spin_lock_irqsave(&bt->rx_lock, flags);
710 avail = (bt->rx->packet_w - bt->rx->packet_r) * packet_size;
712 cur_read_idx = (bt->rx->packet_r & SCO_RX_PACKET_MASK) *
714 spin_unlock_irqrestore(&bt->rx_lock, flags);
717 int ret = wait_for_bt_irq(bt, bt->rx);
728 dev_warn(bt->dev, "%s(), count %zu or avail %lu is not multiple of packet_size %d\n",
741 cont = bt->rx->buf_size - cur_read_idx;
745 if (copy_to_iter(bt->rx_packet_buf + cur_read_idx,
747 dev_warn(bt->dev, "%s(), copy_to_iter fail\n",
752 spin_lock_irqsave(&bt->rx_lock, flags);
753 bt->rx->packet_r += read_size / packet_size;
754 spin_unlock_irqrestore(&bt->rx_lock, flags);
764 bt->rx->time_stamp = sched_clock();
765 bt->rx->buf_data_equivalent_time =
766 (unsigned long long)(bt->rx->packet_w - bt->rx->packet_r) *
768 bt->rx->buf_data_equivalent_time += read_count * SCO_RX_PLC_SIZE *
771 bt->rx->buf_data_equivalent_time *= 1000;
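
After the copy, the read path records a timestamp pair for user space: sched_clock() now, plus how much audio time the still-buffered bytes represent (the buf_data_equivalent_time read back by get_rx_time_stamp() above). CVSD runs at 64 kbit/s, i.e. 8000 encoded bytes per second, so each byte stands for 125 us of audio; the elided scaling factors reduce to that rate. A runnable sanity check (the 30-byte packet size is an assumption about SCO_RX_PLC_SIZE):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long bytes = 30;  /* one assumed PLC packet */
            unsigned long long us = bytes * 1000000ULL / 8000; /* 64 kbit/s */

            printf("%llu bytes ~ %llu us of audio\n", bytes, us); /* 3750 */
            return 0;
    }
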
776 static ssize_t mtk_btcvsd_snd_write(struct mtk_btcvsd_snd *bt,
782 unsigned int packet_size = bt->tx->packet_size;
788 bt->tx->time_stamp = sched_clock();
789 bt->tx->buf_data_equivalent_time =
790 (unsigned long long)(bt->tx->packet_w - bt->tx->packet_r) *
794 bt->tx->buf_data_equivalent_time *= 1000;
797 spin_lock_irqsave(&bt->tx_lock, flags);
799 avail = bt->tx->buf_size -
800 (bt->tx->packet_w - bt->tx->packet_r) * packet_size;
802 cur_write_idx = (bt->tx->packet_w % SCO_TX_PACKER_BUF_NUM) *
804 spin_unlock_irqrestore(&bt->tx_lock, flags);
807 int ret = wait_for_bt_irq(bt, bt->tx);
815 /* count must be multiple of bt->tx->packet_size */
818 dev_warn(bt->dev, "%s(), count %zu or avail %d is not multiple of packet_size %d\n",
830 cont = bt->tx->buf_size - cur_write_idx;
834 if (copy_from_iter(bt->tx_packet_buf + cur_write_idx,
836 dev_warn(bt->dev, "%s(), copy_from_iter fail\n",
841 spin_lock_irqsave(&bt->tx_lock, flags);
842 bt->tx->packet_w += write_size / packet_size;
843 spin_unlock_irqrestore(&bt->tx_lock, flags);
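
The write path mirrors the read path: free space is buf_size minus the packet_w - packet_r backlog, each copy is clamped at the point where the ring wraps, and packet_w advances under tx_lock only after copy_from_iter() has succeeded. The wrap clamp, roughly:

    /* Sketch: limit one copy to the contiguous span before the ring
     * wraps; the next loop iteration restarts at offset 0.
     */
    cont = bt->tx->buf_size - cur_write_idx;   /* bytes until wrap */
    if (write_size > cont)
            write_size = cont;

    if (copy_from_iter(bt->tx_packet_buf + cur_write_idx,
                       write_size, buf) != write_size)
            return -EFAULT;  /* the driver warns and bails out similarly */
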
851 (struct mtk_btcvsd_snd *bt, struct snd_pcm_substream *substream)
854 return bt->tx;
856 return bt->rx;
874 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
877 dev_dbg(bt->dev, "%s(), stream %d, substream %p\n",
883 ret = mtk_btcvsd_snd_tx_init(bt);
884 bt->tx->substream = substream;
886 ret = mtk_btcvsd_snd_rx_init(bt);
887 bt->rx->substream = substream;
896 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
897 struct mtk_btcvsd_snd_stream *bt_stream = get_bt_stream(bt, substream);
899 dev_dbg(bt->dev, "%s(), stream %d\n", __func__, substream->stream);
901 mtk_btcvsd_snd_set_state(bt, bt_stream, BT_SCO_STATE_IDLE);
910 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
913 params_buffer_bytes(hw_params) % bt->tx->packet_size != 0) {
914 dev_warn(bt->dev, "%s(), error, buffer size %d not valid\n",
927 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
930 btcvsd_tx_clean_buffer(bt);
938 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
939 struct mtk_btcvsd_snd_stream *bt_stream = get_bt_stream(bt, substream);
941 dev_dbg(bt->dev, "%s(), stream %d\n", __func__, substream->stream);
943 mtk_btcvsd_snd_set_state(bt, bt_stream, BT_SCO_STATE_RUNNING);
950 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
951 struct mtk_btcvsd_snd_stream *bt_stream = get_bt_stream(bt, substream);
955 dev_dbg(bt->dev, "%s(), stream %d, cmd %d\n",
970 mtk_btcvsd_snd_set_state(bt, bt_stream, BT_SCO_STATE_ENDING);
981 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
987 spinlock_t *lock; /* spinlock for bt stream control */
991 lock = &bt->tx_lock;
992 bt_stream = bt->tx;
994 lock = &bt->rx_lock;
995 bt_stream = bt->rx;
1000 bt->tx->packet_r : bt->rx->packet_w;
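
The pointer callback picks whichever counter tracks hardware progress: packet_r for playback (how far BT has consumed) and packet_w for capture (how far the ISR has produced), each read under the matching per-direction spinlock. Turning such a free-running counter into an ALSA hardware pointer is a bytes-to-frames conversion modulo the buffer; a sketch with hypothetical names:

    /* Sketch: free-running packet counter -> hw pointer in frames. */
    static snd_pcm_uframes_t btcvsd_hw_ptr(unsigned int packets,
                                           unsigned int bytes_per_packet,
                                           struct snd_pcm_substream *substream)
    {
            struct snd_pcm_runtime *runtime = substream->runtime;
            snd_pcm_uframes_t frames;

            frames = bytes_to_frames(runtime, packets * bytes_per_packet);
            return frames % runtime->buffer_size;
    }
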
1031 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
1034 return mtk_btcvsd_snd_write(bt, buf, count);
1036 return mtk_btcvsd_snd_read(bt, buf, count);
1050 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
1052 ucontrol->value.integer.value[0] = bt->band;
1060 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
1066 bt->band = ucontrol->value.integer.value[0];
1067 dev_dbg(bt->dev, "%s(), band %d\n", __func__, bt->band);
1075 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
1076 bool lpbk_en = bt->tx->state == BT_SCO_STATE_LOOPBACK;
1086 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
1089 mtk_btcvsd_snd_set_state(bt, bt->tx, BT_SCO_STATE_LOOPBACK);
1090 mtk_btcvsd_snd_set_state(bt, bt->rx, BT_SCO_STATE_LOOPBACK);
1092 mtk_btcvsd_snd_set_state(bt, bt->tx, BT_SCO_STATE_RUNNING);
1093 mtk_btcvsd_snd_set_state(bt, bt->rx, BT_SCO_STATE_RUNNING);
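
A single control write flips both state machines together, which is what lets the ISR's loopback branch (lines 517-550) take over atomically from user space's point of view. A condensed put handler in the same shape (a sketch, with validation and locking omitted):

    static int btcvsd_loopback_put(struct snd_kcontrol *kcontrol,
                                   struct snd_ctl_elem_value *ucontrol)
    {
            struct snd_soc_component *cmpnt =
                    snd_soc_kcontrol_component(kcontrol);
            struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
            int state = ucontrol->value.integer.value[0] ?
                        BT_SCO_STATE_LOOPBACK : BT_SCO_STATE_RUNNING;

            mtk_btcvsd_snd_set_state(bt, bt->tx, state);
            mtk_btcvsd_snd_set_state(bt, bt->rx, state);
            return 1;       /* report the value as changed */
    }
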
1102 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
1104 if (!bt->tx) {
1109 ucontrol->value.integer.value[0] = bt->tx->mute;
1117 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
1119 if (!bt->tx)
1122 bt->tx->mute = ucontrol->value.integer.value[0];
1130 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
1132 if (!bt->rx)
1135 ucontrol->value.integer.value[0] = bt->rx->rw_cnt ? 1 : 0;
1143 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
1145 if (!bt->rx)
1148 ucontrol->value.integer.value[0] = bt->rx->timeout;
1149 bt->rx->timeout = 0;
1157 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
1164 get_rx_time_stamp(bt, &time_buffer_info_rx);
1166 dev_dbg(bt->dev, "%s(), time_stamp_us %llu, data_count_equi_time %llu",
1173 dev_warn(bt->dev, "%s(), copy_to_user fail", __func__);
1184 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
1186 if (!bt->tx)
1189 ucontrol->value.integer.value[0] = bt->tx->rw_cnt ? 1 : 0;
1197 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
1199 ucontrol->value.integer.value[0] = bt->tx->timeout;
1207 struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(cmpnt);
1214 get_tx_time_stamp(bt, &time_buffer_info_tx);
1216 dev_dbg(bt->dev, "%s(), time_stamp_us %llu, data_count_equi_time %llu",
1223 dev_warn(bt->dev, "%s(), copy_to_user fail", __func__);