Lines Matching refs:h5 (identifier cross-reference hits in drivers/bluetooth/hci_h5.c, the Linux Bluetooth Three-wire UART driver)
63 struct h5 {
110 int (*setup)(struct h5 *h5);
111 void (*open)(struct h5 *h5);
112 void (*close)(struct h5 *h5);
113 int (*suspend)(struct h5 *h5);
114 int (*resume)(struct h5 *h5);
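
These five callbacks are not members of struct h5 itself: they form the vendor hook table (struct h5_vnd) that struct h5 points to via h5->vnd, and that the Realtek serdev glue at the bottom of the file fills in. A sketch of the table, with the two extra members implied by the h5->vnd->acpi_gpio_map and h5->vnd->sizeof_priv references further down:

    struct h5_vnd {
            int (*setup)(struct h5 *h5);    /* firmware load, baud-rate switch */
            void (*open)(struct h5 *h5);    /* line settings, power-on GPIOs */
            void (*close)(struct h5 *h5);   /* power-off GPIOs, PM teardown */
            int (*suspend)(struct h5 *h5);
            int (*resume)(struct h5 *h5);
            const struct acpi_gpio_mapping *acpi_gpio_map;
            int sizeof_priv;
    };
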
124 static void h5_reset_rx(struct h5 *h5);
128 struct h5 *h5 = hu->priv;
139 skb_queue_tail(&h5->unrel, nskb);
142 static u8 h5_cfg_field(struct h5 *h5)
145 return h5->tx_win & 0x07;
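
h5_cfg_field() packs the advertised sliding-window size into the low three bits of the CONFIG message payload; the receive side decodes it the same way (data[2] & 0x07 in h5_handle_internal_rx below). A trivial standalone model, assuming the driver's H5_TX_WIN_MAX value of 4:

    #include <stdio.h>
    #include <stdint.h>

    /* Low three bits of the CONFIG payload byte carry the window size. */
    static uint8_t cfg_encode(uint8_t tx_win) { return tx_win & 0x07; }
    static uint8_t cfg_decode(uint8_t field)  { return field & 0x07; }

    int main(void)
    {
            uint8_t field = cfg_encode(4);  /* assumed H5_TX_WIN_MAX */
            printf("advertised window: %u\n", cfg_decode(field));
            return 0;
    }
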
152 struct h5 *h5 = from_timer(h5, t, timer);
153 struct hci_uart *hu = h5->hu;
159 if (h5->state == H5_UNINITIALIZED)
162 if (h5->state == H5_INITIALIZED) {
163 conf_req[2] = h5_cfg_field(h5);
167 if (h5->state != H5_ACTIVE) {
168 mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
172 if (h5->sleep != H5_AWAKE) {
173 h5->sleep = H5_SLEEPING;
177 BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);
179 spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);
181 while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
182 h5->tx_seq = (h5->tx_seq - 1) & 0x07;
183 skb_queue_head(&h5->rel, skb);
186 spin_unlock_irqrestore(&h5->unack.lock, flags);
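
This timeout path is go-back-N retransmission: each packet still sitting in unack is popped from the tail, tx_seq is wound back one step in the mod-8 sequence space, and the packet is requeued at the head of the reliable queue so h5_dequeue() resends it under its original sequence number. The rewind arithmetic in isolation (runnable, user-space types, queues elided):

    #include <stdio.h>

    int main(void)
    {
            unsigned tx_seq = 2;    /* next sequence number to assign */
            int unacked = 3;        /* seqs 7, 0, 1 sent but never acked */

            while (unacked--)
                    tx_seq = (tx_seq - 1) & 0x07;   /* 2 -> 1 -> 0 -> 7 */

            printf("retransmission restarts at seq %u\n", tx_seq);  /* 7 */
            return 0;
    }
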
194 struct h5 *h5 = hu->priv;
198 h5->state = H5_UNINITIALIZED;
200 del_timer(&h5->timer);
202 skb_queue_purge(&h5->rel);
203 skb_queue_purge(&h5->unrel);
204 skb_queue_purge(&h5->unack);
206 h5->tx_seq = 0;
207 h5->tx_ack = 0;
215 struct h5 *h5;
221 h5 = serdev_device_get_drvdata(hu->serdev);
223 h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
224 if (!h5)
228 hu->priv = h5;
229 h5->hu = hu;
231 skb_queue_head_init(&h5->unack);
232 skb_queue_head_init(&h5->rel);
233 skb_queue_head_init(&h5->unrel);
235 h5_reset_rx(h5);
237 timer_setup(&h5->timer, h5_timed_event, 0);
239 h5->tx_win = H5_TX_WIN_MAX;
241 if (h5->vnd && h5->vnd->open)
242 h5->vnd->open(h5);
248 mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
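
h5_open() leaves the link in H5_UNINITIALIZED and arms the timer, whose handler (h5_timed_event above) keeps resending sync and then config messages until the peer answers and the state reaches H5_ACTIVE. In the driver the three states live in an anonymous enum inside struct h5; named here for the sketch:

    enum h5_state {
            H5_UNINITIALIZED,       /* sending sync requests, no reply yet */
            H5_INITIALIZED,         /* sync done, config exchange running */
            H5_ACTIVE,              /* window agreed, data packets may flow */
    };
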
255 struct h5 *h5 = hu->priv;
257 del_timer_sync(&h5->timer);
259 skb_queue_purge(&h5->unack);
260 skb_queue_purge(&h5->rel);
261 skb_queue_purge(&h5->unrel);
263 kfree_skb(h5->rx_skb);
264 h5->rx_skb = NULL;
266 if (h5->vnd && h5->vnd->close)
267 h5->vnd->close(h5);
270 kfree(h5);
277 struct h5 *h5 = hu->priv;
279 if (h5->vnd && h5->vnd->setup)
280 return h5->vnd->setup(h5);
285 static void h5_pkt_cull(struct h5 *h5)
292 spin_lock_irqsave(&h5->unack.lock, flags);
294 to_remove = skb_queue_len(&h5->unack);
298 seq = h5->tx_seq;
301 if (h5->rx_ack == seq)
308 if (seq != h5->rx_ack)
312 skb_queue_walk_safe(&h5->unack, skb, tmp) {
316 __skb_unlink(skb, &h5->unack);
320 if (skb_queue_empty(&h5->unack))
321 del_timer(&h5->timer);
324 spin_unlock_irqrestore(&h5->unack.lock, flags);
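
h5_pkt_cull() determines how many outstanding packets the peer's rx_ack (its "next sequence number I expect" value) covers by walking backwards from tx_seq, then unlinks exactly that many from the head of unack. A runnable model of the count, assuming the same mod-8 sequence space:

    #include <stdio.h>

    /* qlen packets are outstanding, tx_seq is the next sequence number to
     * be assigned, rx_ack is the peer's "next expected" value.  Returns the
     * number of confirmed packets, or -1 if rx_ack is outside the window. */
    static int pkts_acked(unsigned tx_seq, unsigned rx_ack, int qlen)
    {
            unsigned seq = tx_seq;
            int to_remove = qlen;

            while (to_remove > 0 && seq != rx_ack) {
                    to_remove--;
                    seq = (seq - 1) & 0x07;
            }
            return seq == rx_ack ? to_remove : -1;
    }

    int main(void)
    {
            /* seqs 5, 6, 7 outstanding (tx_seq is 0); the peer expects 7
             * next, so packets 5 and 6 are confirmed and can be culled. */
            printf("%d packets acked\n", pkts_acked(0, 7, 3));      /* 2 */
            return 0;
    }
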
329 struct h5 *h5 = hu->priv;
337 const unsigned char *hdr = h5->rx_skb->data;
338 const unsigned char *data = &h5->rx_skb->data[4];
348 conf_req[2] = h5_cfg_field(h5);
351 if (h5->state == H5_ACTIVE)
355 if (h5->state == H5_ACTIVE)
357 h5->state = H5_INITIALIZED;
364 h5->tx_win = (data[2] & 0x07);
365 BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
366 h5->state = H5_ACTIVE;
371 h5->sleep = H5_SLEEPING;
375 h5->sleep = H5_AWAKE;
379 h5->sleep = H5_AWAKE;
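
The branches above match two-byte link-control messages carried in unreliable packets. The driver defines them as small local arrays; sketched below with byte values as found in the source (best-effort, verify against the file before relying on them):

    const unsigned char sync_req[]   = { 0x01, 0x7e };
    const unsigned char sync_rsp[]   = { 0x02, 0x7d };
    unsigned char       conf_req[3]  = { 0x03, 0xfc };  /* [2] = h5_cfg_field() */
    const unsigned char conf_rsp[]   = { 0x04, 0x7b };
    const unsigned char wakeup_req[] = { 0x05, 0xfa };
    const unsigned char woken_req[]  = { 0x06, 0xf9 };
    const unsigned char sleep_req[]  = { 0x07, 0x78 };
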
390 struct h5 *h5 = hu->priv;
391 const unsigned char *hdr = h5->rx_skb->data;
394 h5->tx_ack = (h5->tx_ack + 1) % 8;
395 set_bit(H5_TX_ACK_REQ, &h5->flags);
399 h5->rx_ack = H5_HDR_ACK(hdr);
401 h5_pkt_cull(h5);
408 hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);
411 skb_pull(h5->rx_skb, 4);
413 hci_recv_frame(hu->hdev, h5->rx_skb);
414 h5->rx_skb = NULL;
423 h5_reset_rx(h5);
435 struct h5 *h5 = hu->priv;
436 const unsigned char *hdr = h5->rx_skb->data;
439 h5->rx_func = h5_rx_crc;
440 h5->rx_pending = 2;
450 struct h5 *h5 = hu->priv;
451 const unsigned char *hdr = h5->rx_skb->data;
460 h5_reset_rx(h5);
464 if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
466 H5_HDR_SEQ(hdr), h5->tx_ack);
467 set_bit(H5_TX_ACK_REQ, &h5->flags);
469 h5_reset_rx(h5);
473 if (h5->state != H5_ACTIVE &&
476 h5_reset_rx(h5);
480 h5->rx_func = h5_rx_payload;
481 h5->rx_pending = H5_HDR_LEN(hdr);
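
H5_HDR_SEQ(), H5_HDR_ACK(), H5_HDR_RELIABLE() and friends decode the four-byte packet header. The layout they imply, written out as the accessor macros the driver defines near the top of the file:

    /* hdr[0]: seq (bits 0-2), ack (3-5), crc present (6), reliable (7)
     * hdr[1]: packet type (bits 0-3), payload length low nibble (4-7)
     * hdr[2]: payload length, high eight bits
     * hdr[3]: checksum -- all four bytes must sum to 0xff modulo 256 */
    #define H5_HDR_SEQ(hdr)         ((hdr)[0] & 0x07)
    #define H5_HDR_ACK(hdr)         (((hdr)[0] >> 3) & 0x07)
    #define H5_HDR_CRC(hdr)         (((hdr)[0] >> 6) & 0x01)
    #define H5_HDR_RELIABLE(hdr)    (((hdr)[0] >> 7) & 0x01)
    #define H5_HDR_PKT_TYPE(hdr)    ((hdr)[1] & 0x0f)
    #define H5_HDR_LEN(hdr)         ((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))
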
488 struct h5 *h5 = hu->priv;
493 h5->rx_func = h5_rx_3wire_hdr;
494 h5->rx_pending = 4;
496 h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
497 if (!h5->rx_skb) {
499 h5_reset_rx(h5);
503 h5->rx_skb->dev = (void *)hu->hdev;
510 struct h5 *h5 = hu->priv;
513 h5->rx_func = h5_rx_pkt_start;
518 static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
523 if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
524 set_bit(H5_RX_ESC, &h5->flags);
528 if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
538 h5_reset_rx(h5);
543 skb_put_data(h5->rx_skb, byte, 1);
544 h5->rx_pending--;
546 BT_DBG("unslipped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
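
h5_unslip_one_byte() undoes the SLIP escaping applied on transmit: 0xc0 delimits packets, so a literal 0xc0 in the payload travels as the pair 0xdb 0xdc and a literal 0xdb as 0xdb 0xdd. The driver decodes one byte at a time, tracking the escape with the H5_RX_ESC flag; the same logic over a whole buffer, self-contained and runnable:

    #include <stdio.h>
    #include <stdint.h>

    #define SLIP_DELIMITER  0xc0
    #define SLIP_ESC        0xdb
    #define SLIP_ESC_DELIM  0xdc    /* ESC + this -> literal 0xc0 */
    #define SLIP_ESC_ESC    0xdd    /* ESC + this -> literal 0xdb */

    /* Returns decoded length, or -1 on an invalid escape sequence
     * (the driver resets its rx state machine in that case). */
    static int slip_decode(const uint8_t *in, int len, uint8_t *out)
    {
            int n = 0, esc = 0;

            for (int i = 0; i < len; i++) {
                    if (esc) {
                            esc = 0;
                            if (in[i] == SLIP_ESC_DELIM)
                                    out[n++] = SLIP_DELIMITER;
                            else if (in[i] == SLIP_ESC_ESC)
                                    out[n++] = SLIP_ESC;
                            else
                                    return -1;
                    } else if (in[i] == SLIP_ESC) {
                            esc = 1;
                    } else {
                            out[n++] = in[i];
                    }
            }
            return n;
    }

    int main(void)
    {
            const uint8_t wire[] = { 0x01, 0xdb, 0xdc, 0x02 };
            uint8_t buf[sizeof(wire)];
            int n = slip_decode(wire, sizeof(wire), buf);

            printf("%d bytes, buf[1] = 0x%02x\n", n, buf[1]);   /* 3, 0xc0 */
            return 0;
    }
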
549 static void h5_reset_rx(struct h5 *h5)
551 if (h5->rx_skb) {
552 kfree_skb(h5->rx_skb);
553 h5->rx_skb = NULL;
556 h5->rx_func = h5_rx_delimiter;
557 h5->rx_pending = 0;
558 clear_bit(H5_RX_ESC, &h5->flags);
563 struct h5 *h5 = hu->priv;
566 BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
572 if (h5->rx_pending > 0) {
575 h5_reset_rx(h5);
579 h5_unslip_one_byte(h5, *ptr);
585 processed = h5->rx_func(hu, *ptr);
604 struct h5 *h5 = hu->priv;
612 if (h5->state != H5_ACTIVE) {
621 skb_queue_tail(&h5->rel, skb);
626 skb_queue_tail(&h5->unrel, skb);
686 struct h5 *h5 = hu->priv;
710 hdr[0] = h5->tx_ack << 3;
711 clear_bit(H5_TX_ACK_REQ, &h5->flags);
716 hdr[0] |= h5->tx_seq;
717 h5->tx_seq = (h5->tx_seq + 1) % 8;
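
The transmit side mirrors the header accessors: h5_prepare_pkt() stamps the current tx_ack into the header (clearing the pending-ack flag), assigns tx_seq only to reliable packet types, and closes with the additive checksum. A sketch of the packing with a hypothetical helper, h5_hdr_pack(), using user-space types:

    #include <stdint.h>

    /* Pack a Three-wire header (sketch of what h5_prepare_pkt() builds):
     * byte 0: seq | ack << 3 | crc-present << 6 | reliable << 7
     * byte 1: packet type | (len & 0x0f) << 4
     * byte 2: len >> 4
     * byte 3: ones' complement of the sum of bytes 0-2 */
    static void h5_hdr_pack(uint8_t hdr[4], uint8_t seq, uint8_t ack,
                            uint8_t pkt_type, uint16_t len, int reliable)
    {
            hdr[0] = (ack & 0x07) << 3;
            if (reliable)
                    hdr[0] |= 0x80 | (seq & 0x07);
            hdr[1] = (pkt_type & 0x0f) | ((len & 0x0f) << 4);
            hdr[2] = len >> 4;
            hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);
    }
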
742 struct h5 *h5 = hu->priv;
746 if (h5->sleep != H5_AWAKE) {
749 if (h5->sleep == H5_WAKING_UP)
752 h5->sleep = H5_WAKING_UP;
755 mod_timer(&h5->timer, jiffies + HZ / 100);
759 skb = skb_dequeue(&h5->unrel);
768 skb_queue_head(&h5->unrel, skb);
772 spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);
774 if (h5->unack.qlen >= h5->tx_win)
777 skb = skb_dequeue(&h5->rel);
782 __skb_queue_tail(&h5->unack, skb);
783 mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
784 spin_unlock_irqrestore(&h5->unack.lock, flags);
788 skb_queue_head(&h5->rel, skb);
793 spin_unlock_irqrestore(&h5->unack.lock, flags);
795 if (test_bit(H5_TX_ACK_REQ, &h5->flags))
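
h5_dequeue() will not hand data to a sleeping peer: it queues a wakeup request, re-arms the timer on a short period, and resumes only once the woken reply flips the state back. It also stops draining the reliable queue while unack already holds tx_win packets. The sleep states involved (again an anonymous enum inside struct h5 in the driver, named here for the sketch):

    enum h5_sleep {
            H5_AWAKE,       /* normal operation */
            H5_SLEEPING,    /* peer sent sleep_req; wake it before sending */
            H5_WAKING_UP,   /* wakeup_req sent, waiting for the woken reply */
    };
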
822 struct h5 *h5;
825 h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL);
826 if (!h5)
829 h5->hu = &h5->serdev_hu;
830 h5->serdev_hu.serdev = serdev;
831 serdev_device_set_drvdata(serdev, h5);
841 h5->vnd = data->vnd;
842 h5->id = (char *)match->id;
844 if (h5->vnd->acpi_gpio_map)
846 h5->vnd->acpi_gpio_map);
852 h5->vnd = data->vnd;
856 set_bit(H5_WAKEUP_DISABLE, &h5->flags);
858 h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
859 if (IS_ERR(h5->enable_gpio))
860 return PTR_ERR(h5->enable_gpio);
862 h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake",
864 if (IS_ERR(h5->device_wake_gpio))
865 return PTR_ERR(h5->device_wake_gpio);
867 return hci_uart_register_device_priv(&h5->serdev_hu, &h5p,
868 h5->vnd->sizeof_priv);
873 struct h5 *h5 = serdev_device_get_drvdata(serdev);
875 hci_uart_unregister_device(&h5->serdev_hu);
880 struct h5 *h5 = dev_get_drvdata(dev);
883 if (h5->vnd && h5->vnd->suspend)
884 ret = h5->vnd->suspend(h5);
891 struct h5 *h5 = dev_get_drvdata(dev);
894 if (h5->vnd && h5->vnd->resume)
895 ret = h5->vnd->resume(h5);
901 static int h5_btrtl_setup(struct h5 *h5)
911 btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id);
915 err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev,
922 skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data),
925 rtl_dev_err(h5->hu->hdev, "set baud rate command failed\n");
934 serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate);
935 serdev_device_set_flow_control(h5->hu->serdev, flow_control);
938 set_bit(H5_HW_FLOW_CONTROL, &h5->flags);
940 err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev);
946 btrtl_set_quirks(h5->hu->hdev, btrtl_dev);
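
h5_btrtl_setup() is the vendor setup hook end to end: identify the chip, ask the Realtek helper which UART settings the firmware expects, switch the controller's speed with vendor command 0xfc17 (payload: the baud-rate word, little-endian) while the host is still at the old rate, and only then retune the local serdev and download the firmware. Condensed from the calls visible above; argument lists sketched, declarations and error handling elided:

    btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id);
    btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev, &controller_baudrate,
                            &device_baudrate, &flow_control);

    baudrate_data = cpu_to_le32(device_baudrate);
    __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data),
                   &baudrate_data, HCI_INIT_TIMEOUT);   /* still at old rate */

    serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate);
    serdev_device_set_flow_control(h5->hu->serdev, flow_control);

    btrtl_download_firmware(h5->hu->hdev, btrtl_dev);
    btrtl_set_quirks(h5->hu->hdev, btrtl_dev);
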
954 static void h5_btrtl_open(struct h5 *h5)
961 if (test_bit(H5_WAKEUP_DISABLE, &h5->flags))
962 set_bit(HCI_UART_NO_SUSPEND_NOTIFIER, &h5->hu->flags);
965 serdev_device_set_flow_control(h5->hu->serdev, false);
966 serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN);
967 serdev_device_set_baudrate(h5->hu->serdev, 115200);
969 if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) {
970 pm_runtime_set_active(&h5->hu->serdev->dev);
971 pm_runtime_use_autosuspend(&h5->hu->serdev->dev);
972 pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev,
974 pm_runtime_enable(&h5->hu->serdev->dev);
978 gpiod_set_value_cansleep(h5->enable_gpio, 0);
979 gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
983 gpiod_set_value_cansleep(h5->enable_gpio, 1);
984 gpiod_set_value_cansleep(h5->device_wake_gpio, 1);
988 static void h5_btrtl_close(struct h5 *h5)
990 if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags))
991 pm_runtime_disable(&h5->hu->serdev->dev);
993 gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
994 gpiod_set_value_cansleep(h5->enable_gpio, 0);
1003 static int h5_btrtl_suspend(struct h5 *h5)
1005 serdev_device_set_flow_control(h5->hu->serdev, false);
1006 gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
1008 if (test_bit(H5_WAKEUP_DISABLE, &h5->flags))
1009 gpiod_set_value_cansleep(h5->enable_gpio, 0);
1034 static int h5_btrtl_resume(struct h5 *h5)
1036 if (test_bit(H5_WAKEUP_DISABLE, &h5->flags)) {
1046 reprobe->dev = get_device(&h5->hu->serdev->dev);
1049 gpiod_set_value_cansleep(h5->device_wake_gpio, 1);
1051 if (test_bit(H5_HW_FLOW_CONTROL, &h5->flags))
1052 serdev_device_set_flow_control(h5->hu->serdev, true);