Lines matching refs: shdlc (net/nfc/hci/llc_shdlc.c, the NFC HCI SHDLC link-layer control; the number on each line below is its source line number in that file)

3  * shdlc Link Layer Control
8 #define pr_fmt(fmt) "shdlc: %s: " fmt, __func__
104 print_hex_dump(KERN_DEBUG, "shdlc: ", DUMP_PREFIX_OFFSET, \
126 static struct sk_buff *llc_shdlc_alloc_skb(const struct llc_shdlc *shdlc,
131 skb = alloc_skb(shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM +
132 shdlc->tx_tailroom + payload_len, GFP_KERNEL);
134 skb_reserve(skb, shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM);
140 static int llc_shdlc_send_s_frame(const struct llc_shdlc *shdlc,
148 skb = llc_shdlc_alloc_skb(shdlc, 0);
154 r = shdlc->xmit_to_drv(shdlc->hdev, skb);
162 static int llc_shdlc_send_u_frame(const struct llc_shdlc *shdlc,
172 r = shdlc->xmit_to_drv(shdlc->hdev, skb);
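Both senders prepend a single control byte before handing the frame to the driver; the I-frame equivalent appears verbatim at line 543. A minimal sketch of that layout, assuming the SHDLC_CONTROL_HEAD_S/U masks defined earlier in this file (the helper names here are hypothetical, for illustration only):

    /* Sketch: control-byte layout used by the two senders above. */
    static void shdlc_push_s_control(struct sk_buff *skb, int sframe_type, int nr)
    {
            /* S-frame: 110ttnnn, t = supervisory type, n = N(R) */
            *(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr;
    }

    static void shdlc_push_u_control(struct sk_buff *skb, int modifier)
    {
            /* U-frame: 111mmmmm, m = modifier (RSET, UA, ...) */
            *(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | modifier;
    }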
183 static void llc_shdlc_reset_t2(struct llc_shdlc *shdlc, int y_nr)
186 int dnr = shdlc->dnr; /* MUST initially be < y_nr */
193 skb = skb_dequeue(&shdlc->ack_pending_q);
199 if (skb_queue_empty(&shdlc->ack_pending_q)) {
200 if (shdlc->t2_active) {
201 del_timer_sync(&shdlc->t2_timer);
202 shdlc->t2_active = false;
208 skb = skb_peek(&shdlc->ack_pending_q);
210 mod_timer(&shdlc->t2_timer, *(unsigned long *)skb->cb +
212 shdlc->t2_active = true;
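The mod_timer() call at line 210 re-arms T2 from a timestamp stored in skb->cb: the transmit path stamps jiffies into each I-frame's control buffer, so T2 can always be re-armed relative to the oldest frame still awaiting an acknowledgment. A sketch of the two halves, assuming the SHDLC_T2_VALUE_MS define from this file (helper names hypothetical):

    /* Sketch: tie T2 to per-frame send times. */
    static void shdlc_stamp_tx_time(struct sk_buff *skb)
    {
            *(unsigned long *)skb->cb = jiffies;    /* done on transmit */
    }

    static void shdlc_rearm_t2(struct llc_shdlc *shdlc, struct sk_buff *oldest)
    {
            /* oldest = head of ack_pending_q */
            mod_timer(&shdlc->t2_timer, *(unsigned long *)oldest->cb +
                      msecs_to_jiffies(SHDLC_T2_VALUE_MS));
    }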
223 static void llc_shdlc_rcv_i_frame(struct llc_shdlc *shdlc,
231 if (shdlc->state != SHDLC_CONNECTED)
234 if (x_ns != shdlc->nr) {
235 llc_shdlc_send_s_frame(shdlc, S_FRAME_REJ, shdlc->nr);
239 if (shdlc->t1_active == false) {
240 shdlc->t1_active = true;
241 mod_timer(&shdlc->t1_timer, jiffies +
242 msecs_to_jiffies(SHDLC_T1_VALUE_MS(shdlc->w)));
247 shdlc->rcv_to_hci(shdlc->hdev, skb);
251 shdlc->nr = (shdlc->nr + 1) % 8;
253 if (llc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
254 llc_shdlc_reset_t2(shdlc, y_nr);
256 shdlc->dnr = y_nr;
263 static void llc_shdlc_rcv_ack(struct llc_shdlc *shdlc, int y_nr)
267 if (llc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
268 llc_shdlc_reset_t2(shdlc, y_nr);
269 shdlc->dnr = y_nr;
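Both the I-frame path (line 253) and the bare-ack path (line 267) gate acknowledgment handling on llc_shdlc_x_lt_y_lteq_z(), a circular interval test over the 3-bit sequence space: it accepts y only when dnr < y <= ns on the modulo-8 circle. A sketch of such a predicate (not the file's verbatim body):

    /* Sketch: "x < y <= z" on the modulo-8 sequence circle. */
    static bool shdlc_x_lt_y_lteq_z(int x, int y, int z)
    {
            if (x < z)                      /* interval does not wrap */
                    return x < y && y <= z;

            /* interval wraps past 7 back through 0 */
            return y > x || y <= z;
    }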
273 static void llc_shdlc_requeue_ack_pending(struct llc_shdlc *shdlc)
277 pr_debug("ns reset to %d\n", shdlc->dnr);
279 while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) {
281 skb_queue_head(&shdlc->send_q, skb);
283 shdlc->ns = shdlc->dnr;
286 static void llc_shdlc_rcv_rej(struct llc_shdlc *shdlc, int y_nr)
292 if (llc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) {
293 if (shdlc->t2_active) {
294 del_timer_sync(&shdlc->t2_timer);
295 shdlc->t2_active = false;
299 if (shdlc->dnr != y_nr) {
300 while ((shdlc->dnr = ((shdlc->dnr + 1) % 8)) != y_nr) {
301 skb = skb_dequeue(&shdlc->ack_pending_q);
306 llc_shdlc_requeue_ack_pending(shdlc);
311 static void llc_shdlc_rcv_s_frame(struct llc_shdlc *shdlc,
316 if (shdlc->state != SHDLC_CONNECTED)
321 llc_shdlc_rcv_ack(shdlc, nr);
322 if (shdlc->rnr == true) { /* see SHDLC 10.7.7 */
323 shdlc->rnr = false;
324 if (shdlc->send_q.qlen == 0) {
325 skb = llc_shdlc_alloc_skb(shdlc, 0);
327 skb_queue_tail(&shdlc->send_q, skb);
332 llc_shdlc_rcv_rej(shdlc, nr);
335 llc_shdlc_rcv_ack(shdlc, nr);
336 shdlc->rnr = true;
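The s_frame_type dispatched here distinguishes the three supervisory frames: RR acknowledges, REJ acknowledges and requests retransmission from N(R), and RNR acknowledges but asks the sender to pause (hence shdlc->rnr above). A sketch of the enumeration, with values assumed from the SHDLC specification:

    /* Sketch: supervisory frame types handled above. */
    enum sframe_type {
            S_FRAME_RR      = 0x00, /* receive ready: plain ack */
            S_FRAME_REJ     = 0x01, /* reject: go-back-N retransmit */
            S_FRAME_RNR     = 0x02, /* receive not ready: ack + pause */
            S_FRAME_MAX     = 0x03,
    };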
343 static void llc_shdlc_connect_complete(struct llc_shdlc *shdlc, int r)
347 del_timer_sync(&shdlc->connect_timer);
350 shdlc->ns = 0;
351 shdlc->nr = 0;
352 shdlc->dnr = 0;
354 shdlc->state = SHDLC_HALF_CONNECTED;
356 shdlc->state = SHDLC_DISCONNECTED;
359 shdlc->connect_result = r;
361 wake_up(shdlc->connect_wq);
364 static int llc_shdlc_connect_initiate(const struct llc_shdlc *shdlc)
370 skb = llc_shdlc_alloc_skb(shdlc, 2);
377 return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET);
380 static int llc_shdlc_connect_send_ua(const struct llc_shdlc *shdlc)
386 skb = llc_shdlc_alloc_skb(shdlc, 0);
390 return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA);
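llc_shdlc_alloc_skb(shdlc, 2) reserves room for the two negotiation bytes an RSET carries, while the UA is sent empty. A sketch of what the RSET payload holds between lines 370 and 377, assuming the SHDLC_MAX_WINDOW and SHDLC_SREJ_SUPPORT defaults set at lines 759-760:

    /* Sketch: RSET negotiation payload. */
    skb_put_u8(skb, SHDLC_MAX_WINDOW);              /* proposed window size */
    skb_put_u8(skb, SHDLC_SREJ_SUPPORT ? 1 : 0);    /* selective reject? */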
393 static void llc_shdlc_rcv_u_frame(struct llc_shdlc *shdlc,
405 switch (shdlc->state) {
421 shdlc->w = w;
422 shdlc->srej_support = srej_support;
423 r = llc_shdlc_connect_send_ua(shdlc);
424 llc_shdlc_connect_complete(shdlc, r);
438 shdlc->hard_fault = -ECONNRESET;
445 if ((shdlc->state == SHDLC_CONNECTING &&
446 shdlc->connect_tries > 0) ||
447 (shdlc->state == SHDLC_NEGOTIATING)) {
448 llc_shdlc_connect_complete(shdlc, 0);
449 shdlc->state = SHDLC_CONNECTED;
459 static void llc_shdlc_handle_rcv_queue(struct llc_shdlc *shdlc)
468 if (shdlc->rcv_q.qlen)
469 pr_debug("rcvQlen=%d\n", shdlc->rcv_q.qlen);
471 while ((skb = skb_dequeue(&shdlc->rcv_q)) != NULL) {
477 if (shdlc->state == SHDLC_HALF_CONNECTED)
478 shdlc->state = SHDLC_CONNECTED;
482 llc_shdlc_rcv_i_frame(shdlc, skb, ns, nr);
485 if (shdlc->state == SHDLC_HALF_CONNECTED)
486 shdlc->state = SHDLC_CONNECTED;
490 llc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr);
495 llc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier);
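The dispatch loop classifies every received frame by the top bits of its first byte and unpacks the sequence fields before calling the handlers above. A sketch of that classification, assuming the head and field masks defined near the top of the file:

    /* Sketch: frame classification in the rcv-queue loop. */
    u8 control = skb->data[0];

    skb_pull(skb, 1);
    switch (control & SHDLC_CONTROL_HEAD_MASK) {
    case SHDLC_CONTROL_HEAD_I:      /* 10......: I-frame, carries payload */
            ns = (control >> 3) & 0x07;
            nr = control & 0x07;
            llc_shdlc_rcv_i_frame(shdlc, skb, ns, nr);
            break;
    case SHDLC_CONTROL_HEAD_S:      /* 110.....: supervisory */
            llc_shdlc_rcv_s_frame(shdlc, (control >> 3) & 0x03, control & 0x07);
            kfree_skb(skb);
            break;
    case SHDLC_CONTROL_HEAD_U:      /* 111.....: unnumbered */
            llc_shdlc_rcv_u_frame(shdlc, skb, control & 0x1f);
            break;
    }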
518 static void llc_shdlc_handle_send_queue(struct llc_shdlc *shdlc)
524 if (shdlc->send_q.qlen)
527 shdlc->send_q.qlen, shdlc->ns, shdlc->dnr,
528 shdlc->rnr == false ? "false" : "true",
529 shdlc->w - llc_shdlc_w_used(shdlc->ns, shdlc->dnr),
530 shdlc->ack_pending_q.qlen);
532 while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w &&
533 (shdlc->rnr == false)) {
535 if (shdlc->t1_active) {
536 del_timer_sync(&shdlc->t1_timer);
537 shdlc->t1_active = false;
541 skb = skb_dequeue(&shdlc->send_q);
543 *(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_I | (shdlc->ns << 3) |
544 shdlc->nr;
546 pr_debug("Sending I-Frame %d, waiting to rcv %d\n", shdlc->ns,
547 shdlc->nr);
548 SHDLC_DUMP_SKB("shdlc frame written", skb);
550 r = shdlc->xmit_to_drv(shdlc->hdev, skb);
552 shdlc->hard_fault = r;
556 shdlc->ns = (shdlc->ns + 1) % 8;
561 skb_queue_tail(&shdlc->ack_pending_q, skb);
563 if (shdlc->t2_active == false) {
564 shdlc->t2_active = true;
565 mod_timer(&shdlc->t2_timer, time_sent +
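The while condition at line 532 caps frames in flight at the negotiated window w, and the debug print at line 529 derives the remaining window from llc_shdlc_w_used(). A sketch of that helper: it counts frames sent but not yet acknowledged, accounting for the modulo-8 wrap between dnr and ns:

    /* Sketch: frames in flight (sent, not yet acked). */
    static int shdlc_w_used(int ns, int dnr)
    {
            return dnr <= ns ? ns - dnr : 8 - dnr + ns;
    }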
574 struct llc_shdlc *shdlc = from_timer(shdlc, t, connect_timer);
578 schedule_work(&shdlc->sm_work);
583 struct llc_shdlc *shdlc = from_timer(shdlc, t, t1_timer);
587 schedule_work(&shdlc->sm_work);
592 struct llc_shdlc *shdlc = from_timer(shdlc, t, t2_timer);
596 schedule_work(&shdlc->sm_work);
601 struct llc_shdlc *shdlc = container_of(work, struct llc_shdlc, sm_work);
606 mutex_lock(&shdlc->state_mutex);
608 switch (shdlc->state) {
610 skb_queue_purge(&shdlc->rcv_q);
611 skb_queue_purge(&shdlc->send_q);
612 skb_queue_purge(&shdlc->ack_pending_q);
615 if (shdlc->hard_fault) {
616 llc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
620 if (shdlc->connect_tries++ < 5)
621 r = llc_shdlc_connect_initiate(shdlc);
625 llc_shdlc_connect_complete(shdlc, r);
627 mod_timer(&shdlc->connect_timer, jiffies +
630 shdlc->state = SHDLC_NEGOTIATING;
634 if (timer_pending(&shdlc->connect_timer) == 0) {
635 shdlc->state = SHDLC_CONNECTING;
636 schedule_work(&shdlc->sm_work);
639 llc_shdlc_handle_rcv_queue(shdlc);
641 if (shdlc->hard_fault) {
642 llc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
648 llc_shdlc_handle_rcv_queue(shdlc);
649 llc_shdlc_handle_send_queue(shdlc);
651 if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) {
655 shdlc->t1_active = false;
656 r = llc_shdlc_send_s_frame(shdlc, S_FRAME_RR,
657 shdlc->nr);
659 shdlc->hard_fault = r;
662 if (shdlc->t2_active && timer_pending(&shdlc->t2_timer) == 0) {
666 shdlc->t2_active = false;
668 llc_shdlc_requeue_ack_pending(shdlc);
669 llc_shdlc_handle_send_queue(shdlc);
672 if (shdlc->hard_fault)
673 shdlc->llc_failure(shdlc->hdev, shdlc->hard_fault);
678 mutex_unlock(&shdlc->state_mutex);
682 * Called from syscall context to establish shdlc link. Sleeps until
685 static int llc_shdlc_connect(struct llc_shdlc *shdlc)
691 mutex_lock(&shdlc->state_mutex);
693 shdlc->state = SHDLC_CONNECTING;
694 shdlc->connect_wq = &connect_wq;
695 shdlc->connect_tries = 0;
696 shdlc->connect_result = 1;
698 mutex_unlock(&shdlc->state_mutex);
700 schedule_work(&shdlc->sm_work);
702 wait_event(connect_wq, shdlc->connect_result != 1);
704 return shdlc->connect_result;
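The sentinel value 1 at line 696 works because every real outcome is either 0 (connected) or a negative errno, so the wait_event() condition is unambiguous; llc_shdlc_connect_complete() stores the result and wakes the waiter (lines 359-361). The pattern, condensed into one hypothetical helper:

    /* Sketch: sentinel-based completion used by connect. */
    static int shdlc_wait_connect_result(struct llc_shdlc *shdlc,
                                         wait_queue_head_t *wq)
    {
            shdlc->connect_result = 1;      /* pending: no real result is 1 */
            schedule_work(&shdlc->sm_work); /* kick the state machine */
            wait_event(*wq, shdlc->connect_result != 1);
            return shdlc->connect_result;   /* 0 or -errno */
    }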
707 static void llc_shdlc_disconnect(struct llc_shdlc *shdlc)
711 mutex_lock(&shdlc->state_mutex);
713 shdlc->state = SHDLC_DISCONNECTED;
715 mutex_unlock(&shdlc->state_mutex);
717 schedule_work(&shdlc->sm_work);
721 * Receive an incoming shdlc frame. Frame has already been crc-validated.
725 static void llc_shdlc_recv_frame(struct llc_shdlc *shdlc, struct sk_buff *skb)
729 shdlc->hard_fault = -EREMOTEIO;
732 skb_queue_tail(&shdlc->rcv_q, skb);
735 schedule_work(&shdlc->sm_work);
743 struct llc_shdlc *shdlc;
748 shdlc = kzalloc(sizeof(struct llc_shdlc), GFP_KERNEL);
749 if (shdlc == NULL)
752 mutex_init(&shdlc->state_mutex);
753 shdlc->state = SHDLC_DISCONNECTED;
755 timer_setup(&shdlc->connect_timer, llc_shdlc_connect_timeout, 0);
756 timer_setup(&shdlc->t1_timer, llc_shdlc_t1_timeout, 0);
757 timer_setup(&shdlc->t2_timer, llc_shdlc_t2_timeout, 0);
759 shdlc->w = SHDLC_MAX_WINDOW;
760 shdlc->srej_support = SHDLC_SREJ_SUPPORT;
762 skb_queue_head_init(&shdlc->rcv_q);
763 skb_queue_head_init(&shdlc->send_q);
764 skb_queue_head_init(&shdlc->ack_pending_q);
766 INIT_WORK(&shdlc->sm_work, llc_shdlc_sm_work);
768 shdlc->hdev = hdev;
769 shdlc->xmit_to_drv = xmit_to_drv;
770 shdlc->rcv_to_hci = rcv_to_hci;
771 shdlc->tx_headroom = tx_headroom;
772 shdlc->tx_tailroom = tx_tailroom;
773 shdlc->llc_failure = llc_failure;
775 return shdlc;
780 struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
782 skb_queue_purge(&shdlc->rcv_q);
783 skb_queue_purge(&shdlc->send_q);
784 skb_queue_purge(&shdlc->ack_pending_q);
786 kfree(shdlc);
791 struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
793 return llc_shdlc_connect(shdlc);
798 struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
800 llc_shdlc_disconnect(shdlc);
807 struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
809 llc_shdlc_recv_frame(shdlc, skb);
814 struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
816 skb_queue_tail(&shdlc->send_q, skb);
818 schedule_work(&shdlc->sm_work);
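The nfc_llc_get_data() wrappers at lines 780-818 have the shape of an LLC ops vtable; the file presumably ties them into the NFC LLC core roughly like this (struct and field names assumed from the nfc_llc API):

    /* Sketch: wiring the wrappers above into the LLC core. */
    static const struct nfc_llc_ops llc_shdlc_ops = {
            .init           = llc_shdlc_init,
            .deinit         = llc_shdlc_deinit,
            .start          = llc_shdlc_start,      /* -> llc_shdlc_connect() */
            .stop           = llc_shdlc_stop,       /* -> llc_shdlc_disconnect() */
            .rcv_from_drv   = llc_shdlc_rcv_from_drv,
            .xmit_from_hci  = llc_shdlc_xmit_from_hci,
    };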