Lines matching defs:strp (kernel TLS receive stream parser, net/tls/tls_strp.c)

15 static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
17 if (strp->stopped)
20 strp->stopped = 1;
23 WRITE_ONCE(strp->sk->sk_err, -err);
26 sk_error_report(strp->sk);
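
A minimal sketch of how the tls_strp_abort_strp hits above likely fit together; the early return and the write barrier between the error store and the report are reconstructed assumptions, not a verbatim copy of the file.

static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
{
        if (strp->stopped)
                return;
        strp->stopped = 1;

        /* Record the (negative) error on the socket, then wake readers;
         * the barrier is assumed to pair with a read barrier on the
         * poll/read side.
         */
        WRITE_ONCE(strp->sk->sk_err, -err);
        smp_wmb();
        sk_error_report(strp->sk);
}
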
29 static void tls_strp_anchor_free(struct tls_strparser *strp)
31 struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
34 if (!strp->copy_mode)
36 consume_skb(strp->anchor);
37 strp->anchor = NULL;
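
A sketch of the anchor teardown; the body of the !copy_mode branch (detaching the frag_list that still points into the TCP receive queue) is not among the hits and is an assumption.

static void tls_strp_anchor_free(struct tls_strparser *strp)
{
        struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);

        /* In zero-copy mode the frag_list borrows skbs that are still
         * owned by the TCP receive queue, so detach it before freeing
         * the anchor itself (assumed).
         */
        if (!strp->copy_mode)
                shinfo->frag_list = NULL;
        consume_skb(strp->anchor);
        strp->anchor = NULL;
}
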
41 tls_strp_skb_copy(struct tls_strparser *strp, struct sk_buff *in_skb,
48 &err, strp->sk->sk_allocation);
68 static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
73 skb = tls_strp_skb_copy(strp, strp->anchor, strp->stm.offset,
74 strp->stm.full_len);
86 struct tls_strparser *strp = &ctx->strp;
89 DEBUG_NET_WARN_ON_ONCE(!strp->anchor->decrypted);
97 if (strp->copy_mode) {
104 skb = alloc_skb(0, strp->sk->sk_allocation);
108 swap(strp->anchor, skb);
112 return tls_strp_msg_make_copy(strp);
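
The hits at 86-112 appear to belong to the helper that hands a parsed record to the caller (tls_strp_msg_detach in the upstream tree); a sketch under that assumption, with the enclosing signature and the error handling filled in.

struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx)
{
        struct tls_strparser *strp = &ctx->strp;

        /* Upstream wraps this check in CONFIG_TLS_DEVICE. */
        DEBUG_NET_WARN_ON_ONCE(!strp->anchor->decrypted);

        if (strp->copy_mode) {
                struct sk_buff *skb;

                /* Hand out the copied anchor and leave a fresh, empty
                 * anchor behind in its place.
                 */
                skb = alloc_skb(0, strp->sk->sk_allocation);
                if (!skb)
                        return NULL;

                swap(strp->anchor, skb);
                return skb;
        }

        /* Zero-copy mode: the payload still lives in the TCP queue,
         * so give the caller a private copy.
         */
        return tls_strp_msg_make_copy(strp);
}
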
121 struct tls_strparser *strp = &ctx->strp;
124 if (strp->copy_mode)
127 skb = tls_strp_msg_make_copy(strp);
131 tls_strp_anchor_free(strp);
132 strp->anchor = skb;
134 tcp_read_done(strp->sk, strp->stm.full_len);
135 strp->copy_mode = 1;
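
The hits at 121-135 look like the copy-on-write path that turns a zero-copy record into a private copy before it is modified (tls_strp_msg_cow upstream); a sketch with the local variables and return values assumed.

int tls_strp_msg_cow(struct tls_sw_context_rx *ctx)
{
        struct tls_strparser *strp = &ctx->strp;
        struct sk_buff *skb;

        if (strp->copy_mode)
                return 0;

        skb = tls_strp_msg_make_copy(strp);
        if (!skb)
                return -ENOMEM;

        /* Drop the anchor that referenced the TCP queue, install the
         * private copy, tell TCP the bytes were consumed, and remember
         * that the parser now owns the data.
         */
        tls_strp_anchor_free(strp);
        strp->anchor = skb;

        tcp_read_done(strp->sk, strp->stm.full_len);
        strp->copy_mode = 1;

        return 0;
}
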
144 int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst)
146 struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
148 if (strp->copy_mode) {
154 skb = alloc_skb(0, strp->sk->sk_allocation);
158 __skb_queue_tail(dst, strp->anchor);
159 strp->anchor = skb;
164 offset = strp->stm.offset;
165 len = strp->stm.full_len;
177 clone = skb_clone(iter, strp->sk->sk_allocation);
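
A trimmed sketch of tls_strp_msg_hold based on the hits above: in copy mode the whole anchor is queued on dst and replaced with an empty one, otherwise every frag_list skb overlapping [stm.offset, stm.offset + stm.full_len) is cloned onto dst. The loop structure is reconstructed and the upstream code may walk the list differently.

int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst)
{
        struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);

        if (strp->copy_mode) {
                struct sk_buff *skb;

                /* Queue the private copy and leave an empty anchor behind. */
                skb = alloc_skb(0, strp->sk->sk_allocation);
                if (!skb)
                        return -ENOMEM;

                __skb_queue_tail(dst, strp->anchor);
                strp->anchor = skb;
        } else {
                struct sk_buff *iter, *clone;
                int chunk, len, offset;

                offset = strp->stm.offset;
                len = strp->stm.full_len;

                /* Clone each frag_list skb that overlaps the record. */
                for (iter = shinfo->frag_list; iter && len > 0; iter = iter->next) {
                        if (iter->len <= offset) {
                                offset -= iter->len;
                                continue;
                        }

                        chunk = iter->len - offset;
                        offset = 0;
                        len -= chunk;

                        clone = skb_clone(iter, strp->sk->sk_allocation);
                        if (!clone)
                                return -ENOMEM;
                        __skb_queue_tail(dst, clone);
                }
        }

        return 0;
}
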
191 static void tls_strp_flush_anchor_copy(struct tls_strparser *strp)
193 struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
201 if (strp->copy_mode) {
205 strp->copy_mode = 0;
206 strp->mixed_decrypted = 0;
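
A sketch of the copy-mode flush; the frag-unref loop and the frag_list free inside the copy_mode branch are not in the hits and are assumptions.

static void tls_strp_flush_anchor_copy(struct tls_strparser *strp)
{
        struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
        int i;

        /* Release the page frags the parser attached to the anchor. */
        for (i = 0; i < shinfo->nr_frags; i++)
                __skb_frag_unref(&shinfo->frags[i], false);
        shinfo->nr_frags = 0;

        if (strp->copy_mode) {
                kfree_skb_list(shinfo->frag_list);
                shinfo->frag_list = NULL;
        }
        strp->copy_mode = 0;
        strp->mixed_decrypted = 0;
}
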
209 static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
221 if (!strp->stm.full_len) {
233 sz = tls_rx_msg_size(strp, skb);
253 strp->stm.full_len = sz;
254 if (!strp->stm.full_len)
259 while (len && strp->stm.full_len > skb->len) {
260 chunk = min_t(size_t, len, strp->stm.full_len - skb->len);
279 static int tls_strp_copyin_skb(struct tls_strparser *strp, struct sk_buff *skb,
288 if (strp->stm.full_len)
289 chunk = strp->stm.full_len - skb->len;
294 nskb = tls_strp_skb_copy(strp, in_skb, offset, chunk);
312 if (!strp->stm.full_len) {
313 sz = tls_rx_msg_size(strp, skb);
329 strp->stm.full_len = sz;
338 struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
342 if (strp->msg_ready)
345 skb = strp->anchor;
349 strp->mixed_decrypted |= !!skb_cmp_decrypted(skb, in_skb);
351 if (IS_ENABLED(CONFIG_TLS_DEVICE) && strp->mixed_decrypted)
352 ret = tls_strp_copyin_skb(strp, skb, in_skb, offset, in_len);
354 ret = tls_strp_copyin_frag(strp, skb, in_skb, offset, in_len);
360 if (strp->stm.full_len && strp->stm.full_len == skb->len) {
363 strp->msg_ready = 1;
364 tls_rx_msg_ready(strp);
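
The hits at 338-364 form the read actor handed to tcp_read_sock(); a sketch of its dispatch logic. The handling of the first chunk's decrypted bit and the error propagation via desc->error are filled in from memory and should be verified against the file.

static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
                           unsigned int offset, size_t in_len)
{
        struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
        struct sk_buff *skb;
        int ret;

        if (strp->msg_ready)
                return 0;

        skb = strp->anchor;
        if (!skb->len)
                skb_copy_decrypted(skb, in_skb);
        else
                strp->mixed_decrypted |= !!skb_cmp_decrypted(skb, in_skb);

        /* Mixed clear/encrypted data needs per-skb copies, otherwise the
         * payload can simply be appended to the anchor's page frags.
         */
        if (IS_ENABLED(CONFIG_TLS_DEVICE) && strp->mixed_decrypted)
                ret = tls_strp_copyin_skb(strp, skb, in_skb, offset, in_len);
        else
                ret = tls_strp_copyin_frag(strp, skb, in_skb, offset, in_len);
        if (ret < 0) {
                desc->error = ret;
                ret = 0;
        }

        if (strp->stm.full_len && strp->stm.full_len == skb->len) {
                desc->count = 0;

                strp->msg_ready = 1;
                tls_rx_msg_ready(strp);
        }

        return ret;
}
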
370 static int tls_strp_read_copyin(struct tls_strparser *strp)
374 desc.arg.data = strp;
379 tcp_read_sock(strp->sk, &desc, tls_strp_copyin);
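
A sketch of the copy-mode reader; the read_descriptor_t fields other than arg.data are assumptions.

static int tls_strp_read_copyin(struct tls_strparser *strp)
{
        read_descriptor_t desc;

        desc.arg.data = strp;
        desc.error = 0;
        desc.count = 1; /* give more than one skb per call */

        /* The caller holds the socket lock, so read_sock is safe here. */
        tcp_read_sock(strp->sk, &desc, tls_strp_copyin);

        return desc.error;
}
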
384 static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
394 if (likely(qshort && !tcp_epollin_ready(strp->sk, INT_MAX)))
397 shinfo = skb_shinfo(strp->anchor);
401 need_spc = strp->stm.full_len ?: TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
404 page = alloc_page(strp->sk->sk_allocation);
406 tls_strp_flush_anchor_copy(strp);
410 skb_fill_page_desc(strp->anchor, shinfo->nr_frags++,
414 strp->copy_mode = 1;
415 strp->stm.offset = 0;
417 strp->anchor->len = 0;
418 strp->anchor->data_len = 0;
419 strp->anchor->truesize = round_up(need_spc, PAGE_SIZE);
421 tls_strp_read_copyin(strp);
426 static bool tls_strp_check_queue_ok(struct tls_strparser *strp)
428 unsigned int len = strp->stm.offset + strp->stm.full_len;
432 first = skb_shinfo(strp->anchor)->frag_list;
453 static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len)
455 struct tcp_sock *tp = tcp_sk(strp->sk);
459 first = tcp_recv_skb(strp->sk, tp->copied_seq, &offset);
464 strp->anchor->len = offset + len;
465 strp->anchor->data_len = offset + len;
466 strp->anchor->truesize = offset + len;
468 skb_shinfo(strp->anchor)->frag_list = first;
470 skb_copy_header(strp->anchor, first);
471 strp->anchor->destructor = NULL;
473 strp->stm.offset = offset;
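
A sketch of how the hits at 453-473 likely fit together: the anchor skb is dressed up to describe the head of the TCP receive queue, with the queue's first skb hanging off its frag_list. The local declarations and the warning on an empty queue are assumptions.

static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len)
{
        struct tcp_sock *tp = tcp_sk(strp->sk);
        struct sk_buff *first;
        u32 offset;

        first = tcp_recv_skb(strp->sk, tp->copied_seq, &offset);
        if (WARN_ON_ONCE(!first))
                return;

        /* Make the anchor describe offset + len bytes of queued data. */
        strp->anchor->len = offset + len;
        strp->anchor->data_len = offset + len;
        strp->anchor->truesize = offset + len;

        skb_shinfo(strp->anchor)->frag_list = first;

        skb_copy_header(strp->anchor, first);
        strp->anchor->destructor = NULL;

        strp->stm.offset = offset;
}
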
476 void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
481 DEBUG_NET_WARN_ON_ONCE(!strp->msg_ready);
482 DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len);
484 if (!strp->copy_mode && force_refresh) {
485 if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len))
488 tls_strp_load_anchor_with_queue(strp, strp->stm.full_len);
491 rxm = strp_msg(strp->anchor);
492 rxm->full_len = strp->stm.full_len;
493 rxm->offset = strp->stm.offset;
494 tlm = tls_msg(strp->anchor);
495 tlm->control = strp->mark;
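
A sketch of tls_strp_msg_load: if the record has not been copied and a refresh is forced, the anchor is re-pointed at the TCP queue, then the per-record offsets and the record type are published for the upper layer. The local declarations are assumed.

void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
{
        struct strp_msg *rxm;
        struct tls_msg *tlm;

        DEBUG_NET_WARN_ON_ONCE(!strp->msg_ready);
        DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len);

        if (!strp->copy_mode && force_refresh) {
                if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len))
                        return;

                tls_strp_load_anchor_with_queue(strp, strp->stm.full_len);
        }

        rxm = strp_msg(strp->anchor);
        rxm->full_len = strp->stm.full_len;
        rxm->offset = strp->stm.offset;
        tlm = tls_msg(strp->anchor);
        tlm->control = strp->mark;
}
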
499 static int tls_strp_read_sock(struct tls_strparser *strp)
503 inq = tcp_inq(strp->sk);
507 if (unlikely(strp->copy_mode))
508 return tls_strp_read_copyin(strp);
510 if (inq < strp->stm.full_len)
511 return tls_strp_read_copy(strp, true);
513 if (!strp->stm.full_len) {
514 tls_strp_load_anchor_with_queue(strp, inq);
516 sz = tls_rx_msg_size(strp, strp->anchor);
518 tls_strp_abort_strp(strp, sz);
522 strp->stm.full_len = sz;
524 if (!strp->stm.full_len || inq < strp->stm.full_len)
525 return tls_strp_read_copy(strp, true);
528 if (!tls_strp_check_queue_ok(strp))
529 return tls_strp_read_copy(strp, false);
531 strp->msg_ready = 1;
532 tls_rx_msg_ready(strp);
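
A sketch of the main zero-copy read path implied by the hits at 499-532: check how much data TCP has queued, fall back to copy mode when the parser is already copying, when the record is incomplete, or when the queue layout is not usable in place; otherwise parse the header in place and mark the record ready. The early return for an empty queue and the error branch around tls_rx_msg_size() are assumptions.

static int tls_strp_read_sock(struct tls_strparser *strp)
{
        int sz, inq;

        inq = tcp_inq(strp->sk);
        if (inq < 1)
                return 0;

        if (unlikely(strp->copy_mode))
                return tls_strp_read_copyin(strp);

        if (inq < strp->stm.full_len)
                return tls_strp_read_copy(strp, true);

        if (!strp->stm.full_len) {
                tls_strp_load_anchor_with_queue(strp, inq);

                sz = tls_rx_msg_size(strp, strp->anchor);
                if (sz < 0) {
                        tls_strp_abort_strp(strp, sz);
                        return sz;
                }

                strp->stm.full_len = sz;

                if (!strp->stm.full_len || inq < strp->stm.full_len)
                        return tls_strp_read_copy(strp, true);
        }

        if (!tls_strp_check_queue_ok(strp))
                return tls_strp_read_copy(strp, false);

        strp->msg_ready = 1;
        tls_rx_msg_ready(strp);

        return 0;
}
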
537 void tls_strp_check_rcv(struct tls_strparser *strp)
539 if (unlikely(strp->stopped) || strp->msg_ready)
542 if (tls_strp_read_sock(strp) == -ENOMEM)
543 queue_work(tls_strp_wq, &strp->work);
547 void tls_strp_data_ready(struct tls_strparser *strp)
556 if (sock_owned_by_user_nocheck(strp->sk)) {
557 queue_work(tls_strp_wq, &strp->work);
561 tls_strp_check_rcv(strp);
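
A sketch of the data-ready hook: if the socket is currently owned by a user-space caller the parse is deferred to the workqueue, otherwise it runs inline.

void tls_strp_data_ready(struct tls_strparser *strp)
{
        /* Parsing needs a consistent view of the rx context, so defer
         * to the workqueue while user space owns the socket.
         */
        if (sock_owned_by_user_nocheck(strp->sk)) {
                queue_work(tls_strp_wq, &strp->work);
                return;
        }

        tls_strp_check_rcv(strp);
}
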
566 struct tls_strparser *strp =
569 lock_sock(strp->sk);
570 tls_strp_check_rcv(strp);
571 release_sock(strp->sk);
574 void tls_strp_msg_done(struct tls_strparser *strp)
576 WARN_ON(!strp->stm.full_len);
578 if (likely(!strp->copy_mode))
579 tcp_read_done(strp->sk, strp->stm.full_len);
581 tls_strp_flush_anchor_copy(strp);
583 strp->msg_ready = 0;
584 memset(&strp->stm, 0, sizeof(strp->stm));
586 tls_strp_check_rcv(strp);
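
A sketch of the completion path: once the upper layer is done with a record, its bytes are released back to TCP (zero-copy case) or the copied frags are flushed, the per-record state is reset, and the parser immediately looks for the next record. The else pairing of the two branches is an assumption.

void tls_strp_msg_done(struct tls_strparser *strp)
{
        WARN_ON(!strp->stm.full_len);

        if (likely(!strp->copy_mode))
                tcp_read_done(strp->sk, strp->stm.full_len);
        else
                tls_strp_flush_anchor_copy(strp);

        strp->msg_ready = 0;
        memset(&strp->stm, 0, sizeof(strp->stm));

        tls_strp_check_rcv(strp);
}
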
589 void tls_strp_stop(struct tls_strparser *strp)
591 strp->stopped = 1;
594 int tls_strp_init(struct tls_strparser *strp, struct sock *sk)
596 memset(strp, 0, sizeof(*strp));
598 strp->sk = sk;
600 strp->anchor = alloc_skb(0, GFP_KERNEL);
601 if (!strp->anchor)
604 INIT_WORK(&strp->work, tls_strp_work);
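
A sketch of the constructor; only the return values are assumed. The parser starts with zeroed state, a back-pointer to the socket, an empty anchor skb, and a work item for deferred parsing.

int tls_strp_init(struct tls_strparser *strp, struct sock *sk)
{
        memset(strp, 0, sizeof(*strp));

        strp->sk = sk;

        strp->anchor = alloc_skb(0, GFP_KERNEL);
        if (!strp->anchor)
                return -ENOMEM;

        INIT_WORK(&strp->work, tls_strp_work);

        return 0;
}
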
609 /* strp must already be stopped so that tls_strp_recv will no longer be called.
612 void tls_strp_done(struct tls_strparser *strp)
614 WARN_ON(!strp->stopped);
616 cancel_work_sync(&strp->work);
617 tls_strp_anchor_free(strp);
622 tls_strp_wq = create_workqueue("tls-strp");
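
The last hit is the one-time setup of the parser workqueue; a sketch assuming the usual pattern of allocating it once at init and tearing it down on exit (the tls_strp_dev_init/tls_strp_dev_exit names are assumptions).

static struct workqueue_struct *tls_strp_wq;

int __init tls_strp_dev_init(void)
{
        tls_strp_wq = create_workqueue("tls-strp");
        if (!tls_strp_wq)
                return -ENOMEM;

        return 0;
}

void tls_strp_dev_exit(void)
{
        destroy_workqueue(tls_strp_wq);
}
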