Lines matching refs:tls_ctx

164 struct tls_context *tls_ctx = tls_get_ctx(sk);
170 if (!tls_ctx)
173 ctx = tls_offload_ctx_tx(tls_ctx);
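
The hits at 164-173 show the file's standard lookup chain: fetch the generic TLS ULP context off the socket, bail out if none is attached, then downcast to the TX offload state. A minimal sketch of the two helpers involved, modeled on include/net/tls.h (bodies illustrative, not copied):

    static inline struct tls_context *tls_get_ctx(const struct sock *sk)
    {
            struct inet_connection_sock *icsk = inet_csk(sk);

            /* The TLS data path reads icsk_ulp_data directly; only
             * diag-style readers need rcu_dereference() here. */
            return (__force void *)icsk->icsk_ulp_data;
    }

    static inline struct tls_offload_context_tx *
    tls_offload_ctx_tx(const struct tls_context *tls_ctx)
    {
            /* Device-offload TX state hangs off priv_ctx_tx. */
            return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
    }
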
199 struct tls_context *tls_ctx = tls_get_ctx(sk);
200 struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
202 tls_ctx->sk_destruct(sk);
204 if (tls_ctx->tx_conf == TLS_HW) {
212 tls_device_queue_ctx_destruction(tls_ctx);
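
Lines 199-212 are the socket destructor for device-offloaded TX: chain to the destructor that was saved when the TLS ULP was installed, tear down hardware TX state only if TX was actually offloaded, then defer the final free. A condensed sketch (the TLS_HW branch, which frees queued records and the fallback crypto state, is reduced to a comment):

    static void tls_device_sk_destruct(struct sock *sk)
    {
            struct tls_context *tls_ctx = tls_get_ctx(sk);

            /* Run the original destructor saved at ULP setup time. */
            tls_ctx->sk_destruct(sk);

            if (tls_ctx->tx_conf == TLS_HW) {
                    /* free unacked records, the open record, and the
                     * crypto state used by the SW fallback (elided) */
            }

            /* Freeing is deferred so a concurrent netdev teardown can
             * still reach the context. */
            tls_device_queue_ctx_destruction(tls_ctx);
    }
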
218 struct tls_context *tls_ctx = tls_get_ctx(sk);
220 tls_free_partial_record(sk, tls_ctx);
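
The pair at 218-220 appears to be the entire TX resource-free hook: look up the context and release whatever partial record is still pending. Sketch:

    void tls_device_free_resources_tx(struct sock *sk)
    {
            struct tls_context *tls_ctx = tls_get_ctx(sk);

            /* Drop the pages backing an unsent partial record. */
            tls_free_partial_record(sk, tls_ctx);
    }
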
225 struct tls_context *tls_ctx = tls_get_ctx(sk);
228 WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
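
Line 228 is a one-shot scheduling flag: test_and_set_bit() simultaneously requests a TX resync and detects a double request, so the WARN_ON fires if a previously scheduled resync was never serviced. Sketched in full (recent kernels also pass the seen/expected TCP sequence numbers for tracing; that is omitted here):

    void tls_offload_tx_resync_request(struct sock *sk)
    {
            struct tls_context *tls_ctx = tls_get_ctx(sk);

            /* A second request before the first is handled is a bug. */
            WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
    }
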
232 static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
244 rcd_sn = tls_ctx->tx.rec_seq;
248 netdev = rcu_dereference_protected(tls_ctx->netdev,
258 clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
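
Lines 232-258 service that request: read the current TX record sequence, look up the netdev under device_offload_lock (hence rcu_dereference_protected() rather than plain rcu_dereference()), call the driver's resync op, and clear the flag with release semantics only on success. Condensed sketch:

    static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
                                     u32 seq)
    {
            u8 *rcd_sn = tls_ctx->tx.rec_seq;
            struct net_device *netdev;
            int err = 0;

            down_read(&device_offload_lock);
            netdev = rcu_dereference_protected(tls_ctx->netdev,
                                               lockdep_is_held(&device_offload_lock));
            if (netdev)
                    err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
                                                             rcd_sn,
                                                             TLS_OFFLOAD_CTX_DIR_TX);
            up_read(&device_offload_lock);
            if (err)
                    return;

            /* Release pairs with the test_and_set_bit() at line 228. */
            clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
    }
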
429 struct tls_context *tls_ctx = tls_get_ctx(sk);
430 struct tls_prot_info *prot = &tls_ctx->prot_info;
431 struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
457 if (tls_is_partially_sent_record(tls_ctx)) {
458 rc = tls_push_partial_record(sk, tls_ctx, flags);
547 tls_device_record_close(sk, tls_ctx, record,
551 tls_ctx,
560 tls_ctx->pending_open_record_frags = more;
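
The 429-560 hits are tls_push_data()'s use of the context: per-cipher constants via prot_info (430), a leftover partial record flushed before new data is accepted (457-458), records closed and pushed as they fill (547-551), and pending_open_record_frags tracking whether a record is still open (560). A heavily condensed skeleton of that flow; the copy loop, allocation, and error paths are elided, and argument lists vary between kernel versions:

    static int tls_push_data_skeleton(struct sock *sk, int flags,
                                      unsigned char record_type, bool more)
    {
            struct tls_context *tls_ctx = tls_get_ctx(sk);
            struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
            struct page_frag *pfrag = sk_page_frag(sk);
            struct tls_record_info *record;
            int rc;

            /* Flush a record that was only partially pushed last time. */
            if (tls_is_partially_sent_record(tls_ctx)) {
                    rc = tls_push_partial_record(sk, tls_ctx, flags);
                    if (rc < 0)
                            return rc;
            }

            /* ... copy caller data into ctx->open_record (elided) ... */

            /* Once the record fills, or the caller is done, seal it and
             * hand it to the transmit path. */
            record = ctx->open_record;
            tls_device_record_close(sk, tls_ctx, record, pfrag, record_type);
            rc = tls_push_record(sk, tls_ctx, ctx, record, flags);

            /* Remember whether the next call continues an open record. */
            tls_ctx->pending_open_record_frags = more;
            return rc;
    }
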
571 struct tls_context *tls_ctx = tls_get_ctx(sk);
574 if (!tls_ctx->zerocopy_sendfile)
577 mutex_lock(&tls_ctx->tx_lock);
591 mutex_unlock(&tls_ctx->tx_lock);
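
571-591 look like a zero-copy sendfile TX entry point: back out early when zerocopy_sendfile is not enabled on the context, otherwise do the push under tls_ctx->tx_lock. The lock is a mutex rather than just the socket lock because open-record state must stay consistent across operations that may sleep. The shape, with the body reduced to a comment (the function name and arguments are assumptions, not from the listing):

    static int tls_device_zc_sendfile(struct sock *sk, struct page *page,
                                      int offset, size_t size, int flags)
    {
            struct tls_context *tls_ctx = tls_get_ctx(sk);
            int rc;

            if (!tls_ctx->zerocopy_sendfile)
                    return -EOPNOTSUPP;    /* or fall back to copying */

            mutex_lock(&tls_ctx->tx_lock);
            /* ... queue the page into the open record without copying
             * and push it (elided) ... */
            rc = 0;
            mutex_unlock(&tls_ctx->tx_lock);
            return rc;
    }
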
598 struct tls_context *tls_ctx = tls_get_ctx(sk);
601 if (!tls_is_partially_sent_record(tls_ctx))
604 mutex_lock(&tls_ctx->tx_lock);
607 if (tls_is_partially_sent_record(tls_ctx)) {
613 mutex_unlock(&tls_ctx->tx_lock);
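
598-613 are a textbook double-checked locking pattern: tls_is_partially_sent_record() is tested once lock-free as a cheap fast path, then again under tx_lock before acting, because another writer may have flushed the partial record while this caller slept on the mutex. Sketch (the function name is an assumption):

    static void tls_push_pending_partial(struct sock *sk, int flags)
    {
            struct tls_context *tls_ctx = tls_get_ctx(sk);

            /* Optimistic, lock-free fast path. */
            if (!tls_is_partially_sent_record(tls_ctx))
                    return;

            mutex_lock(&tls_ctx->tx_lock);
            /* Recheck: the record may be gone once we own the lock. */
            if (tls_is_partially_sent_record(tls_ctx))
                    tls_push_partial_record(sk, tls_ctx, flags);
            mutex_unlock(&tls_ctx->tx_lock);
    }
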
699 static void tls_device_resync_rx(struct tls_context *tls_ctx,
702 struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
707 netdev = rcu_dereference(tls_ctx->netdev);
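
The RX-side resync at 699-707 reads tls_ctx->netdev with plain rcu_dereference() inside an RCU read-side section instead of taking device_offload_lock as the TX variant at 248 does, since it can run in softirq context where sleeping on the semaphore is not an option. Condensed sketch (the full function also consults the RX offload context fetched at 702 for bookkeeping):

    static void tls_device_resync_rx(struct tls_context *tls_ctx,
                                     struct sock *sk, u32 seq, u8 *rcd_sn)
    {
            struct net_device *netdev;

            rcu_read_lock();
            netdev = rcu_dereference(tls_ctx->netdev);
            if (netdev)
                    netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
                                                       rcd_sn,
                                                       TLS_OFFLOAD_CTX_DIR_RX);
            rcu_read_unlock();
    }
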
773 struct tls_context *tls_ctx = tls_get_ctx(sk);
782 if (tls_ctx->rx_conf != TLS_HW)
784 if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
787 prot = &tls_ctx->prot_info;
788 rx_ctx = tls_offload_ctx_rx(tls_ctx);
789 memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
833 tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
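
773-833 gate the per-record RX resync: nothing to do unless RX is in TLS_HW mode (782), nothing to do once the device has been degraded by a teardown (784), and the current record sequence is snapshotted into a stack buffer (789) before the device is asked to resync (833). Skeleton of that gating, with the resync-type decision elided:

    static void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len,
                                             u32 seq)
    {
            struct tls_context *tls_ctx = tls_get_ctx(sk);
            u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
            struct tls_prot_info *prot;

            if (tls_ctx->rx_conf != TLS_HW)
                    return;
            if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
                    return;

            prot = &tls_ctx->prot_info;
            /* Snapshot: rx.rec_seq keeps advancing as records complete. */
            memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);

            /* ... decide, per resync type, whether this record boundary
             * should be reported to the device (elided) ... */
            tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
    }
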
836 static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
872 struct tls_prot_info *prot = &tls_ctx->prot_info;
875 memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
878 tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
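
The core-ctrl variant at 836-878 ends the same way: copy rec_seq into a stack buffer (only prot->rec_seq_size bytes are meaningful) and resync at tcp_sk(sk)->copied_seq, the point up to which the application has consumed the stream. A hypothetical helper isolating that tail:

    static void core_ctrl_resync_tail(struct sock *sk,
                                      struct tls_context *tls_ctx)
    {
            struct tls_prot_info *prot = &tls_ctx->prot_info;
            u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];

            /* Stable copy: the driver must not see rec_seq move. */
            memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
            tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq, rcd_sn);
    }
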
884 tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
886 struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
894 switch (tls_ctx->crypto_recv.info.cipher_type) {
901 cipher_desc = get_cipher_desc(tls_ctx->crypto_recv.info.cipher_type);
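
884-901 open the software fallback that decrypts records the NIC delivered as (partly) ciphertext. The cipher from crypto_recv is validated first, since the fallback only handles a known set, and get_cipher_desc() then supplies the size constants for walking the record. A sketch of the gate (the AES-GCM-only set matches what the hits suggest, but treat it as an assumption):

    static int
    tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
    {
            const struct tls_cipher_desc *cipher_desc;

            switch (tls_ctx->crypto_recv.info.cipher_type) {
            case TLS_CIPHER_AES_GCM_128:
            case TLS_CIPHER_AES_GCM_256:
                    break;
            default:
                    return -EINVAL;
            }
            cipher_desc = get_cipher_desc(tls_ctx->crypto_recv.info.cipher_type);

            /* ... use cipher_desc's size constants to walk the record
             * and decrypt the ciphertext spans in software (elided) ... */
            return 0;
    }
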
981 int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
983 struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
984 struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
998 tls_ctx->rx.rec_seq, rxm->full_len,
1001 if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
1009 return tls_device_reencrypt(sk, tls_ctx);
1021 tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
1026 return tls_device_reencrypt(sk, tls_ctx);
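
981-1026 produce the per-record verdict. The device marks each skb it managed to decrypt, so after ANDing the flags across the whole record (998 is where the result is traced) there are three cases: entirely plaintext, entirely ciphertext, or mixed. The degraded case (1001) skips resync entirely. Condensed sketch; the return convention (non-zero when the device already decrypted everything) is inferred from the flow:

    int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
    {
            struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
            struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
            struct sk_buff *skb = tls_strp_msg(sw_ctx);
            int is_decrypted = skb->decrypted;
            int is_encrypted = !is_decrypted;

            /* ... AND each frag_list skb's ->decrypted bit into the two
             * flags so they describe the whole record (elided) ... */

            if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
                    if (likely(is_encrypted || is_decrypted))
                            return is_decrypted;
                    /* Offload went away mid-record: finish in software. */
                    return tls_device_reencrypt(sk, tls_ctx);
            }

            if (is_decrypted)
                    return is_decrypted;    /* device handled it all */

            if (is_encrypted) {
                    /* Whole record is ciphertext: give the device a chance
                     * to lock back on, let the SW path decrypt it. */
                    tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
                    return 0;
            }

            /* Mixed record: decrypt the ciphertext parts in software. */
            return tls_device_reencrypt(sk, tls_ctx);
    }
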
1047 struct tls_context *tls_ctx = tls_get_ctx(sk);
1048 struct tls_prot_info *prot = &tls_ctx->prot_info;
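
1047-1048 repeat a fetch seen throughout the file: prot_info is embedded in tls_context and caches per-version, per-cipher constants so hot paths never re-derive them. Roughly the fields the hits above lean on, following include/net/tls.h (selection is partial):

    struct tls_prot_info {
            u16 version;        /* TLS_1_2_VERSION / TLS_1_3_VERSION */
            u16 cipher_type;
            u16 prepend_size;   /* record header + explicit IV on TX */
            u16 tag_size;
            u16 overhead_size;  /* prepend_size plus tag */
            u16 iv_size;
            u16 rec_seq_size;   /* valid bytes in rx/tx rec_seq */
            /* (remaining fields omitted) */
    };
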
1288 struct tls_context *tls_ctx = tls_get_ctx(sk);
1292 netdev = rcu_dereference_protected(tls_ctx->netdev,
1297 netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
1300 if (tls_ctx->tx_conf != TLS_HW) {
1302 rcu_assign_pointer(tls_ctx->netdev, NULL);
1304 set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
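
Finally, 1288-1304 unhook the RX side from the netdev (this reads like tls_device_offload_cleanup_rx): the netdev pointer is resolved under device_offload_lock, the driver's tls_dev_del op is invoked for the RX direction, and the netdev reference is dropped immediately only when TX is not also offloaded; otherwise TLS_RX_DEV_CLOSED records that RX is done so the TX teardown can release it later. Sketch:

    void tls_device_offload_cleanup_rx(struct sock *sk)
    {
            struct tls_context *tls_ctx = tls_get_ctx(sk);
            struct net_device *netdev;

            down_read(&device_offload_lock);
            netdev = rcu_dereference_protected(tls_ctx->netdev,
                                               lockdep_is_held(&device_offload_lock));
            if (!netdev)
                    goto out;

            netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
                                            TLS_OFFLOAD_CTX_DIR_RX);

            if (tls_ctx->tx_conf != TLS_HW) {
                    /* TX never pinned the device: release it now. */
                    dev_put(netdev);
                    rcu_assign_pointer(tls_ctx->netdev, NULL);
            } else {
                    /* TX teardown will drop the reference later. */
                    set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
            }
    out:
            up_read(&device_offload_lock);
    }
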