Lines matching refs:ctx (net/tls/tls_device.c)
56 static void tls_device_free_ctx(struct tls_context *ctx)
58 if (ctx->tx_conf == TLS_HW) {
59 kfree(tls_offload_ctx_tx(ctx));
60 kfree(ctx->tx.rec_seq);
61 kfree(ctx->tx.iv);
64 if (ctx->rx_conf == TLS_HW)
65 kfree(tls_offload_ctx_rx(ctx));
67 tls_ctx_free(NULL, ctx);
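
tls_device_free_ctx() (source lines 56-67) frees the TX crypto material (offload context, record-sequence buffer, IV) only when the TX direction was configured for hardware offload, and the RX offload context only when RX was. A minimal userspace C sketch of that conditional teardown; the type and constant names are illustrative stand-ins for the kernel's:

#include <stdlib.h>

enum conf { CONF_SW, CONF_HW };            /* stand-in for TLS_SW / TLS_HW */

struct dir_state { void *offload; void *rec_seq; void *iv; };

struct tls_ctx {
        enum conf tx_conf, rx_conf;
        struct dir_state tx, rx;
};

/* Free per-direction state only when that direction was actually
 * HW-offloaded, mirroring the tx_conf/rx_conf checks above. */
static void free_tls_ctx(struct tls_ctx *c)
{
        if (c->tx_conf == CONF_HW) {
                free(c->tx.offload);
                free(c->tx.rec_seq);
                free(c->tx.iv);
        }
        if (c->rx_conf == CONF_HW)
                free(c->rx.offload);
        free(c);
}
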
72 struct tls_context *ctx, *tmp;
80 list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
81 struct net_device *netdev = ctx->netdev;
83 if (netdev && ctx->tx_conf == TLS_HW) {
84 netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
87 ctx->netdev = NULL;
90 list_del(&ctx->list);
91 tls_device_free_ctx(ctx);
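
The surrounding function (source lines 72-91) is the deferred-destruction worker: it walks a garbage-collection list with a _safe iterator, asks the device to drop its state via tls_dev_del() before freeing, and clears ctx->netdev. A sketch of the same walk-and-free shape; dev_del() is a hypothetical stand-in for tlsdev_ops->tls_dev_del():

#include <stdio.h>
#include <stdlib.h>

struct gc_ctx {
        struct gc_ctx *next;
        int hw_tx;                 /* TX was offloaded to the device */
};

static void dev_del(struct gc_ctx *c)
{
        printf("dropping device state for %p\n", (void *)c);
}

/* Walk a detached to-free list the way the GC task walks gc_list:
 * tell the device to release its state first, then free the ctx.
 * The _safe iteration saves the next pointer before freeing. */
static void gc_run(struct gc_ctx *head)
{
        struct gc_ctx *c, *tmp;

        for (c = head; c; c = tmp) {
                tmp = c->next;
                if (c->hw_tx)
                        dev_del(c);        /* device first, memory second */
                free(c);
        }
}
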
95 static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
100 if (unlikely(!refcount_dec_and_test(&ctx->refcount)))
103 list_move_tail(&ctx->list, &tls_device_gc_list);
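
tls_device_queue_ctx_destruction() (source lines 95-103) only queues the context for the worker above once the last reference is gone. A C11-atomics sketch of that dec-and-test pattern; queue_for_gc() stands in for the kernel's list_move_tail()-plus-schedule-work step:

#include <stdatomic.h>

struct ref_ctx { atomic_int refcount; };

static void queue_for_gc(struct ref_ctx *c) { (void)c; }

/* Only the thread that drops the LAST reference queues the context
 * for destruction.  atomic_fetch_sub() returns the previous value,
 * so previous == 1 means the count just hit zero. */
static void queue_ctx_destruction(struct ref_ctx *c)
{
        if (atomic_fetch_sub(&c->refcount, 1) != 1)
                return;            /* someone else still holds a ref */
        queue_for_gc(c);
}
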
154 struct tls_offload_context_tx *ctx;
161 ctx = tls_offload_ctx_tx(tls_ctx);
163 spin_lock_irqsave(&ctx->lock, flags);
164 info = ctx->retransmit_hint;
166 ctx->retransmit_hint = NULL;
168 list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
177 ctx->unacked_record_sn += deleted_records;
178 spin_unlock_irqrestore(&ctx->lock, flags);
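
This block (source lines 154-178) trims the TX record list as TCP ACKs arrive: the retransmit hint is invalidated, fully-acked records are freed in order, and unacked_record_sn advances by the number deleted. A sketch of the core walk, assuming a singly linked, sequence-ordered list (the kernel holds ctx->lock with IRQs off around the real one):

#include <stdint.h>
#include <stdlib.h>

struct tx_record {
        struct tx_record *next;    /* list ordered by sequence number */
        uint32_t end_seq;          /* TCP seq just past this record */
};

/* Drop fully-acked records from the front of the list and report how
 * many were deleted, so the caller can advance unacked_record_sn by
 * that amount.  The (int32_t) cast gives the wrap-safe before()/
 * after() comparison that TCP sequence numbers need. */
static unsigned int clean_acked(struct tx_record **head, uint32_t acked_seq)
{
        unsigned int deleted = 0;
        struct tx_record *r, *tmp;

        for (r = *head; r; r = tmp) {
                tmp = r->next;
                if ((int32_t)(r->end_seq - acked_seq) > 0)
                        break;     /* first record not yet fully acked */
                free(r);
                deleted++;
        }
        *head = r;
        return deleted;
}
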
188 struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
193 if (ctx->open_record)
194 destroy_record(ctx->open_record);
195 delete_all_records(ctx);
196 crypto_free_aead(ctx->aead_send);
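
The socket destructor path (source lines 188-196) tears TX state down in a fixed order: the still-open record, then the closed-record list, then the AEAD transform. A sketch with hypothetical stand-ins for the three helpers:

#include <stdlib.h>

struct rec;                        /* opaque record in this sketch */

struct tx_teardown {
        struct rec *open_record;   /* record still being filled */
        void *aead_send;           /* crypto handle */
};

static void destroy_record(struct rec *r) { free(r); }
static void delete_all_records(struct tx_teardown *c) { (void)c; }
static void free_aead(void *aead) { free(aead); }

/* The still-open record first, then the closed-record list, then the
 * AEAD transform that was used to fill them. */
static void tx_destruct(struct tx_teardown *c)
{
        if (c->open_record)
                destroy_record(c->open_record);
        delete_all_records(c);
        free_aead(c->aead_send);
}
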
272 struct tls_context *ctx,
277 struct tls_prot_info *prot = &ctx->prot_info;
286 if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
287 tls_device_resync_tx(sk, ctx, tp->write_seq);
289 tls_advance_record_sn(sk, prot, &ctx->tx);
302 return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
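
Record-push bookkeeping (source lines 272-302): once a record is handed off and pushed via tls_push_sg(), the record sequence number advances. The helper behind tls_advance_record_sn() is a big-endian increment over the 8-byte sequence (the kernel's tls_bigint_increment; it also folds the new sequence into the per-record IV, omitted here):

#include <stdint.h>

/* Advance the 8-byte big-endian record sequence number in place. */
static void advance_rec_seq(uint8_t seq[8])
{
        int i;

        for (i = 7; i >= 0; i--)
                if (++seq[i] != 0) /* stop once a byte doesn't wrap */
                        break;
}
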
306 struct tls_context *ctx,
311 struct tls_prot_info *prot = &ctx->prot_info;
331 tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
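
tls_fill_prepend() (used at source line 331) writes the record prepend into the first fragment: the 5-byte TLS record header, followed for AES-GCM by the explicit nonce. A sketch of just the header layout for TLS 1.2:

#include <stdint.h>

/* Content type, legacy protocol version, then the 16-bit record
 * length in network byte order.  The explicit nonce that follows the
 * header for AES-GCM is omitted here. */
static void fill_record_header(uint8_t hdr[5], uint8_t content_type,
                               uint16_t len)
{
        hdr[0] = content_type;     /* 23 == application_data */
        hdr[1] = 0x03;             /* TLS 1.2 on the wire: 0x0303 */
        hdr[2] = 0x03;
        hdr[3] = (uint8_t)(len >> 8);
        hdr[4] = (uint8_t)len;
}
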
423 struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
424 struct tls_record_info *record = ctx->open_record;
459 rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
465 record = ctx->open_record;
476 ctx->open_record = NULL;
484 record = ctx->open_record;
518 ctx->open_record = NULL;
525 ctx,
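
Source lines 423-525 are the tls_push_data() loop: ctx->open_record is allocated on demand (tls_do_allocation()), filled from the caller's data, and reset to NULL after a full record is closed and pushed. A self-contained sketch of that open-record accumulation pattern; MAX_PAYLOAD and push_record() are illustrative stand-ins:

#include <stdlib.h>
#include <string.h>

#define MAX_PAYLOAD 16384          /* TLS maximum plaintext per record */

struct open_record { size_t len; unsigned char buf[MAX_PAYLOAD]; };

static void push_record(struct open_record *rec) { free(rec); }

/* Allocate an open record on demand, fill it, and once full close it,
 * hand it off, and reset the pointer so the next pass allocates a
 * fresh one. */
static int push_data(struct open_record **open,
                     const unsigned char *data, size_t len)
{
        while (len) {
                size_t room, copy;

                if (!*open) {                      /* tls_do_allocation() */
                        *open = calloc(1, sizeof(**open));
                        if (!*open)
                                return -1;
                }
                room = MAX_PAYLOAD - (*open)->len;
                copy = len < room ? len : room;
                memcpy((*open)->buf + (*open)->len, data, copy);
                (*open)->len += copy;
                data += copy;
                len -= copy;
                if ((*open)->len == MAX_PAYLOAD) { /* record full */
                        push_record(*open);
                        *open = NULL;
                }
        }
        return 0;
}
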
667 void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
669 if (tls_is_partially_sent_record(ctx)) {
675 tls_push_partial_record(sk, ctx,
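
tls_device_write_space() (source lines 667-675) retries a record that TCP previously accepted only in part. A sketch of the callback shape; the names are hypothetical:

#include <stdbool.h>

struct ws_ctx { bool partially_sent; };

/* Stand-in for tls_push_partial_record(). */
static void push_partial(struct ws_ctx *c) { c->partially_sent = false; }

/* When TCP frees up socket buffer space, retry the pending partial
 * record, if any. */
static void write_space(struct ws_ctx *c)
{
        if (c->partially_sent)
                push_partial(c);
}
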
820 struct tls_offload_context_rx *ctx,
826 if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
829 if (ctx->resync_nh_do_now)
832 if (ctx->resync_nh_reset) {
833 ctx->resync_nh_reset = 0;
834 ctx->resync_nh.decrypted_failed = 1;
835 ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
839 if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
843 if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
844 ctx->resync_nh.decrypted_tgt *= 2;
846 ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;
853 ctx->resync_nh_do_now = 1;
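
The CORE_NEXT_HINT resync pacing (source lines 832-846): after a clean record resets the state, each decrypt failure counts toward a target, and only crossing the target triggers a resync request, after which the target doubles up to a cap and then grows linearly. A standalone sketch; the interval constants are illustrative stand-ins for TLS_DEVICE_RESYNC_NH_*_IVAL:

#include <stdbool.h>

#define NH_START_IVAL 2
#define NH_MAX_IVAL 128

struct resync_nh { unsigned int failed, tgt; bool reset; };

/* Returns true when the caller should ask the device to resync.
 * Backing off this way keeps a badly out-of-sync stream from
 * requesting a resync on every record. */
static bool should_request_resync(struct resync_nh *nh)
{
        if (nh->reset) {           /* a record decrypted cleanly */
                nh->reset = false;
                nh->failed = 1;
                nh->tgt = NH_START_IVAL;
                return false;
        }
        if (++nh->failed <= nh->tgt)
                return false;      /* not at the target yet */
        if (nh->tgt < NH_MAX_IVAL)
                nh->tgt *= 2;      /* exponential ramp */
        else
                nh->tgt += NH_MAX_IVAL; /* linear past the cap */
        return true;
}
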
954 struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
969 ctx->sw.decrypted |= is_decrypted;
987 ctx->resync_nh_reset = 1;
991 tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
995 ctx->resync_nh_reset = 1;
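
tls_device_decrypted() (source lines 954-995) merges the per-skb decryption status and then branches three ways: fully decrypted, fully encrypted (feed the resync machinery), or a mix (software must re-handle the record). A sketch of that decision, with sw_reencrypt() and core_rx_resync() as hypothetical stand-ins:

#include <stdbool.h>

struct rx_state { bool resync_nh_reset; };

static int sw_reencrypt(void) { return 0; }
static void core_rx_resync(struct rx_state *s) { (void)s; }

static int on_record(struct rx_state *s, bool decrypted, bool encrypted)
{
        if (decrypted) {
                s->resync_nh_reset = true;   /* device is in sync */
                return 0;
        }
        if (encrypted) {
                core_rx_resync(s);           /* device missed it all */
                return 0;
        }
        s->resync_nh_reset = true;
        return sw_reencrypt();               /* partially decrypted */
}
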
999 static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
1003 refcount_set(&ctx->refcount, 1);
1005 ctx->netdev = netdev;
1007 list_add_tail(&ctx->list, &tls_device_list);
1010 ctx->sk_destruct = sk->sk_destruct;
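
tls_device_attach() (source lines 999-1010) does four things visible above: sets the refcount to 1, records the netdev, adds the context to the global device list, and saves the socket's original destructor so teardown can chain to it. The same shape in a standalone sketch; a plain singly linked list stands in for the kernel list:

#include <stdatomic.h>

struct net_device;                 /* opaque in this sketch */
struct sock { void (*sk_destruct)(struct sock *); };

struct dev_ctx {
        atomic_int refcount;
        struct net_device *netdev;
        struct dev_ctx *next;      /* stand-in for the kernel list */
        void (*saved_destruct)(struct sock *);
};

static struct dev_ctx *device_list;

static void attach(struct dev_ctx *c, struct sock *sk,
                   struct net_device *dev)
{
        atomic_store(&c->refcount, 1);
        c->netdev = dev;
        c->next = device_list;     /* kernel uses list_add_tail */
        device_list = c;
        c->saved_destruct = sk->sk_destruct;
}
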
1015 int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
1029 if (!ctx)
1032 if (ctx->priv_ctx_tx)
1045 crypto_info = &ctx->crypto_send.info;
1078 ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
1080 if (!ctx->tx.iv) {
1085 memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
1088 ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
1089 if (!ctx->tx.rec_seq) {
1099 memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
1113 ctx->push_pending_record = tls_device_push_pending_record;
1141 * ctx was added to tls_device_list.
1149 ctx->priv_ctx_tx = offload_ctx;
1151 &ctx->crypto_send.info,
1158 tls_device_attach(ctx, sk, netdev);
1178 kfree(ctx->tx.rec_seq);
1180 kfree(ctx->tx.iv);
1183 ctx->priv_ctx_tx = NULL;
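
In the TX setup path (source lines 1015-1183), the IV buffer is sized for salt || iv with the handshake IV copied in at the salt offset, rec_seq is duplicated kmemdup()-style, and the error path (lines 1178-1183) unwinds in reverse order. A sketch of that allocate-then-unwind discipline:

#include <stdlib.h>
#include <string.h>

#define SALT_SIZE 4                /* TLS_CIPHER_AES_GCM_128_SALT_SIZE */

struct tx_crypto { unsigned char *iv; unsigned char *rec_seq; };

/* Each failure label releases exactly what was set up before it. */
static int setup_tx_crypto(struct tx_crypto *tx,
                           const unsigned char *iv, size_t iv_size,
                           const unsigned char *rec_seq, size_t rec_seq_size)
{
        tx->iv = malloc(SALT_SIZE + iv_size);
        if (!tx->iv)
                goto err;
        memcpy(tx->iv + SALT_SIZE, iv, iv_size);

        tx->rec_seq = malloc(rec_seq_size);
        if (!tx->rec_seq)
                goto err_free_iv;
        memcpy(tx->rec_seq, rec_seq, rec_seq_size);
        return 0;

err_free_iv:
        free(tx->iv);
        tx->iv = NULL;
err:
        return -1;
}
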
1189 int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
1196 if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
1216 * ctx was added to tls_device_list.
1231 ctx->priv_ctx_rx = context;
1232 rc = tls_set_sw_offload(sk, ctx, 0);
1237 &ctx->crypto_recv.info,
1239 info = (void *)&ctx->crypto_recv.info;
1245 tls_device_attach(ctx, sk, netdev);
1257 ctx->priv_ctx_rx = NULL;
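
tls_set_device_offload_rx() (source lines 1189-1257) refuses anything but TLS 1.2 before allocating any RX state. A one-function sketch of that gate; -1 stands in for the kernel's error return:

#include <stdint.h>

#define TLS_1_2 0x0303             /* TLS_1_2_VERSION on the wire */

static int rx_offload_supported(uint16_t version)
{
        return version == TLS_1_2 ? 0 : -1;
}
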
1291 struct tls_context *ctx, *tmp;
1299 list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
1300 if (ctx->netdev != netdev ||
1301 !refcount_inc_not_zero(&ctx->refcount))
1304 list_move(&ctx->list, &list);
1308 list_for_each_entry_safe(ctx, tmp, &list, list) {
1312 WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
1317 WRITE_ONCE(ctx->netdev, NULL);
1320 set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
1329 if (ctx->tx_conf == TLS_HW)
1330 netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
1332 if (ctx->rx_conf == TLS_HW &&
1333 !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
1334 netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
1345 list_move_tail(&ctx->list, &tls_device_down_list);
1352 if (refcount_dec_and_test(&ctx->refcount)) {
1356 list_del(&ctx->list);
1357 tls_device_free_ctx(ctx);
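
The NETDEV_DOWN walk (source lines 1291-1357) only moves a context onto its private list if refcount_inc_not_zero() succeeds, so it never races with a context whose last reference is already gone; at the end it drops that reference and frees if it was the last. A C11 sketch of inc-not-zero itself:

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only if the object is still live.  The weak CAS
 * reloads 'old' on failure, so the loop re-checks for zero each
 * round. */
static bool refcount_inc_not_zero(atomic_int *ref)
{
        int old = atomic_load(ref);

        while (old != 0)
                if (atomic_compare_exchange_weak(ref, &old, old + 1))
                        return true;   /* reference acquired */
        return false;                  /* already dying; skip this ctx */
}
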