Lines matching references to csk in drivers/scsi/cxgbi/libcxgbi.c (the Chelsio cxgb3i/cxgb4i iSCSI offload library). The number at the start of each line below is that file's source line; only the matching lines are shown, so function bodies appear with gaps.
98 struct cxgbi_sock *csk;
103 csk = pmap->port_csk[i];
106 "csk 0x%p, cdev 0x%p, offload down.\n",
107 csk, cdev);
108 spin_lock_bh(&csk->lock);
109 cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
110 cxgbi_sock_closed(csk);
111 spin_unlock_bh(&csk->lock);
112 cxgbi_sock_put(csk);
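
Lines 98-112 are the offload-down path run when a device goes away: every connection still in the port map is flagged CTPF_OFFLOAD_DOWN under its own lock, forced through the close path, and the map's reference is dropped. A reconstructed sketch of the surrounding loop; the loop bound (pmap->max_connect) and the slot-clearing step are assumptions consistent with the lines shown:

    struct cxgbi_ports_map *pmap = &cdev->pmap;
    struct cxgbi_sock *csk;
    int i;

    for (i = 0; i < pmap->max_connect; i++) {       /* bound assumed */
            if (pmap->port_csk[i]) {
                    csk = pmap->port_csk[i];
                    pmap->port_csk[i] = NULL;       /* unhook from the map first */
                    spin_lock_bh(&csk->lock);
                    cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
                    cxgbi_sock_closed(csk);         /* releases offload resources */
                    spin_unlock_bh(&csk->lock);
                    cxgbi_sock_put(csk);            /* drop the map's reference */
            }
    }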
424 struct cxgbi_sock *csk = pmap->port_csk[i];
426 if (csk) {
427 if (csk->port_id == port_id) {
429 return csk;
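
Lines 424-429 are the body of find_sock_on_port(), which scans the same port map for any connection egressing through a given port (its caller appears at lines 2837-2841). A simplified sketch; the pmap->lock locking and the early-exit shape are assumptions:

    static struct cxgbi_sock *find_sock_on_port(struct cxgbi_device *cdev,
                                                unsigned char port_id)
    {
            struct cxgbi_ports_map *pmap = &cdev->pmap;
            int i;

            spin_lock_bh(&pmap->lock);
            for (i = 0; i < pmap->max_connect; i++) {
                    struct cxgbi_sock *csk = pmap->port_csk[i];

                    if (csk && csk->port_id == port_id) {
                            spin_unlock_bh(&pmap->lock);
                            return csk;
                    }
            }
            spin_unlock_bh(&pmap->lock);
            return NULL;
    }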
439 static int sock_get_port(struct cxgbi_sock *csk)
441 struct cxgbi_device *cdev = csk->cdev;
449 cdev, csk->port_id, cdev->ports[csk->port_id]->name);
453 if (csk->csk_family == AF_INET)
454 port = &csk->saddr.sin_port;
456 port = &csk->saddr6.sin6_port;
468 cdev, csk->port_id, cdev->ports[csk->port_id]->name);
480 pmap->port_csk[idx] = csk;
482 cxgbi_sock_get(csk);
485 cdev, csk->port_id,
486 cdev->ports[csk->port_id]->name,
495 cdev, csk->port_id, cdev->ports[csk->port_id]->name,
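
sock_get_port() (lines 439-495) allocates the connection's TCP source port out of the device port map: the family check at lines 453-456 picks which sockaddr field receives it, and a free slot index is turned into a port number. A condensed sketch of the elided allocation step; the pmap->next rotation and sport_base offset are assumptions consistent with the slot store at line 480:

    unsigned int start, idx;
    /* port points at saddr.sin_port or saddr6.sin6_port, lines 453-456 */

    spin_lock_bh(&pmap->lock);
    if (pmap->used >= pmap->max_connect) {
            spin_unlock_bh(&pmap->lock);
            return -EADDRNOTAVAIL;                  /* all ports in use */
    }
    start = idx = pmap->next;                       /* rotate to spread reuse */
    do {
            if (++idx >= pmap->max_connect)
                    idx = 0;
            if (!pmap->port_csk[idx]) {
                    pmap->used++;
                    *port = htons(pmap->sport_base + idx);
                    pmap->next = idx;
                    pmap->port_csk[idx] = csk;      /* line 480 */
                    spin_unlock_bh(&pmap->lock);
                    cxgbi_sock_get(csk);            /* map holds a ref, line 482 */
                    return 0;
            }
    } while (idx != start);
    spin_unlock_bh(&pmap->lock);
    return -EADDRNOTAVAIL;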
500 static void sock_put_port(struct cxgbi_sock *csk)
502 struct cxgbi_device *cdev = csk->cdev;
506 if (csk->csk_family == AF_INET)
507 port = &csk->saddr.sin_port;
509 port = &csk->saddr6.sin6_port;
517 cdev, csk->port_id,
518 cdev->ports[csk->port_id]->name,
530 cdev, csk->port_id, cdev->ports[csk->port_id]->name,
533 cxgbi_sock_put(csk);
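
sock_put_port() (lines 500-533) is the inverse: recover the map index from the allocated port number, clear the slot, and drop the reference taken at line 482. A sketch, with the index arithmetic an assumption mirroring the allocation above:

    if (*port) {
            int idx = ntohs(*port) - pmap->sport_base;      /* assumed */

            *port = 0;
            spin_lock_bh(&pmap->lock);
            pmap->port_csk[idx] = NULL;
            pmap->used--;
            spin_unlock_bh(&pmap->lock);
            cxgbi_sock_put(csk);            /* line 533: drop the map's ref */
    }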
540 void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk)
542 if (csk->cpl_close) {
543 kfree_skb(csk->cpl_close);
544 csk->cpl_close = NULL;
546 if (csk->cpl_abort_req) {
547 kfree_skb(csk->cpl_abort_req);
548 csk->cpl_abort_req = NULL;
550 if (csk->cpl_abort_rpl) {
551 kfree_skb(csk->cpl_abort_rpl);
552 csk->cpl_abort_rpl = NULL;
559 struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO);
561 if (!csk) {
562 pr_info("alloc csk %zu failed.\n", sizeof(*csk));
566 if (cdev->csk_alloc_cpls(csk) < 0) {
567 pr_info("csk 0x%p, alloc cpls failed.\n", csk);
568 kfree(csk);
572 spin_lock_init(&csk->lock);
573 kref_init(&csk->refcnt);
574 skb_queue_head_init(&csk->receive_queue);
575 skb_queue_head_init(&csk->write_queue);
576 timer_setup(&csk->retry_timer, NULL, 0);
577 init_completion(&csk->cmpl);
578 rwlock_init(&csk->callback_lock);
579 csk->cdev = cdev;
580 csk->flags = 0;
581 cxgbi_sock_set_state(csk, CTP_CLOSED);
583 log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk);
585 return csk;
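
cxgbi_sock_create() (lines 559-585) shows the full construction sequence: allocate, let the driver pre-allocate its CPL skbs, then initialize the lock, refcount, queues, retry timer, completion, and callback lock before entering CTP_CLOSED. The kref initialized at line 573 drives the cxgbi_sock_get()/cxgbi_sock_put() calls used throughout this listing; a sketch of how those helpers would be built on it (the release callback and its exact teardown are assumptions):

    static void cxgbi_sock_free(struct kref *kref)
    {
            struct cxgbi_sock *csk = container_of(kref, struct cxgbi_sock,
                                                  refcnt);

            cxgbi_sock_free_cpl_skbs(csk);  /* lines 540-552 */
            kfree(csk);
    }

    static inline void cxgbi_sock_get(struct cxgbi_sock *csk)
    {
            kref_get(&csk->refcnt);
    }

    static inline void cxgbi_sock_put(struct cxgbi_sock *csk)
    {
            kref_put(&csk->refcnt, cxgbi_sock_free);
    }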
613 struct cxgbi_sock *csk = NULL;
674 csk = cxgbi_sock_create(cdev);
675 if (!csk) {
679 csk->cdev = cdev;
680 csk->port_id = port;
681 csk->mtu = mtu;
682 csk->dst = dst;
684 csk->csk_family = AF_INET;
685 csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
686 csk->daddr.sin_port = daddr->sin_port;
687 csk->daddr.sin_family = daddr->sin_family;
688 csk->saddr.sin_family = daddr->sin_family;
689 csk->saddr.sin_addr.s_addr = fl4.saddr;
692 return csk;
729 struct cxgbi_sock *csk = NULL;
785 csk = cxgbi_sock_create(cdev);
786 if (!csk) {
790 csk->cdev = cdev;
791 csk->port_id = port;
792 csk->mtu = mtu;
793 csk->dst = dst;
808 csk->csk_family = AF_INET6;
809 csk->daddr6.sin6_addr = daddr6->sin6_addr;
810 csk->daddr6.sin6_port = daddr6->sin6_port;
811 csk->daddr6.sin6_family = daddr6->sin6_family;
812 csk->saddr6.sin6_family = daddr6->sin6_family;
813 csk->saddr6.sin6_addr = pref_saddr;
816 return csk;
823 if (csk)
824 cxgbi_sock_closed(csk);
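
cxgbi_check_route() (613-692) and cxgbi_check_route6() (729-824) share one shape: resolve the route to the target, find the Chelsio egress device and port, create a csk, and copy the address pair into daddr/saddr (v4) or daddr6/saddr6 (v6). Lines 823-824 are the shared error tail; a sketch of how it likely ends, given the caller's IS_ERR() check at line 2892 (the label name is an assumption):

    err_out:
            if (csk)
                    cxgbi_sock_closed(csk); /* tear down the half-built socket */
            return ERR_PTR(err);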
830 void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn,
833 csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
834 dst_confirm(csk->dst);
836 cxgbi_sock_set_state(csk, CTP_ESTABLISHED);
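
cxgbi_sock_established() (830-836) runs when the hardware reports the TCP handshake complete: all three transmit-side sequence trackers are seeded from the send ISN before the state flips to CTP_ESTABLISHED. The one elided line (835) sits between the data writes and the state change; filling it in as a memory barrier is an assumption:

    csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
    dst_confirm(csk->dst);
    smp_mb();       /* assumed: publish sequence state before the new state */
    cxgbi_sock_set_state(csk, CTP_ESTABLISHED);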
840 static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk)
843 "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n",
844 csk, csk->state, csk->flags, csk->user_data);
846 if (csk->state != CTP_ESTABLISHED) {
847 read_lock_bh(&csk->callback_lock);
848 if (csk->user_data)
849 iscsi_conn_failure(csk->user_data,
851 read_unlock_bh(&csk->callback_lock);
855 void cxgbi_sock_closed(struct cxgbi_sock *csk)
857 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
858 csk, (csk)->state, (csk)->flags, (csk)->tid);
859 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
860 if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED)
862 if (csk->saddr.sin_port)
863 sock_put_port(csk);
864 if (csk->dst)
865 dst_release(csk->dst);
866 csk->cdev->csk_release_offload_resources(csk);
867 cxgbi_sock_set_state(csk, CTP_CLOSED);
868 cxgbi_inform_iscsi_conn_closing(csk);
869 cxgbi_sock_put(csk);
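
cxgbi_sock_closed() (855-869) is the single close path everything funnels into. Only the body of the guard at line 860 is elided; filling it in with the obvious early return gives:

    void cxgbi_sock_closed(struct cxgbi_sock *csk)
    {
            cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
            if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED)
                    return;         /* nothing offloaded yet, or already done */
            if (csk->saddr.sin_port)
                    sock_put_port(csk);     /* release the port-map slot */
            if (csk->dst)
                    dst_release(csk->dst);
            csk->cdev->csk_release_offload_resources(csk);
            cxgbi_sock_set_state(csk, CTP_CLOSED);
            cxgbi_inform_iscsi_conn_closing(csk);
            cxgbi_sock_put(csk);    /* drop the connection's base reference */
    }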
873 static void need_active_close(struct cxgbi_sock *csk)
878 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
879 csk, (csk)->state, (csk)->flags, (csk)->tid);
880 spin_lock_bh(&csk->lock);
881 if (csk->dst)
882 dst_confirm(csk->dst);
883 data_lost = skb_queue_len(&csk->receive_queue);
884 __skb_queue_purge(&csk->receive_queue);
886 if (csk->state == CTP_ACTIVE_OPEN)
887 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
888 else if (csk->state == CTP_ESTABLISHED) {
890 cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE);
891 } else if (csk->state == CTP_PASSIVE_CLOSE) {
893 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
897 if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) ||
899 csk->cdev->csk_send_abort_req(csk);
901 csk->cdev->csk_send_close_req(csk);
904 spin_unlock_bh(&csk->lock);
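
need_active_close() (873-904) decides between an abort and an orderly close: if the peer's iSCSI logout response never arrived, or received data had to be thrown away (data_lost), an RST-style abort is sent; otherwise a clean close request. A reconstructed sketch of the decision, with the close_req bookkeeping an assumption:

    int data_lost, close_req = 0;

    spin_lock_bh(&csk->lock);
    if (csk->dst)
            dst_confirm(csk->dst);
    data_lost = skb_queue_len(&csk->receive_queue);
    __skb_queue_purge(&csk->receive_queue);

    if (csk->state == CTP_ACTIVE_OPEN)
            cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
    else if (csk->state == CTP_ESTABLISHED) {
            close_req = 1;
            cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE);
    } else if (csk->state == CTP_PASSIVE_CLOSE) {
            close_req = 1;
            cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
    }

    if (close_req) {
            if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) || data_lost)
                    csk->cdev->csk_send_abort_req(csk);     /* line 899 */
            else
                    csk->cdev->csk_send_close_req(csk);     /* line 901 */
    }
    spin_unlock_bh(&csk->lock);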
907 void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno)
909 pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n",
910 csk, csk->state, csk->flags,
911 &csk->saddr.sin_addr.s_addr, csk->saddr.sin_port,
912 &csk->daddr.sin_addr.s_addr, csk->daddr.sin_port,
915 cxgbi_sock_set_state(csk, CTP_CONNECTING);
916 csk->err = errno;
917 cxgbi_sock_closed(csk);
923 struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
924 struct module *owner = csk->cdev->owner;
926 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
927 csk, (csk)->state, (csk)->flags, (csk)->tid);
928 cxgbi_sock_get(csk);
929 spin_lock_bh(&csk->lock);
930 if (csk->state == CTP_ACTIVE_OPEN)
931 cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH);
932 spin_unlock_bh(&csk->lock);
933 cxgbi_sock_put(csk);
940 void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
942 cxgbi_sock_get(csk);
943 spin_lock_bh(&csk->lock);
945 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
946 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
947 cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
948 if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
949 pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
950 csk, csk->state, csk->flags, csk->tid);
951 cxgbi_sock_closed(csk);
954 spin_unlock_bh(&csk->lock);
955 cxgbi_sock_put(csk);
959 void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk)
961 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
962 csk, (csk)->state, (csk)->flags, (csk)->tid);
963 cxgbi_sock_get(csk);
964 spin_lock_bh(&csk->lock);
966 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
969 switch (csk->state) {
971 cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE);
974 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
977 cxgbi_sock_closed(csk);
982 pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
983 csk, csk->state, csk->flags, csk->tid);
985 cxgbi_inform_iscsi_conn_closing(csk);
987 spin_unlock_bh(&csk->lock);
988 cxgbi_sock_put(csk);
992 void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt)
994 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
995 csk, (csk)->state, (csk)->flags, (csk)->tid);
996 cxgbi_sock_get(csk);
997 spin_lock_bh(&csk->lock);
999 csk->snd_una = snd_nxt - 1;
1000 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
1003 switch (csk->state) {
1005 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1);
1009 cxgbi_sock_closed(csk);
1014 pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
1015 csk, csk->state, csk->flags, csk->tid);
1018 spin_unlock_bh(&csk->lock);
1019 cxgbi_sock_put(csk);
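
Taken together, cxgbi_sock_rcv_peer_close() (959-988) and cxgbi_sock_rcv_close_conn_rpl() (992-1019) walk the offloaded four-way close. The switch case labels are elided by the listing, but the transitions the set_state calls imply are (reconstruction, treat as an assumption):

    /*
     * peer closes first:  CTP_ESTABLISHED  -> CTP_PASSIVE_CLOSE   (line 971)
     * crossing closes:    CTP_ACTIVE_CLOSE -> CTP_CLOSE_WAIT_2    (line 974)
     * our close is acked: CTP_ACTIVE_CLOSE -> CTP_CLOSE_WAIT_1    (line 1005)
     * final segment:      CTP_CLOSE_WAIT_* -> cxgbi_sock_closed() (977, 1009)
     *
     * A still-pending ABORT_RPL short-circuits both handlers.
     */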
1023 void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits,
1027 "csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
1028 csk, csk->state, csk->flags, csk->tid, credits,
1029 csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);
1031 spin_lock_bh(&csk->lock);
1033 csk->wr_cred += credits;
1034 if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
1035 csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
1038 struct sk_buff *p = cxgbi_sock_peek_wr(csk);
1041 pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n",
1042 csk, csk->state, csk->flags, csk->tid, credits,
1043 csk->wr_cred, csk->wr_una_cred);
1048 pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n",
1049 csk, csk->state, csk->flags, csk->tid,
1050 credits, csk->wr_cred, csk->wr_una_cred,
1055 cxgbi_sock_dequeue_wr(csk);
1061 cxgbi_sock_check_wr_invariants(csk);
1064 if (unlikely(before(snd_una, csk->snd_una))) {
1065 pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.",
1066 csk, csk->state, csk->flags, csk->tid, snd_una,
1067 csk->snd_una);
1071 if (csk->snd_una != snd_una) {
1072 csk->snd_una = snd_una;
1073 dst_confirm(csk->dst);
1077 if (skb_queue_len(&csk->write_queue)) {
1078 if (csk->cdev->csk_push_tx_frames(csk, 0))
1079 cxgbi_conn_tx_open(csk);
1081 cxgbi_conn_tx_open(csk);
1083 spin_unlock_bh(&csk->lock);
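
cxgbi_sock_rcv_wr_ack() (1023-1083) reconciles work-request credits returned by the hardware: wr_cred is replenished, wr_una_cred is clamped so the pair never exceeds wr_max_cred, completed WRs are popped off the pending queue, and an advancing snd_una may reopen transmit. The elided middle (lines 1036-1060) walks the pending-WR queue; a sketch, assuming each queued skb carries its WR's credit count in skb->csum, as the peek/partial-ack/dequeue pairing in the error messages suggests:

    while (credits) {
            struct sk_buff *p = cxgbi_sock_peek_wr(csk);

            if (unlikely(!p)) {
                    break;                  /* credits with no pending WR: bug */
            } else if (unlikely(credits < p->csum)) {
                    p->csum -= credits;     /* WR only partially acked */
                    break;
            } else {
                    cxgbi_sock_dequeue_wr(csk);     /* line 1055 */
                    credits -= p->csum;
                    kfree_skb(p);
            }
    }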
1087 static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk,
1092 while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu)
1098 unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
1101 struct dst_entry *dst = csk->dst;
1103 csk->advmss = dst_metric_advmss(dst);
1105 if (csk->advmss > pmtu - 40)
1106 csk->advmss = pmtu - 40;
1107 if (csk->advmss < csk->cdev->mtus[0] - 40)
1108 csk->advmss = csk->cdev->mtus[0] - 40;
1109 idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40);
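
cxgbi_sock_select_mss() (1098-1109) clamps the advertised MSS between the path MTU and the smallest entry of the adapter's MTU table, then picks the largest table index that still fits; the constant 40 is the minimal IPv4 + TCP header overhead. Reconstructed from the lines shown (parameter names on elided continuation lines are assumptions):

    static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk,
                                                 unsigned short mtu)
    {
            int i = 0;

            /* largest table entry not exceeding mtu */
            while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu)
                    ++i;
            return i;
    }

    unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
    {
            unsigned int idx;
            struct dst_entry *dst = csk->dst;

            csk->advmss = dst_metric_advmss(dst);
            if (csk->advmss > pmtu - 40)            /* cap at path MTU - hdrs */
                    csk->advmss = pmtu - 40;
            if (csk->advmss < csk->cdev->mtus[0] - 40)
                    csk->advmss = csk->cdev->mtus[0] - 40;
            idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40);
            return idx;
    }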
1115 void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
1117 cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
1118 __skb_queue_tail(&csk->write_queue, skb);
1122 void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk)
1126 while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL)
1131 void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
1133 int pending = cxgbi_sock_count_pending_wrs(csk);
1135 if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
1136 pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
1137 csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
1264 struct cxgbi_sock *csk = cconn->cep->csk;
1265 struct cxgbi_device *cdev = csk->cdev;
1324 ttinfo->cid = csk->port_id;
1326 cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
1334 err = cdev->csk_ddp_set_map(ppm, csk, ttinfo);
1406 "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
1407 cconn->cep->csk, task, tdata->dlen,
1451 void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
1453 struct iscsi_conn *conn = csk->user_data;
1457 "csk 0x%p, cid %d.\n", csk, conn->id);
1508 skb_read_pdu_bhs(struct cxgbi_sock *csk, struct iscsi_conn *conn,
1556 cxgbi_sock_set_flag(csk, CTPF_LOGOUT_RSP_RCVD);
1600 static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
1602 struct cxgbi_device *cdev = csk->cdev;
1607 "csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n",
1608 csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
1609 csk->rcv_wup, cdev->rx_credit_thres,
1610 csk->rcv_win);
1615 if (csk->state != CTP_ESTABLISHED)
1618 credits = csk->copied_seq - csk->rcv_wup;
1621 must_send = credits + 16384 >= csk->rcv_win;
1623 csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
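
csk_return_rx_credits() (1600-1623) hands RX window credits back to the hardware once the bytes copied to the application (copied_seq) run far enough ahead of the last window update (rcv_wup): either past the device's rx_credit_thres, or unconditionally once less than 16 KB of the receive window remains (the must_send test at line 1621). A sketch of the elided checks:

    u32 credits;
    int must_send;

    if (!cdev->rx_credit_thres)
            return;
    if (csk->state != CTP_ESTABLISHED)
            return;

    credits = csk->copied_seq - csk->rcv_wup;
    if (unlikely(!credits))
            return;
    must_send = credits + 16384 >= csk->rcv_win;
    if (must_send || credits >= cdev->rx_credit_thres)
            csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);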
1626 void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
1628 struct cxgbi_device *cdev = csk->cdev;
1629 struct iscsi_conn *conn = csk->user_data;
1635 "csk 0x%p, conn 0x%p.\n", csk, conn);
1639 "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
1640 csk, conn, conn ? conn->id : 0xFF,
1646 skb = skb_peek(&csk->receive_queue);
1655 __skb_unlink(skb, &csk->receive_queue);
1659 "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n",
1660 csk, skb, skb->len, cxgbi_skcb_flags(skb),
1664 err = skb_read_pdu_bhs(csk, conn, skb);
1666 pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
1668 csk, skb, skb->len,
1676 pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, "
1678 csk, skb, skb->len,
1682 err = skb_read_pdu_bhs(csk, conn, skb);
1684 pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
1686 csk, skb, skb->len,
1695 dskb = skb_peek(&csk->receive_queue);
1697 pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx,"
1699 csk, skb, skb->len,
1705 __skb_unlink(dskb, &csk->receive_queue);
1709 pr_err("data, csk 0x%p, skb 0x%p,%u, "
1712 csk, skb, skb->len,
1727 log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read);
1729 csk->copied_seq += read;
1730 csk_return_rx_credits(csk, read);
1735 pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n",
1736 csk, conn, err, read);
1892 struct cxgbi_sock *csk = cconn->cep ? cconn->cep->csk : NULL;
1907 if (!csk) {
1908 pr_err("task 0x%p, csk gone.\n", task);
1948 if (cdev->skb_iso_txhdr && !csk->disable_iso &&
1960 csk->advmss);
2273 static int cxgbi_sock_tx_queue_up(struct cxgbi_sock *csk, struct sk_buff *skb)
2275 struct cxgbi_device *cdev = csk->cdev;
2281 if (csk->state != CTP_ESTABLISHED) {
2283 "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
2284 csk, csk->state, csk->flags, csk->tid);
2288 if (csk->err) {
2290 "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
2291 csk, csk->state, csk->flags, csk->tid, csk->err);
2296 before((csk->snd_win + csk->snd_una), csk->write_seq)) {
2298 "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
2299 csk, csk->state, csk->flags, csk->tid, csk->write_seq,
2300 csk->snd_una, csk->snd_win);
2308 pr_err("csk 0x%p, skb head %u < %u.\n",
2309 csk, skb_headroom(skb), cdev->skb_tx_rsvd);
2317 pr_err("csk 0x%p, frags %u, %u,%u >%lu.\n",
2318 csk, skb_shinfo(skb)->nr_frags, skb->len,
2325 cxgbi_sock_skb_entail(csk, skb);
2337 csk->write_seq += (skb->len + extra_len);
2342 static int cxgbi_sock_send_skb(struct cxgbi_sock *csk, struct sk_buff *skb)
2344 struct cxgbi_device *cdev = csk->cdev;
2348 spin_lock_bh(&csk->lock);
2349 err = cxgbi_sock_tx_queue_up(csk, skb);
2351 spin_unlock_bh(&csk->lock);
2355 if (likely(skb_queue_len(&csk->write_queue)))
2356 cdev->csk_push_tx_frames(csk, 0);
2357 spin_unlock_bh(&csk->lock);
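
cxgbi_sock_tx_queue_up() (2273-2337) refuses work unless the connection is established (EAGAIN), error-free (EPIPE), and inside the send window: before(csk->snd_win + csk->snd_una, csk->write_seq) at line 2296 is sequence-space arithmetic saying more unacked bytes are queued than the window allows. cxgbi_sock_send_skb() (2342-2357) then runs queue-up and the doorbell push under one lock hold; a sketch of the elided control flow:

    spin_lock_bh(&csk->lock);
    err = cxgbi_sock_tx_queue_up(csk, skb);
    if (err < 0) {
            spin_unlock_bh(&csk->lock);
            return err;                     /* skb not consumed */
    }
    if (likely(skb_queue_len(&csk->write_queue)))
            cdev->csk_push_tx_frames(csk, 0);       /* line 2356 */
    spin_unlock_bh(&csk->lock);
    return len;     /* assumed: skb length captured before queuing */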
2369 struct cxgbi_sock *csk = NULL;
2389 csk = cconn->cep->csk;
2391 if (!csk) {
2393 "task 0x%p, csk gone.\n", task);
2402 struct cxgbi_ppm *ppm = csk->cdev->cdev2ppm(csk->cdev);
2405 if (csk->cdev->csk_ddp_set_map(ppm, csk, ttinfo) < 0)
2414 err = cxgbi_sock_send_skb(csk, skb);
2429 if (unlikely(cxgbi_is_iso_config(csk) && cxgbi_is_iso_disabled(csk))) {
2430 if (time_after(jiffies, csk->prev_iso_ts + HZ)) {
2431 csk->disable_iso = false;
2432 csk->prev_iso_ts = 0;
2434 "enable iso: csk 0x%p\n", csk);
2448 if (cxgbi_is_iso_config(csk) && !cxgbi_is_iso_disabled(csk) &&
2449 (csk->no_tx_credits++ >= 2)) {
2450 csk->disable_iso = true;
2451 csk->prev_iso_ts = jiffies;
2453 "disable iso:csk 0x%p, ts:%lu\n",
2454 csk, csk->prev_iso_ts);
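
Lines 2429-2454 implement simple hysteresis around hardware ISO (iSCSI segmentation offload): two consecutive transmits that stall for lack of TX credits (no_tx_credits++ >= 2) disable ISO and timestamp the event; once a second (HZ) has passed, lines 2430-2434 re-enable it. A condensed sketch of both halves, built only from the lines shown:

    /* re-enable, on entry to xmit */
    if (unlikely(cxgbi_is_iso_config(csk) && cxgbi_is_iso_disabled(csk))) {
            if (time_after(jiffies, csk->prev_iso_ts + HZ)) {
                    csk->disable_iso = false;
                    csk->prev_iso_ts = 0;
            }
    }

    /* disable, after repeated no-credit stalls */
    if (cxgbi_is_iso_config(csk) && !cxgbi_is_iso_disabled(csk) &&
        (csk->no_tx_credits++ >= 2)) {
            csk->disable_iso = true;
            csk->prev_iso_ts = jiffies;
    }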
2574 struct cxgbi_sock *csk = cconn->cep->csk;
2585 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2592 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2619 struct cxgbi_sock *csk;
2630 csk = cep->csk;
2631 if (!csk)
2635 &csk->daddr, param, buf);
2678 struct cxgbi_sock *csk;
2687 csk = cep->csk;
2689 ppm = csk->cdev->cdev2ppm(csk->cdev);
2690 err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
2704 write_lock_bh(&csk->callback_lock);
2705 csk->user_data = conn;
2709 write_unlock_bh(&csk->callback_lock);
2715 "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
2716 cls_session, cls_conn, ep, cconn, csk);
2837 struct cxgbi_sock *csk = find_sock_on_port(chba->cdev,
2839 if (csk) {
2841 (struct sockaddr *)&csk->saddr);
2862 struct cxgbi_sock *csk;
2880 csk = cxgbi_check_route(dst_addr, ifindex);
2883 csk = cxgbi_check_route6(dst_addr, ifindex);
2892 if (IS_ERR(csk))
2893 return (struct iscsi_endpoint *)csk;
2894 cxgbi_sock_get(csk);
2897 hba = csk->cdev->hbas[csk->port_id];
2898 else if (hba != csk->cdev->hbas[csk->port_id]) {
2900 cxgbi_sock_put(csk);
2901 cxgbi_sock_closed(csk);
2909 csk->cdev->hbas[csk->port_id], csk->port_id);
2914 err = sock_get_port(csk);
2918 cxgbi_sock_set_state(csk, CTP_CONNECTING);
2919 err = csk->cdev->csk_init_act_open(csk);
2923 if (cxgbi_sock_is_closing(csk)) {
2925 pr_info("csk 0x%p is closing.\n", csk);
2937 cep->csk = csk;
2941 "ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n",
2942 ep, cep, csk, hba, hba->ndev->name);
2946 cxgbi_sock_put(csk);
2947 cxgbi_sock_closed(csk);
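
cxgbi_ep_connect() (2862-2947) ties the pieces together: the route lookup produces the csk, the extra reference at line 2894 pins it across setup, sock_get_port() reserves the source port, and csk_init_act_open() fires the offloaded connect before the caller polls for CTP_ESTABLISHED. The failure path at lines 2946-2947 unwinds in reverse; a sketch of its likely tail (the label name is an assumption):

    release_conn:
            cxgbi_sock_put(csk);            /* drop the ref from line 2894 */
            cxgbi_sock_closed(csk);         /* release port/route/offload state */
            return ERR_PTR(err);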
2956 struct cxgbi_sock *csk = cep->csk;
2958 if (!cxgbi_sock_is_established(csk))
2968 struct cxgbi_sock *csk = cep->csk;
2971 "ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
2972 ep, cep, cconn, csk, csk->state, csk->flags);
2976 write_lock_bh(&csk->callback_lock);
2977 cep->csk->user_data = NULL;
2979 write_unlock_bh(&csk->callback_lock);
2983 if (likely(csk->state >= CTP_ESTABLISHED))
2984 need_active_close(csk);
2986 cxgbi_sock_closed(csk);
2988 cxgbi_sock_put(csk);