Lines Matching refs:cf_sk (net/caif/caif_socket.c, Linux CAIF socket layer)

57 static int rx_flow_is_on(struct caifsock *cf_sk)
59 return test_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
62 static int tx_flow_is_on(struct caifsock *cf_sk)
64 return test_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state);
67 static void set_rx_flow_off(struct caifsock *cf_sk)
69 clear_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
72 static void set_rx_flow_on(struct caifsock *cf_sk)
74 set_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
77 static void set_tx_flow_off(struct caifsock *cf_sk)
79 clear_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state);
82 static void set_tx_flow_on(struct caifsock *cf_sk)
84 set_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state);
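The six helpers above (lines 57-84) wrap the kernel's atomic bitops (test_bit(), set_bit(), clear_bit()) around two flags in cf_sk->flow_state, one bit per direction, so flow state can be read and flipped without taking the socket lock. A minimal userspace sketch of the same lock-free flag pattern, using C11 atomics in place of the kernel primitives (the struct and helper names below are illustrative; only the two bit names come from the listing):

        #include <stdatomic.h>
        #include <stdbool.h>

        enum { RX_FLOW_ON_BIT = 0, TX_FLOW_ON_BIT = 1 };

        struct flow_state { atomic_ulong bits; };

        /* test_bit() analog: lock-free read of one flag */
        static bool flow_is_on(struct flow_state *f, int bit)
        {
                return atomic_load(&f->bits) & (1UL << bit);
        }

        /* set_bit() analog: atomic read-modify-write, no lock needed */
        static void set_flow(struct flow_state *f, int bit)
        {
                atomic_fetch_or(&f->bits, 1UL << bit);
        }

        /* clear_bit() analog */
        static void clear_flow(struct flow_state *f, int bit)
        {
                atomic_fetch_and(&f->bits, ~(1UL << bit));
        }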
89 struct caifsock *cf_sk;
90 cf_sk = container_of(sk, struct caifsock, sk);
91 mutex_lock(&cf_sk->readlock);
96 struct caifsock *cf_sk;
97 cf_sk = container_of(sk, struct caifsock, sk);
98 mutex_unlock(&cf_sk->readlock);
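caif_read_lock()/caif_read_unlock() (lines 89-98) recover the enclosing struct caifsock from an embedded struct sock via container_of(), then serialize readers on cf_sk->readlock. A self-contained sketch of that embed-and-recover idiom, with a pthread mutex standing in for the kernel mutex and a simplified stand-in struct (caifsock_like is hypothetical; the real struct caifsock carries far more state):

        #include <stddef.h>
        #include <pthread.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct sock { int refcnt; };

        struct caifsock_like {
                struct sock sk;            /* embedded generic socket */
                pthread_mutex_t readlock;  /* single task reading, as at line 91 */
        };

        /* Recover the outer object from the embedded sk, then lock. */
        static void caif_read_lock(struct sock *sk)
        {
                struct caifsock_like *cf_sk =
                        container_of(sk, struct caifsock_like, sk);
                pthread_mutex_lock(&cf_sk->readlock);
        }

        static void caif_read_unlock(struct sock *sk)
        {
                struct caifsock_like *cf_sk =
                        container_of(sk, struct caifsock_like, sk);
                pthread_mutex_unlock(&cf_sk->readlock);
        }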
101 static int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
104 return cf_sk->sk.sk_rcvbuf / 4;
109 struct caifsock *cf_sk;
110 cf_sk = container_of(sk, struct caifsock, sk);
111 if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
112 cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
124 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
128 (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
130 atomic_read(&cf_sk->sk.sk_rmem_alloc),
131 sk_rcvbuf_lowwater(cf_sk));
132 set_rx_flow_off(cf_sk);
140 if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
141 set_rx_flow_off(cf_sk);
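Lines 124-141 implement the receive-side back-pressure: when queued receive memory would reach sk_rcvbuf, or sk_rmem_schedule() cannot account the skb (line 140), the RX flow-on bit is dropped so the modem is asked to stop sending. A hedged sketch of that enqueue-side check (the rx_queue struct and byte counters are illustrative; the real code works on skb truesize accounting):

        #include <stdbool.h>
        #include <stddef.h>

        struct rx_queue {
                size_t rmem_alloc;  /* bytes currently queued (sk_rmem_alloc) */
                size_t rcvbuf;      /* configured limit (sk_rcvbuf) */
                bool   rx_flow_on;  /* mirrors RX_FLOW_ON_BIT */
        };

        /* Enqueue-side check: close the RX path once the buffer is full,
         * so the peer (the modem, for CAIF) is asked to pause. */
        static void rx_enqueue(struct rx_queue *q, size_t pkt_size)
        {
                if (q->rmem_alloc + pkt_size >= q->rcvbuf && q->rx_flow_on) {
                        q->rx_flow_on = false;  /* set_rx_flow_off() analog */
                        /* the kernel code then sends a flow-off request
                         * through the modemcmd hook shown at lines 109-112 */
                }
                q->rmem_alloc += pkt_size;      /* skb_set_owner_r() analog */
        }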
162 struct caifsock *cf_sk;
165 cf_sk = container_of(layr, struct caifsock, layer);
168 if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
172 caif_queue_rcv_skb(&cf_sk->sk, skb);
178 struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
179 sock_hold(&cf_sk->sk);
184 struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
185 sock_put(&cf_sk->sk);
193 struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
197 set_tx_flow_on(cf_sk);
198 cf_sk->sk.sk_state_change(&cf_sk->sk);
203 set_tx_flow_off(cf_sk);
204 cf_sk->sk.sk_state_change(&cf_sk->sk);
209 caif_client_register_refcnt(&cf_sk->layer,
211 cf_sk->sk.sk_state = CAIF_CONNECTED;
212 set_tx_flow_on(cf_sk);
213 cf_sk->sk.sk_shutdown = 0;
214 cf_sk->sk.sk_state_change(&cf_sk->sk);
219 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
220 cf_sk->sk.sk_state_change(&cf_sk->sk);
225 cf_sk->sk.sk_err = ECONNREFUSED;
226 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
227 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
232 set_tx_flow_on(cf_sk);
233 cf_sk->sk.sk_state_change(&cf_sk->sk);
238 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
239 cf_sk->sk.sk_err = ECONNRESET;
240 set_rx_flow_on(cf_sk);
241 sk_error_report(&cf_sk->sk);
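Lines 193-241 are the heart of caif_ctrl_cb: a switch over control-channel events that updates socket state and wakes sleepers through sk_state_change() or sk_error_report(). A condensed userspace sketch of that dispatch (the enum loosely mirrors the CAIF_CTRLCMD_* values; the mini state struct is illustrative, and side effects such as refcount registration at line 209 and sk_shutdown updates are omitted):

        #include <errno.h>
        #include <stdbool.h>

        enum { DISCONNECTED, CONNECTED };

        enum ctrl_event {
                CTRL_FLOW_ON_IND, CTRL_FLOW_OFF_IND,
                CTRL_INIT_RSP, CTRL_DEINIT_RSP,
                CTRL_INIT_FAIL_RSP, CTRL_REMOTE_SHUTDOWN_IND,
        };

        struct caif_state {
                bool tx_flow_on, rx_flow_on;
                int  state;   /* CAIF_CONNECTED / CAIF_DISCONNECTED analog */
                int  err;     /* reported via sk_error_report() */
        };

        static void ctrl_cb(struct caif_state *s, enum ctrl_event ev)
        {
                switch (ev) {
                case CTRL_FLOW_ON_IND:          /* modem ready: senders may run */
                        s->tx_flow_on = true;
                        break;
                case CTRL_FLOW_OFF_IND:         /* modem congested: senders block */
                        s->tx_flow_on = false;
                        break;
                case CTRL_INIT_RSP:             /* connect completed */
                        s->state = CONNECTED;
                        s->tx_flow_on = true;
                        break;
                case CTRL_DEINIT_RSP:           /* disconnect acknowledged */
                        s->state = DISCONNECTED;
                        break;
                case CTRL_INIT_FAIL_RSP:        /* connect refused; TX is turned
                                                 * "on" so blocked senders wake
                                                 * and observe the error */
                        s->state = DISCONNECTED;
                        s->err = ECONNREFUSED;
                        s->tx_flow_on = true;
                        break;
                case CTRL_REMOTE_SHUTDOWN_IND:  /* peer gone: report reset */
                        s->err = ECONNRESET;
                        s->rx_flow_on = true;
                        break;
                }
                /* each arm of the real callback ends by waking waiters via
                 * sk_state_change() or sk_error_report() (lines 198-241) */
        }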
251 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
253 if (rx_flow_is_on(cf_sk))
256 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
257 set_rx_flow_on(cf_sk);
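Lines 251-257 close the flow-control loop: once the reader has drained the queue to the low watermark (a quarter of sk_rcvbuf, per sk_rcvbuf_lowwater() at line 104), RX flow is switched back on. Turning flow off at 100% but on again only at 25% gives hysteresis, so the socket does not flap between states on every packet. A sketch of the release side, building on the rx_queue struct from the enqueue sketch above:

        /* Read-side check, called after dequeuing: re-open the RX path only
         * once the queue has drained well below the limit (rcvbuf / 4). */
        static void check_flow_release(struct rx_queue *q)
        {
                if (q->rx_flow_on)
                        return;                 /* already open, nothing to do */
                if (q->rmem_alloc <= q->rcvbuf / 4) {
                        q->rx_flow_on = true;   /* set_rx_flow_on() analog */
                        /* the kernel code then sends a flow-on request to
                         * the modem via the same modemcmd hook */
                }
        }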
459 static long caif_wait_for_flow_on(struct caifsock *cf_sk,
462 struct sock *sk = &cf_sk->sk;
466 if (tx_flow_is_on(cf_sk) &&
467 (!wait_writeable || sock_writeable(&cf_sk->sk)))
483 if (cf_sk->sk.sk_state != CAIF_CONNECTED)
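caif_wait_for_flow_on() (lines 459-483) blocks a sender until the TX flow-on bit is set and, for stream sockets, the socket is writeable, bailing out if the connection drops or the timeout expires. A userspace sketch of that wait loop using a condition variable (the kernel version is built on prepare_to_wait()/release_sock()/schedule_timeout(); all names below are illustrative, and the error handling is simplified):

        #include <pthread.h>
        #include <stdbool.h>
        #include <errno.h>
        #include <time.h>

        struct tx_waiter {
                pthread_mutex_t lock;
                pthread_cond_t  cond;   /* signalled from the ctrl callback */
                bool tx_flow_on;
                bool connected;
        };

        /* Block until TX flow is on, the connection drops, or the deadline
         * passes.  Returns 0, -ECONNRESET or -ETIMEDOUT. */
        static int wait_for_flow_on(struct tx_waiter *w,
                                    const struct timespec *deadline)
        {
                int err = 0;

                pthread_mutex_lock(&w->lock);
                while (!w->tx_flow_on) {
                        if (!w->connected) {
                                err = -ECONNRESET;
                                break;
                        }
                        /* any failure (including timeout) ends the wait;
                         * the real code rechecks signals and state here */
                        if (pthread_cond_timedwait(&w->cond, &w->lock,
                                                   deadline)) {
                                err = -ETIMEDOUT;
                                break;
                        }
                }
                pthread_mutex_unlock(&w->lock);
                return err;
        }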
495 static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
502 cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);
504 if (cf_sk->layer.dn == NULL) {
509 return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
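transmit_skb() (lines 495-509) stamps the packet with the socket's priority, checks that a downstream layer is still attached, and hands the packet to that layer's transmit() hook: each CAIF layer only ever talks to its dn (down) neighbour. A sketch of that layered dispatch (struct layer mirrors the dn/transmit shape visible in the listing; the packet struct and error choice are illustrative):

        #include <errno.h>
        #include <stddef.h>

        struct pkt { int prio; /* payload omitted */ };

        struct layer {
                struct layer *dn;  /* next layer down the stack */
                int (*transmit)(struct layer *l, struct pkt *p);
        };

        static int transmit_pkt(struct layer *self, struct pkt *p,
                                int sk_priority)
        {
                p->prio = sk_priority;      /* cfpkt_set_prio() analog */
                if (self->dn == NULL)       /* stack torn down under us */
                        return -EINVAL;
                /* hand off downstream; lower layers prepend their headers */
                return self->dn->transmit(self->dn, p);
        }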
517 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
523 caif_assert(cf_sk);
545 if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
552 if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM)
555 buffer_size = len + cf_sk->headroom + cf_sk->tailroom;
563 skb_reserve(skb, cf_sk->headroom);
569 ret = transmit_skb(skb, cf_sk, noblock, timeo);
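The datagram send path (lines 517-569) rejects payloads larger than cf_sk->maxframe (except for RFM sockets, line 552), then allocates len + headroom + tailroom in one buffer and reserves the headroom so lower layers can prepend their headers without copying. A sketch of that sizing arithmetic, with plain malloc standing in for sock_alloc_send_skb()/skb_reserve():

        #include <stdlib.h>
        #include <string.h>
        #include <errno.h>

        struct frame_buf {
                unsigned char *head;  /* start of the allocation */
                unsigned char *data;  /* start of payload, after headroom */
                size_t len;
        };

        /* Allocate payload plus protocol headroom/tailroom, then "reserve"
         * the headroom by pointing data past it (skb_reserve() analog). */
        static int build_frame(struct frame_buf *fb, const void *payload,
                               size_t len, size_t maxframe,
                               size_t headroom, size_t tailroom)
        {
                if (len > maxframe)
                        return -EMSGSIZE;       /* oversized datagram */

                fb->head = malloc(len + headroom + tailroom);
                if (!fb->head)
                        return -ENOMEM;

                fb->data = fb->head + headroom; /* lower layers prepend here */
                memcpy(fb->data, payload, len);
                fb->len = len;
                return 0;
        }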
589 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
603 timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err);
612 if (size > cf_sk->maxframe)
613 size = cf_sk->maxframe;
623 size + cf_sk->headroom +
624 cf_sk->tailroom,
630 skb_reserve(skb, cf_sk->headroom);
645 err = transmit_skb(skb, cf_sk,
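The stream send path (lines 589-645) differs from the datagram path in two ways visible here: it first sleeps in caif_wait_for_flow_on() (line 603), and it never rejects a large write; instead it clamps each allocation to cf_sk->maxframe (lines 612-613) and loops, slicing the stream into maxframe-sized CAIF packets. A sketch of that chunking loop; send_chunk is a hypothetical stand-in for the allocate-copy-transmit sequence of the real code:

        #include <stddef.h>

        /* send_chunk returns bytes sent or a negative error. */
        static long stream_send(const unsigned char *buf, size_t total,
                                size_t maxframe,
                                long (*send_chunk)(const unsigned char *,
                                                   size_t))
        {
                size_t sent = 0;

                while (sent < total) {
                        size_t size = total - sent;

                        if (size > maxframe)
                                size = maxframe; /* clamp, as at lines 612-613 */

                        long ret = send_chunk(buf + sent, size);
                        if (ret < 0)             /* report partial progress
                                                  * before the error */
                                return sent ? (long)sent : ret;
                        sent += (size_t)ret;
                }
                return (long)sent;
        }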
668 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
671 if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
682 lock_sock(&(cf_sk->sk));
683 cf_sk->conn_req.link_selector = linksel;
684 release_sock(&cf_sk->sk);
690 if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
692 lock_sock(&(cf_sk->sk));
693 if (ol > sizeof(cf_sk->conn_req.param.data) ||
694 copy_from_sockptr(&cf_sk->conn_req.param.data, ov, ol)) {
695 release_sock(&cf_sk->sk);
698 cf_sk->conn_req.param.size = ol;
699 release_sock(&cf_sk->sk);
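The setsockopt() handler (lines 668-699) follows the standard kernel pattern: verify socket state and protocol, take the socket lock, bounds-check the user-supplied length against the destination buffer, then copy with a fault-tolerant primitive (copy_from_sockptr()), recording the size only on success. A userspace sketch of the same validate-then-copy discipline (the buffer size and lock are illustrative):

        #include <string.h>
        #include <errno.h>
        #include <pthread.h>

        struct conn_req {
                unsigned char param_data[256];  /* size is illustrative */
                size_t param_size;
        };

        static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

        /* Reject anything larger than the destination *before* copying,
         * and record the validated size only after the copy succeeds. */
        static int set_req_param(struct conn_req *req, const void *optval,
                                 size_t optlen)
        {
                pthread_mutex_lock(&sock_lock);
                if (optlen > sizeof(req->param_data)) {
                        pthread_mutex_unlock(&sock_lock);
                        return -EINVAL;
                }
                memcpy(req->param_data, optval, optlen);
                req->param_size = optlen;
                pthread_mutex_unlock(&sock_lock);
                return 0;
        }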
740 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
783 caif_disconnect_client(sock_net(sk), &cf_sk->layer);
784 caif_free_client(&cf_sk->layer);
797 sk_stream_kill_queues(&cf_sk->sk);
803 memcpy(&cf_sk->conn_req.sockaddr, uaddr,
812 if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
813 cf_sk->conn_req.priority = CAIF_PRIO_MAX;
814 else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
815 cf_sk->conn_req.priority = CAIF_PRIO_MIN;
817 cf_sk->conn_req.priority = cf_sk->sk.sk_priority;
820 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
822 cf_sk->layer.receive = caif_sktrecv_cb;
824 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
825 &cf_sk->layer, &ifindex, &headroom, &tailroom);
828 cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
829 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
840 cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom);
844 cf_sk->tailroom = tailroom;
845 cf_sk->maxframe = mtu - (headroom + tailroom);
846 if (cf_sk->maxframe < 1) {
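At the end of connect (lines 824-846) the socket learns its framing budget from the device: the headroom reported by caif_connect_client() is padded with LL_RESERVED_SPACE_EXTRA() for the link layer (line 840), and maxframe is whatever remains of the device MTU after headroom and tailroom, with the connect refused if nothing is left (lines 845-846). A worked sketch of that arithmetic (the numbers in main() are hypothetical; real values come from the net device):

        #include <stdio.h>

        /* maxframe = MTU minus per-frame protocol overhead; a link with
         * no room left for payload is unusable. */
        static int compute_maxframe(int mtu, int headroom, int tailroom)
        {
                int maxframe = mtu - (headroom + tailroom);

                return maxframe < 1 ? -1 : maxframe;
        }

        int main(void)
        {
                /* hypothetical: 1500-byte MTU, 32B headroom, 2B tailroom */
                printf("maxframe = %d\n",
                       compute_maxframe(1500, 32, 2)); /* prints 1466 */
                return 0;
        }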
893 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
898 set_tx_flow_off(cf_sk);
910 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
911 debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
913 lock_sock(&(cf_sk->sk));
917 caif_disconnect_client(sock_net(sk), &cf_sk->layer);
918 cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
922 sk_stream_kill_queues(&cf_sk->sk);
934 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
956 if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
1003 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
1011 sk_stream_kill_queues(&cf_sk->sk);
1013 caif_free_client(&cf_sk->layer);
1020 struct caifsock *cf_sk = NULL;
1054 cf_sk = container_of(sk, struct caifsock, sk);
1075 lock_sock(&(cf_sk->sk));
1081 mutex_init(&cf_sk->readlock); /* single task reading lock */
1082 cf_sk->layer.ctrlcmd = caif_ctrl_cb;
1083 cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
1084 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
1086 set_tx_flow_off(cf_sk);
1087 set_rx_flow_on(cf_sk);
1090 cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
1091 cf_sk->conn_req.protocol = protocol;
1092 release_sock(&cf_sk->sk);
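Socket creation (lines 1020-1092) ends by putting the new socket into a known-safe initial state: TX flow off (nothing may be sent until CAIF_CTRLCMD_INIT_RSP arrives), RX flow on (the empty receive queue can accept data), state CAIF_DISCONNECTED, and a low-latency link selector as the default. A sketch of those defaults, reusing the illustrative caif_state struct from the ctrl-callback sketch above:

        /* Fresh-socket defaults: cannot transmit until the channel is up,
         * but immediately willing to receive. */
        static void caif_sock_init(struct caif_state *s)
        {
                s->state      = DISCONNECTED;
                s->tx_flow_on = false;  /* set_tx_flow_off(): wait for INIT_RSP */
                s->rx_flow_on = true;   /* set_rx_flow_on(): queue starts empty */
                s->err        = 0;
        }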