Lines matching refs: msk

57 pm_nl_get_pernet_from_msk(const struct mptcp_sock *msk)
59 return pm_nl_get_pernet(sock_net((struct sock *)msk));
153 const struct mptcp_sock *msk)
157 msk_owned_by_me(msk);
164 if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
175 select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk)
182 * Note: removal from the local address list during the msk life-cycle
186 if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
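
Both selectors above apply the same guard: an endpoint is eligible only while its id bit is still set in msk->pm.id_avail_bitmap, so each address is offered at most once per connection until the bit is restored on removal. A minimal userspace sketch of that pattern (all names illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_ADDR_ID 63

    static uint64_t id_avail_bitmap = ~0ULL;

    /* Eligible only while the id bit is set; claiming the endpoint
     * clears the bit, mirroring the __clear_bit() calls later in
     * the listing.
     */
    static bool claim_endpoint_id(unsigned int id)
    {
            if (id > MAX_ADDR_ID || !(id_avail_bitmap & (1ULL << id)))
                    return false;   /* already used on this connection */
            id_avail_bitmap &= ~(1ULL << id);
            return true;
    }
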
199 unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk)
201 const struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
207 unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk)
209 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
215 unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk)
217 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
223 unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk)
225 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
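
All four accessors reduce to the same two steps: resolve the per-netns PM state, then read a single limit. The return statements are elided by the listing; in recent kernels they are presumably lockless READ_ONCE() loads, paired with WRITE_ONCE() on the configuration side, e.g.:

    unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk)
    {
            struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);

            return READ_ONCE(pernet->subflows_max);
    }
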
231 bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk)
233 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
235 if (msk->pm.subflows == mptcp_pm_get_subflows_max(msk) ||
236 (find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap,
238 WRITE_ONCE(msk->pm.work_pending, false);
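
The exhaustion test works on the conjunction of two bitmaps: pernet->id_bitmap (endpoint ids that are configured) and msk->pm.id_avail_bitmap (ids not yet used by this connection). find_next_and_bit() returns the first index set in both, or the bitmap size when none remains, at which point work_pending is cleared. A single-word userspace analogue (GCC/Clang builtin):

    #include <stdint.h>

    /* First bit set in both masks, or 64 when the AND is empty --
     * the "no usable endpoint left" case tested above.
     */
    static unsigned int first_common_bit(uint64_t configured, uint64_t avail)
    {
            uint64_t both = configured & avail;

            return both ? (unsigned int)__builtin_ctzll(both) : 64;
    }
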
245 mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk,
250 lockdep_assert_held(&msk->pm.lock);
252 list_for_each_entry(entry, &msk->pm.anno_list, list) {
260 bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk)
268 spin_lock_bh(&msk->pm.lock);
269 list_for_each_entry(entry, &msk->pm.anno_list, list) {
277 spin_unlock_bh(&msk->pm.lock);
284 struct mptcp_sock *msk = entry->sock;
285 struct sock *sk = (struct sock *)msk;
287 pr_debug("msk=%p", msk);
289 if (!msk)
298 if (mptcp_pm_should_add_signal_addr(msk)) {
303 spin_lock_bh(&msk->pm.lock);
305 if (!mptcp_pm_should_add_signal_addr(msk)) {
307 mptcp_pm_announce_addr(msk, &entry->addr, false);
308 mptcp_pm_add_addr_send_ack(msk);
316 spin_unlock_bh(&msk->pm.lock);
319 mptcp_pm_subflow_established(msk);
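
This is the ADD_ADDR retransmission timer: if an announce is already queued it merely re-arms, otherwise it re-announces under pm.lock, and once the retransmission budget is exhausted it falls through to mptcp_pm_subflow_established() so the PM can still make progress. The bookkeeping between the listed lines is elided; it presumably has this shape:

    if (!mptcp_pm_should_add_signal_addr(msk)) {
            mptcp_pm_announce_addr(msk, &entry->addr, false);
            mptcp_pm_add_addr_send_ack(msk);
            entry->retrans_times++;
    }

    if (entry->retrans_times < ADD_ADDR_RETRANS_MAX)
            sk_reset_timer(sk, &entry->add_timer,
                           jiffies + mptcp_get_add_addr_timeout(sock_net(sk)));
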
326 mptcp_pm_del_add_timer(struct mptcp_sock *msk,
330 struct sock *sk = (struct sock *)msk;
332 spin_lock_bh(&msk->pm.lock);
333 entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
336 spin_unlock_bh(&msk->pm.lock);
344 bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
348 struct sock *sk = (struct sock *)msk;
351 lockdep_assert_held(&msk->pm.lock);
353 add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
356 if (mptcp_pm_is_kernel(msk))
368 list_add(&add_entry->list, &msk->pm.anno_list);
371 add_entry->sock = msk;
381 void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
384 struct sock *sk = (struct sock *)msk;
387 pr_debug("msk=%p", msk);
389 spin_lock_bh(&msk->pm.lock);
390 list_splice_init(&msk->pm.anno_list, &free_list);
391 spin_unlock_bh(&msk->pm.lock);
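
mptcp_pm_free_anno_list() uses the classic splice-then-free shape: the whole announce list is detached in O(1) while pm.lock is held, and the entries are freed (and their timers stopped) only after the lock is dropped. A runnable userspace analogue of the locking pattern, with illustrative names:

    #include <pthread.h>
    #include <stdlib.h>

    struct entry { struct entry *next; };

    static pthread_mutex_t pm_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct entry *anno_list;

    static void free_anno_list(void)
    {
            struct entry *head, *next;

            pthread_mutex_lock(&pm_lock);
            head = anno_list;       /* splice: steal the whole list... */
            anno_list = NULL;       /* ...and leave it reinitialized empty */
            pthread_mutex_unlock(&pm_lock);

            for (; head; head = next) {  /* free with the lock dropped */
                    next = head->next;
                    free(head);
            }
    }
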
402 static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
407 bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0);
408 struct sock *sk = (struct sock *)msk, *ssk;
414 subflows_max = mptcp_pm_get_subflows_max(msk);
427 msk->pm.subflows++;
436 mptcp_for_each_subflow(msk, subflow)
440 mptcp_for_each_subflow(msk, subflow) {
453 if (msk->pm.subflows < subflows_max) {
458 msk->pm.subflows++;
467 static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
474 prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr"));
487 static void mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
490 spin_unlock_bh(&msk->pm.lock);
491 __mptcp_pm_send_ack(msk, subflow, prio, backup);
492 spin_lock_bh(&msk->pm.lock);
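
mptcp_pm_send_ack() exists because __mptcp_pm_send_ack() takes the subflow's socket lock, which must not nest under msk->pm.lock: the wrapper drops pm.lock for the call and retakes it afterwards. The practical consequence is that callers must revalidate any PM state after the call. Schematic caller-side shape, with need_ack() and pick_subflow() as hypothetical placeholders:

    spin_lock_bh(&msk->pm.lock);
    while (need_ack(msk)) {                 /* hypothetical predicate */
            subflow = pick_subflow(msk);    /* hypothetical helper */
            mptcp_pm_send_ack(msk, subflow, false, false);
            /* pm.lock was dropped and retaken inside: state may have
             * changed, so the loop condition re-checks it each pass.
             */
    }
    spin_unlock_bh(&msk->pm.lock);
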
522 static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
524 struct sock *sk = (struct sock *)msk;
533 add_addr_signal_max = mptcp_pm_get_add_addr_signal_max(msk);
534 local_addr_max = mptcp_pm_get_local_addr_max(msk);
535 subflows_max = mptcp_pm_get_subflows_max(msk);
538 if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) {
539 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(msk->first);
544 mptcp_local_address((struct sock_common *)msk->first, &mpc_addr);
548 __clear_bit(entry->addr.id, msk->pm.id_avail_bitmap);
549 msk->mpc_endpoint_id = entry->addr.id;
555 mptcp_pm_send_ack(msk, subflow, true, backup);
557 msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED);
561 msk->pm.local_addr_used, local_addr_max,
562 msk->pm.add_addr_signaled, add_addr_signal_max,
563 msk->pm.subflows, subflows_max);
566 if (msk->pm.add_addr_signaled < add_addr_signal_max) {
567 local = select_signal_address(pernet, msk);
576 if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL))
580 if (mptcp_pm_alloc_anno_list(msk, &local->addr)) {
581 __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
582 msk->pm.add_addr_signaled++;
583 mptcp_pm_announce_addr(msk, &local->addr, false);
584 mptcp_pm_nl_addr_send_ack(msk);
590 while (msk->pm.local_addr_used < local_addr_max &&
591 msk->pm.subflows < subflows_max) {
596 local = select_local_address(pernet, msk);
602 msk->pm.local_addr_used++;
603 __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
604 nr = fill_remote_addresses_vec(msk, &local->addr, fullmesh, addrs);
608 spin_unlock_bh(&msk->pm.lock);
611 spin_lock_bh(&msk->pm.lock);
613 mptcp_pm_nl_check_work_pending(msk);
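
Taken together, mptcp_pm_create_subflow_or_signal_addr() drives two independent budgets: an announce budget (add_addr_signaled against add_addr_signal_max) and a connect budget (local_addr_used and subflows against their maxima), with the actual connect issued only after pm.lock is dropped. A compact model of the two checks (names illustrative):

    struct pm_budget {
            unsigned int add_addr_signaled, add_addr_signal_max;
            unsigned int local_addr_used, local_addr_max;
            unsigned int subflows, subflows_max;
    };

    static int pm_can_announce(const struct pm_budget *b)
    {
            return b->add_addr_signaled < b->add_addr_signal_max;
    }

    static int pm_can_connect(const struct pm_budget *b)
    {
            return b->local_addr_used < b->local_addr_max &&
                   b->subflows < b->subflows_max;
    }
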
616 static void mptcp_pm_nl_fully_established(struct mptcp_sock *msk)
618 mptcp_pm_create_subflow_or_signal_addr(msk);
621 static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk)
623 mptcp_pm_create_subflow_or_signal_addr(msk);
629 static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
633 struct sock *sk = (struct sock *)msk;
639 pernet = pm_nl_get_pernet_from_msk(msk);
640 subflows_max = mptcp_pm_get_subflows_max(msk);
650 if (msk->pm.subflows < subflows_max) {
651 msk->pm.subflows++;
674 msk->pm.subflows++;
681 static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
684 struct sock *sk = (struct sock *)msk;
690 add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
691 subflows_max = mptcp_pm_get_subflows_max(msk);
694 msk->pm.add_addr_accepted, add_addr_accept_max,
695 msk->pm.remote.family);
697 remote = msk->pm.remote;
698 mptcp_pm_announce_addr(msk, &remote, true);
699 mptcp_pm_nl_addr_send_ack(msk);
701 if (lookup_subflow_by_daddr(&msk->conn_list, &remote))
711 nr = fill_local_addresses_vec(msk, &remote, addrs);
715 msk->pm.add_addr_accepted++;
716 if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
717 msk->pm.subflows >= subflows_max)
718 WRITE_ONCE(msk->pm.accept_addr, false);
720 spin_unlock_bh(&msk->pm.lock);
723 spin_lock_bh(&msk->pm.lock);
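
On a received ADD_ADDR the PM first echoes it (the true argument to mptcp_pm_announce_addr() marks the echo), then connects towards the announced remote from up to nr local addresses. The connect step between the unlock/lock pair is elided by the listing; it presumably iterates the vector filled by fill_local_addresses_vec():

    spin_unlock_bh(&msk->pm.lock);
    for (i = 0; i < nr; i++)
            __mptcp_subflow_connect(sk, &addrs[i], &remote);
    spin_lock_bh(&msk->pm.lock);
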
726 void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
730 msk_owned_by_me(msk);
731 lockdep_assert_held(&msk->pm.lock);
733 if (!mptcp_pm_should_add_signal(msk) &&
734 !mptcp_pm_should_rm_signal(msk))
737 subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
739 mptcp_pm_send_ack(msk, subflow, false, false);
742 int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
751 mptcp_for_each_subflow(msk, subflow) {
765 __mptcp_pm_send_ack(msk, subflow, true, bkup);
772 static bool mptcp_local_id_match(const struct mptcp_sock *msk, u8 local_id, u8 id)
774 return local_id == id || (!local_id && msk->mpc_endpoint_id == id);
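
Endpoint id 0 is special: it names the initial (MP_CAPABLE) subflow, so a match on local_id 0 is also accepted when the target id equals the recorded mpc_endpoint_id. A standalone copy of the rule with example outcomes:

    #include <assert.h>
    #include <stdbool.h>

    /* Same rule as above: id 0 stands for the initial subflow, so it
     * also matches the recorded MPC endpoint id.
     */
    static bool local_id_match(unsigned char mpc_endpoint_id,
                               unsigned char local_id, unsigned char id)
    {
            return local_id == id || (!local_id && mpc_endpoint_id == id);
    }

    int main(void)
    {
            assert(local_id_match(5, 5, 5));   /* direct match */
            assert(local_id_match(5, 0, 5));   /* initial subflow via mpc id */
            assert(!local_id_match(5, 0, 3));
            return 0;
    }
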
777 static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
782 struct sock *sk = (struct sock *)msk;
788 msk_owned_by_me(msk);
796 if (list_empty(&msk->conn_list))
803 mptcp_for_each_subflow_safe(msk, subflow, tmp) {
811 if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
816 i, rm_id, id, remote_id, msk->mpc_endpoint_id);
817 spin_unlock_bh(&msk->pm.lock);
822 spin_lock_bh(&msk->pm.lock);
828 __set_bit(rm_id ? rm_id : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap);
832 if (!mptcp_pm_is_kernel(msk))
836 msk->pm.add_addr_accepted--;
837 WRITE_ONCE(msk->pm.accept_addr, true);
839 msk->pm.local_addr_used--;
844 static void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
846 mptcp_pm_nl_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR);
849 void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
852 mptcp_pm_nl_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW);
855 void mptcp_pm_nl_work(struct mptcp_sock *msk)
857 struct mptcp_pm_data *pm = &msk->pm;
859 msk_owned_by_me(msk);
864 spin_lock_bh(&msk->pm.lock);
866 pr_debug("msk=%p status=%x", msk, pm->status);
869 mptcp_pm_nl_add_addr_received(msk);
873 mptcp_pm_nl_addr_send_ack(msk);
877 mptcp_pm_nl_rm_addr_received(msk);
881 mptcp_pm_nl_fully_established(msk);
885 mptcp_pm_nl_subflow_established(msk);
888 spin_unlock_bh(&msk->pm.lock);
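
mptcp_pm_nl_work() is the PM worker: under pm.lock it dispatches on pm->status event bits, one handler per bit. The test-and-clear steps between the listed calls are elided; each presumably has the shape:

    if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
            pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
            mptcp_pm_nl_add_addr_received(msk);
    }
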
1023 /* The subflow socket lock is acquired nested inside the msk one
1024 * in several places, even by the TCP stack, and this msk is a kernel
1026 * modifiers in several places, re-init the lock class for the msk
1056 * under the msk socket lock. For the moment, that will not bring
1069 int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
1075 pernet = pm_nl_get_pernet_from_msk(msk);
1139 void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
1142 struct sock *sk = (struct sock *)msk;
1154 mptcp_for_each_subflow(msk, iter) {
1168 * is cheap under the msk socket lock
1291 struct mptcp_sock *msk;
1294 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1295 struct sock *sk = (struct sock *)msk;
1297 if (!READ_ONCE(msk->fully_established) ||
1298 mptcp_pm_is_userspace(msk))
1302 spin_lock_bh(&msk->pm.lock);
1303 mptcp_pm_create_subflow_or_signal_addr(msk);
1304 spin_unlock_bh(&msk->pm.lock);
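
This is the standard pattern for walking every MPTCP socket in a netns: mptcp_token_iter_next() yields each msk with a reference held, the body locks the socket and takes pm.lock for the actual work, and the elided tail releases everything. Presumed skeleton around the listed fragments:

    while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
            struct sock *sk = (struct sock *)msk;

            if (READ_ONCE(msk->fully_established) &&
                !mptcp_pm_is_userspace(msk)) {
                    lock_sock(sk);
                    spin_lock_bh(&msk->pm.lock);
                    mptcp_pm_create_subflow_or_signal_addr(msk);
                    spin_unlock_bh(&msk->pm.lock);
                    release_sock(sk);
            }

            sock_put(sk);      /* drop the iterator's reference */
            cond_resched();
    }
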
1383 int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
1387 struct sock *sk = (struct sock *)msk;
1401 static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
1406 entry = mptcp_pm_del_add_timer(msk, addr, false);
1416 static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
1425 ret = remove_anno_list_by_saddr(msk, addr);
1427 spin_lock_bh(&msk->pm.lock);
1428 mptcp_pm_remove_addr(msk, &list);
1429 spin_unlock_bh(&msk->pm.lock);
1440 struct mptcp_sock *msk;
1446 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1447 struct sock *sk = (struct sock *)msk;
1450 if (mptcp_pm_is_userspace(msk))
1453 if (list_empty(&msk->conn_list)) {
1454 mptcp_pm_remove_anno_addr(msk, addr, false);
1459 remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr);
1460 mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
1463 mptcp_pm_remove_subflow(msk, &list);
1479 struct mptcp_sock *msk;
1483 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1484 struct sock *sk = (struct sock *)msk;
1487 if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk))
1490 mptcp_local_address((struct sock_common *)msk, &msk_local);
1495 spin_lock_bh(&msk->pm.lock);
1496 mptcp_pm_remove_addr(msk, &list);
1497 mptcp_pm_nl_rm_subflow_received(msk, &list);
1498 spin_unlock_bh(&msk->pm.lock);
1521 /* the zero id address is special: the first address used by the msk
1557 void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
1563 if ((remove_anno_list_by_saddr(msk, &entry->addr) ||
1564 lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) &&
1570 spin_lock_bh(&msk->pm.lock);
1571 mptcp_pm_remove_addr(msk, &alist);
1572 spin_unlock_bh(&msk->pm.lock);
1576 void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
1583 if (lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) &&
1587 if (remove_anno_list_by_saddr(msk, &entry->addr) &&
1593 spin_lock_bh(&msk->pm.lock);
1594 mptcp_pm_remove_addr(msk, &alist);
1595 spin_unlock_bh(&msk->pm.lock);
1598 mptcp_pm_remove_subflow(msk, &slist);
1605 struct mptcp_sock *msk;
1610 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1611 struct sock *sk = (struct sock *)msk;
1613 if (!mptcp_pm_is_userspace(msk)) {
1615 mptcp_pm_remove_addrs_and_subflows(msk, rm_list);
1868 static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk,
1875 spin_lock_bh(&msk->pm.lock);
1876 mptcp_pm_nl_rm_subflow_received(msk, &list);
1877 mptcp_pm_create_subflow_or_signal_addr(msk);
1878 spin_unlock_bh(&msk->pm.lock);
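
A fullmesh flag flip on an endpoint is handled by tearing down the subflows bound to that endpoint id and letting the PM re-create them under the new setting. The rm-list setup elided above is presumably a one-entry list carrying the changed id:

    struct mptcp_rm_list list = { .nr = 0 };

    list.ids[list.nr++] = addr->id;
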
1886 struct mptcp_sock *msk;
1889 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1890 struct sock *sk = (struct sock *)msk;
1892 if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk))
1897 ret = mptcp_pm_nl_mp_prio_send_ack(msk, addr, NULL, bkup);
1899 mptcp_pm_nl_fullmesh(msk, addr);
1978 bool mptcp_userspace_pm_active(const struct mptcp_sock *msk)
1981 sock_net((const struct sock *)msk),
2035 const struct mptcp_sock *msk,
2038 const struct sock *sk = (const struct sock *)msk;
2042 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
2068 const struct mptcp_sock *msk,
2071 return mptcp_event_put_token_and_ssk(skb, msk, ssk);
2075 const struct mptcp_sock *msk,
2080 if (mptcp_event_put_token_and_ssk(skb, msk, ssk))
2097 const struct mptcp_sock *msk,
2100 int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token);
2105 if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, READ_ONCE(msk->pm.server_side)))
2111 void mptcp_event_addr_removed(const struct mptcp_sock *msk, uint8_t id)
2113 struct net *net = sock_net((const struct sock *)msk);
2128 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
2146 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
2163 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
2251 void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk,
2254 struct net *net = sock_net((const struct sock *)msk);
2275 if (mptcp_event_created(skb, msk, ssk) < 0)
2279 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token) < 0)
2289 if (mptcp_event_sub_established(skb, msk, ssk) < 0)
2293 if (mptcp_event_sub_closed(skb, msk, ssk) < 0)
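
All of the event helpers share the same netlink build discipline: every nla_put_*() can fail on skb tailroom, so helpers bail out early and mptcp_event() discards the half-built message. The allocation and multicast around the listed puts are elided; a presumed skeleton of the emit path (mptcp_genl_family and the mcast helper as used elsewhere in this file):

    skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
    if (!skb)
            return;

    nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, type);
    if (!nlh)
            goto nla_put_failure;

    /* ... per-type attribute puts, as in the fragments above ... */

    genlmsg_end(skb, nlh);
    mptcp_nl_mcast_send(net, skb, gfp);
    return;

    nla_put_failure:
            nlmsg_free(skb);
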