Lines matching refs:nlk (uses of the local `struct netlink_sock *nlk` pointer; the numbers below are source line numbers in net/netlink/af_netlink.c)
364 struct netlink_sock *nlk = nlk_sk(sk);
367 clear_bit(NETLINK_S_CONGESTED, &nlk->state);
368 if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
369 wake_up_interruptible(&nlk->wait);
396 struct netlink_sock *nlk = nlk_sk(sk);
398 if (nlk->cb_running) {
399 if (nlk->cb.done)
400 nlk->cb.done(&nlk->cb);
401 module_put(nlk->cb.module);
402 kfree_skb(nlk->cb.skb);
419 struct netlink_sock *nlk = container_of(work, struct netlink_sock,
422 sk_free(&nlk->sk);
496 const struct netlink_sock *nlk = ptr;
498 return nlk->portid != x->portid ||
499 !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
643 struct netlink_sock *nlk;
653 nlk = nlk_sk(sk);
655 nlk->cb_mutex = cb_mutex;
657 nlk->cb_mutex = &nlk->cb_def_mutex;
658 mutex_init(nlk->cb_mutex);
659 lockdep_set_class_and_name(nlk->cb_mutex,
663 init_waitqueue_head(&nlk->wait);
675 struct netlink_sock *nlk;
718 nlk = nlk_sk(sock->sk);
719 nlk->module = module;
720 nlk->netlink_bind = bind;
721 nlk->netlink_unbind = unbind;
722 nlk->netlink_release = release;
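The cluster above is __netlink_kernel_create() wiring a protocol's callbacks into the socket. For orientation, here is a minimal, hedged sketch of the in-kernel API that feeds these fields; the demo_* names are hypothetical, NETLINK_USERSOCK is chosen only for illustration, and the optional bind/unbind/release hooks are left unset:

    #include <linux/module.h>
    #include <linux/netlink.h>
    #include <net/net_namespace.h>

    static struct sock *demo_sk;

    /* Unicasts addressed to this kernel socket are delivered through
     * nlk->netlink_rcv (cf. line 1342), which calls this hook. */
    static void demo_input(struct sk_buff *skb)
    {
        struct nlmsghdr *nlh = nlmsg_hdr(skb);

        pr_info("demo: %u-byte request from portid %u\n",
                nlh->nlmsg_len, NETLINK_CB(skb).portid);
    }

    static int __init demo_init(void)
    {
        struct netlink_kernel_cfg cfg = {
            .input = demo_input,
        };

        demo_sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &cfg);
        return demo_sk ? 0 : -ENOMEM;
    }

    static void __exit demo_exit(void)
    {
        netlink_kernel_release(demo_sk);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");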
733 struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
734 struct sock *sk = &nlk->sk;
736 kfree(nlk->groups);
737 nlk->groups = NULL;
742 if (nlk->cb_running && nlk->cb.done) {
743 INIT_WORK(&nlk->work, netlink_sock_destruct_work);
744 schedule_work(&nlk->work);
754 struct netlink_sock *nlk;
761 nlk = nlk_sk(sk);
767 if (nlk->netlink_release)
768 nlk->netlink_release(sk, nlk->groups);
773 if (nlk->netlink_unbind) {
776 for (i = 0; i < nlk->ngroups; i++)
777 if (test_bit(i, nlk->groups))
778 nlk->netlink_unbind(sock_net(sk), i + 1);
785 wake_up_interruptible_all(&nlk->wait);
789 if (nlk->portid && nlk->bound) {
793 .portid = nlk->portid,
799 module_put(nlk->module);
831 call_rcu(&nlk->rcu, deferred_put_nlk_sk);
948 struct netlink_sock *nlk = nlk_sk(sk);
950 if (nlk->subscriptions && !subscriptions)
952 else if (!nlk->subscriptions && subscriptions)
954 nlk->subscriptions = subscriptions;
959 struct netlink_sock *nlk = nlk_sk(sk);
972 if (nlk->ngroups >= groups)
975 new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
980 memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
981 NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
983 nlk->groups = new_groups;
984 nlk->ngroups = groups;
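Lines 972-984 are netlink_realloc_groups(): the membership bitmap only ever grows, and krealloc() is followed by zeroing just the newly added tail so existing bits survive. A hedged userspace analog of the same grow-and-zero pattern (names hypothetical):

    #include <stdlib.h>
    #include <string.h>

    #define LONGS_FOR(bits) \
        (((bits) + 8 * sizeof(long) - 1) / (8 * sizeof(long)))

    /* Grow a bit array, zeroing only the new tail (cf. lines 975-981). */
    static unsigned long *grow_bitmap(unsigned long *bits,
                                      size_t old_n, size_t new_n)
    {
        size_t old_sz = LONGS_FOR(old_n) * sizeof(long);
        size_t new_sz = LONGS_FOR(new_n) * sizeof(long);
        unsigned long *p;

        if (new_n <= old_n)
            return bits;            /* mirrors the early return at 972 */
        p = realloc(bits, new_sz);
        if (!p)
            return NULL;            /* caller still owns the old buffer */
        memset((char *)p + old_sz, 0, new_sz - old_sz);
        return p;
    }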
993 struct netlink_sock *nlk = nlk_sk(sk);
996 if (!nlk->netlink_unbind)
1001 nlk->netlink_unbind(sock_net(sk), undo + 1);
1009 struct netlink_sock *nlk = nlk_sk(sk);
1031 if (nlk->ngroups < BITS_PER_LONG)
1032 groups &= (1UL << nlk->ngroups) - 1;
1035 bound = READ_ONCE(nlk->bound);
1037 /* Ensure nlk->portid is up-to-date. */
1040 if (nladdr->nl_pid != nlk->portid)
1044 if (nlk->netlink_bind && groups) {
1051 err = nlk->netlink_bind(net, group + 1);
1073 if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
1078 netlink_update_subscriptions(sk, nlk->subscriptions +
1080 hweight32(nlk->groups[0]));
1081 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
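Lines 1009-1081 are netlink_bind(). From userspace, the legacy 32-bit nl_groups mask passed to bind() is exactly what line 1081 merges into nlk->groups[0]; groups above 32 need the setsockopt shown further down. A minimal runnable sketch:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>

    int main(void)
    {
        struct sockaddr_nl sa = {
            .nl_family = AF_NETLINK,
            .nl_pid    = 0,            /* 0: kernel autobinds a portid */
            .nl_groups = RTMGRP_LINK,  /* legacy 32-bit group bitmask */
        };
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

        if (fd < 0 || bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
            perror("netlink bind");
            return 1;
        }
        return 0;
    }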
1097 struct netlink_sock *nlk = nlk_sk(sk);
1107 WRITE_ONCE(nlk->dst_portid, 0);
1108 WRITE_ONCE(nlk->dst_group, 0);
1125 if (!READ_ONCE(nlk->bound))
1132 WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid);
1133 WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups));
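Lines 1097-1133 are netlink_connect(): connect() just records a default destination in dst_portid/dst_group (or clears both for AF_UNSPEC). Continuing the sketch above:

    /* Aim the socket at the kernel (portid 0) so plain send() works. */
    struct sockaddr_nl dst = { .nl_family = AF_NETLINK };  /* nl_pid = 0 */

    if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) == 0) {
        /* send()/write() now use the stored dst_portid/dst_group,
         * no per-message sockaddr required */
    }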
1143 struct netlink_sock *nlk = nlk_sk(sk);
1151 nladdr->nl_pid = READ_ONCE(nlk->dst_portid);
1152 nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group));
1155 nladdr->nl_pid = READ_ONCE(nlk->portid);
1157 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
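Lines 1143-1157 are netlink_getname(); the peer branch reports the connect()ed destination, the local branch the (possibly autobound) portid and group mask. From userspace, continuing the sketch:

    struct sockaddr_nl self;
    socklen_t slen = sizeof(self);

    if (getsockname(fd, (struct sockaddr *)&self, &slen) == 0)
        printf("portid=%u group_mask=%#x\n", self.nl_pid, self.nl_groups);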
1174 struct netlink_sock *nlk;
1181 nlk = nlk_sk(sock);
1184 READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) {
1245 struct netlink_sock *nlk;
1247 nlk = nlk_sk(sk);
1250 test_bit(NETLINK_S_CONGESTED, &nlk->state))) {
1261 add_wait_queue(&nlk->wait, &wait);
1264 test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
1269 remove_wait_queue(&nlk->wait, &wait);
1334 struct netlink_sock *nlk = nlk_sk(sk);
1337 if (nlk->netlink_rcv != NULL) {
1342 nlk->netlink_rcv(skb);
1414 struct netlink_sock *nlk = nlk_sk(sk);
1417 !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
1443 struct netlink_sock *nlk = nlk_sk(sk);
1449 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1450 !test_bit(p->group - 1, nlk->groups))
1590 struct netlink_sock *nlk = nlk_sk(sk);
1599 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1600 !test_bit(p->group - 1, nlk->groups))
1648 static void netlink_update_socket_mc(struct netlink_sock *nlk,
1654 old = test_bit(group - 1, nlk->groups);
1655 subscriptions = nlk->subscriptions - old + new;
1656 __assign_bit(group - 1, nlk->groups, new);
1657 netlink_update_subscriptions(&nlk->sk, subscriptions);
1658 netlink_update_listeners(&nlk->sk);
1665 struct netlink_sock *nlk = nlk_sk(sk);
1689 if (!val || val - 1 >= nlk->ngroups)
1691 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
1692 err = nlk->netlink_bind(sock_net(sk), val);
1697 netlink_update_socket_mc(nlk, val,
1700 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
1701 nlk->netlink_unbind(sock_net(sk), val);
1709 assign_bit(NETLINK_F_RECV_NO_ENOBUFS, &nlk->flags, val);
1711 clear_bit(NETLINK_S_CONGESTED, &nlk->state);
1712 wake_up_interruptible(&nlk->wait);
1733 assign_bit(nr, &nlk->flags, val);
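Lines 1665-1733 are netlink_setsockopt(). Two options worth illustrating: NETLINK_ADD_MEMBERSHIP takes a group number (not a mask), which is how groups past the first 32 are joined, and NETLINK_NO_ENOBUFS suppresses overrun errors and, per line 1711, clears NETLINK_S_CONGESTED and wakes waiters. A hedged fragment, with fd as in the sketches above:

    int group = RTNLGRP_IPV4_ROUTE;   /* a group number, not a bitmask */
    int on = 1;

    setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
               &group, sizeof(group));
    setsockopt(fd, SOL_NETLINK, NETLINK_NO_ENOBUFS, &on, sizeof(on));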
1741 struct netlink_sock *nlk = nlk_sk(sk);
1767 for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
1773 if (put_user((u32)(nlk->groups[idx] >> shift),
1779 if (put_user(ALIGN(BITS_TO_BYTES(nlk->ngroups), sizeof(u32)), optlen))
1801 val = test_bit(flag, &nlk->flags);
1831 struct netlink_sock *nlk = nlk_sk(sk);
1867 dst_portid = READ_ONCE(nlk->dst_portid);
1868 dst_group = READ_ONCE(nlk->dst_group);
1872 if (!READ_ONCE(nlk->bound)) {
1877 /* Ensure nlk is hashed and visible. */
1889 NETLINK_CB(skb).portid = nlk->portid;
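Lines 1831-1889 are netlink_sendmsg(): an unbound sender is autobound first (hence the "hashed and visible" barrier at 1877), and the sender's portid is stamped into the skb at 1889. A typical runnable request, here an RTM_GETLINK dump, continuing the earlier fd:

    struct {
        struct nlmsghdr nlh;
        struct rtgenmsg g;
    } req = {
        .nlh = {
            .nlmsg_len   = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
            .nlmsg_type  = RTM_GETLINK,
            .nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
        },
        .g.rtgen_family = AF_UNSPEC,
    };
    struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };  /* portid 0 */

    sendto(fd, &req, req.nlh.nlmsg_len, 0,
           (struct sockaddr *)&kernel, sizeof(kernel));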
1922 struct netlink_sock *nlk = nlk_sk(sk);
1956 max_recvmsg_len = max(READ_ONCE(nlk->max_recvmsg_len), len);
1959 WRITE_ONCE(nlk->max_recvmsg_len, max_recvmsg_len);
1990 if (READ_ONCE(nlk->cb_running) &&
2022 struct netlink_sock *nlk;
2056 nlk = nlk_sk(sk);
2057 set_bit(NETLINK_F_KERNEL_SOCKET, &nlk->flags);
2178 static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb,
2184 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(nlk->dump_done_errno),
2190 memcpy(nlmsg_data(nlh), &nlk->dump_done_errno, sizeof(nlk->dump_done_errno));
2192 if (extack->_msg && test_bit(NETLINK_F_EXT_ACK, &nlk->flags)) {
2203 struct netlink_sock *nlk = nlk_sk(sk);
2213 mutex_lock(nlk->cb_mutex);
2214 if (!nlk->cb_running) {
2227 cb = &nlk->cb;
2230 max_recvmsg_len = READ_ONCE(nlk->max_recvmsg_len);
2264 if (nlk->dump_done_errno > 0) {
2266 nlk->dump_done_errno = cb->dump(skb, cb);
2270 if (nlk->dump_done_errno > 0 ||
2271 skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
2272 mutex_unlock(nlk->cb_mutex);
2281 if (netlink_dump_done(nlk, skb, cb, &extack))
2290 if (netlink_dump_done(nlk, skb_shinfo(skb)->frag_list, cb, &extack))
2303 WRITE_ONCE(nlk->cb_running, false);
2306 mutex_unlock(nlk->cb_mutex);
2312 mutex_unlock(nlk->cb_mutex);
2322 struct netlink_sock *nlk;
2334 nlk = nlk_sk(sk);
2335 mutex_lock(nlk->cb_mutex);
2337 if (nlk->cb_running) {
2347 cb = &nlk->cb;
2367 WRITE_ONCE(nlk->cb_running, true);
2368 nlk->dump_done_errno = INT_MAX;
2370 mutex_unlock(nlk->cb_mutex);
2388 mutex_unlock(nlk->cb_mutex);
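Lines 2203-2388 are netlink_dump() and __netlink_dump_start(): cb_running marks an in-flight dump under cb_mutex, dump_done_errno starts at INT_MAX and holds the final result, and netlink_dump_done() (lines 2178-2192) ships that int as the NLMSG_DONE payload. The userspace side of the contract, as a sketch continuing the request above:

    /* Read dump replies until NLMSG_DONE; its payload is the int
     * dump_done_errno written at line 2190. */
    char buf[32768];
    int done = 0;

    while (!done) {
        int n = recv(fd, buf, sizeof(buf), 0);
        struct nlmsghdr *nlh;

        if (n <= 0)
            break;
        for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, n);
             nlh = NLMSG_NEXT(nlh, n)) {
            if (nlh->nlmsg_type == NLMSG_DONE) {
                int err = *(int *)NLMSG_DATA(nlh);

                if (err < 0)
                    fprintf(stderr, "dump failed: %d\n", err);
                done = 1;
                break;
            }
            /* ... handle one dumped object ... */
        }
    }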
2396 netlink_ack_tlv_len(struct netlink_sock *nlk, int err,
2401 if (!extack || !test_bit(NETLINK_F_EXT_ACK, &nlk->flags))
2465 struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
2473 if (err && !test_bit(NETLINK_F_CAP_ACK, &nlk->flags))
2478 tlvlen = netlink_ack_tlv_len(nlk, err, extack);
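Lines 2396-2478 are the ACK path: netlink_ack_tlv_len() sizes the extended-ACK TLVs, which are only emitted when the receiver has opted in via NETLINK_F_EXT_ACK, and NETLINK_F_CAP_ACK decides whether the original request is echoed in full. Userspace opts in and then checks NLM_F_ACK_TLVS on NLMSG_ERROR messages; a hedged fragment:

    int one = 1;

    setsockopt(fd, SOL_NETLINK, NETLINK_EXT_ACK, &one, sizeof(one));

    /* ... later, for an NLMSG_ERROR message `nlh`: */
    struct nlmsgerr *e = NLMSG_DATA(nlh);

    if (e->error && (nlh->nlmsg_flags & NLM_F_ACK_TLVS)) {
        /* TLVs (e.g. NLMSGERR_ATTR_MSG, the extack->_msg string from
         * line 2192) follow; walking them needs an attribute parser,
         * e.g. libmnl's mnl_attr_for_each(). */
    }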
2627 struct netlink_sock *nlk;
2631 nlk = rhashtable_walk_next(&iter->hti);
2633 if (IS_ERR(nlk)) {
2634 if (PTR_ERR(nlk) == -EAGAIN)
2637 return nlk;
2640 if (nlk)
2649 } while (sock_net(&nlk->sk) != seq_file_net(seq));
2651 return nlk;
2696 struct netlink_sock *nlk = nlk_sk(s);
2701 nlk->portid,
2702 nlk->groups ? (u32)nlk->groups[0] : 0,
2705 READ_ONCE(nlk->cb_running),
2867 const struct netlink_sock *nlk = data;
2870 netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
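Lines 2867-2870 (together with netlink_compare at 496-499) are the rhashtable hooks for the portid lookup table: sockets are keyed by (netns, portid), and lookups build a netlink_compare_arg with the same initializer. For context, the parameter table these functions plug into looks roughly like the following; this is reconstructed from memory of the same file, so the exact field values should be checked against the source:

    static const struct rhashtable_params netlink_rhashtable_params = {
        .head_offset = offsetof(struct netlink_sock, node),
        .key_len = netlink_compare_arg_len,
        .obj_hashfn = netlink_hash,
        .obj_cmpfn = netlink_compare,
        .automatic_shrinking = true,
    };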