Lines Matching defs:idev (struct inet6_dev in the IPv6 multicast/MLD code, net/ipv6/mcast.c)

77 static void mld_ifc_event(struct inet6_dev *idev);
78 static bool mld_in_v1_mode(const struct inet6_dev *idev);
82 static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
85 static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
89 struct inet6_dev *idev);
111 #define mc_dereference(e, idev) \
112 rcu_dereference_protected(e, lockdep_is_held(&(idev)->mc_lock))
128 for (psf = mc_dereference((mc)->mca_sources, mc->idev); \
130 psf = mc_dereference(psf->sf_next, mc->idev))
138 for (psf = mc_dereference((mc)->mca_tomb, mc->idev); \
140 psf = mc_dereference(psf->sf_next, mc->idev))
142 #define for_each_mc_mclock(idev, mc) \
143 for (mc = mc_dereference((idev)->mc_list, idev); \
145 mc = mc_dereference(mc->next, idev))
147 #define for_each_mc_rcu(idev, mc) \
148 for (mc = rcu_dereference((idev)->mc_list); \
152 #define for_each_mc_tomb(idev, mc) \
153 for (mc = mc_dereference((idev)->mc_tomb, idev); \
155 mc = mc_dereference(mc->next, idev))
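
The macros above (lines 111-155) are how every multicast list in this file is walked: mc_dereference() is rcu_dereference_protected() gated on idev->mc_lock being held, and the for_each_mc_* wrappers chain it along mc_list, mc_tomb and the per-group source lists. A minimal userspace sketch of the same iteration shape, assuming a simplified node type and with the lockdep check reduced to an assert:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct ifmcaddr6 and struct inet6_dev. */
struct mc_node { int group; struct mc_node *next; };
struct mc_dev  { pthread_mutex_t mc_lock; struct mc_node *mc_list; int lock_held; };

/* Userspace analog of mc_dereference(e, idev): the kernel version is
 * rcu_dereference_protected(e, lockdep_is_held(&(idev)->mc_lock)). */
#define mc_deref(e, dev) (assert((dev)->lock_held), (e))

/* Analog of for_each_mc_mclock(idev, mc). */
#define for_each_mc(dev, mc) \
	for (mc = mc_deref((dev)->mc_list, dev); mc; mc = mc_deref(mc->next, dev))

int main(void)
{
	struct mc_node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct mc_dev dev = { PTHREAD_MUTEX_INITIALIZER, &a, 0 };
	struct mc_node *mc;

	pthread_mutex_lock(&dev.mc_lock);
	dev.lock_held = 1;		/* what lockdep tracks for real */
	for_each_mc(&dev, mc)
		printf("group %d\n", mc->group);
	dev.lock_held = 0;
	pthread_mutex_unlock(&dev.mc_lock);
	return 0;
}
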
157 static int unsolicited_report_interval(struct inet6_dev *idev)
161 if (mld_in_v1_mode(idev))
162 iv = idev->cnf.mldv1_unsolicited_report_interval;
164 iv = idev->cnf.mldv2_unsolicited_report_interval;
271 struct inet6_dev *idev = __in6_dev_get(dev);
273 ip6_mc_leave_src(sk, mc_lst, idev);
274 if (idev)
275 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
295 struct inet6_dev *idev = NULL;
310 idev = __in6_dev_get(dev);
311 if (!idev)
313 if (idev->dead)
315 return idev;
333 struct inet6_dev *idev = __in6_dev_get(dev);
335 ip6_mc_leave_src(sk, mc_lst, idev);
336 if (idev)
337 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
366 struct inet6_dev *idev;
380 idev = ip6_mc_find_dev_rtnl(net, group, pgsr->gsr_interface);
381 if (!idev)
386 mutex_lock(&idev->mc_lock);
405 ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
406 ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
430 ip6_mc_del_src(idev, group, omode, 1, source, 1);
480 ip6_mc_add_src(idev, group, omode, 1, source, 1);
482 mutex_unlock(&idev->mc_lock);
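
Lines 366-482 are ip6_mc_source(), the per-socket source-filter path: it looks the device up with ip6_mc_find_dev_rtnl(), then adds or removes individual sources via ip6_mc_add_src()/ip6_mc_del_src() under idev->mc_lock. It is driven from userspace by the RFC 3678-style setsockopt() options (note pgsr->gsr_interface at line 380); a small example of the join side, with the interface name and addresses as placeholders:

#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	struct group_source_req gsr;
	struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gsr.gsr_group;
	struct sockaddr_in6 *src = (struct sockaddr_in6 *)&gsr.gsr_source;

	memset(&gsr, 0, sizeof(gsr));
	gsr.gsr_interface = if_nametoindex("eth0");		/* placeholder ifname */
	grp->sin6_family = AF_INET6;
	inet_pton(AF_INET6, "ff3e::1234", &grp->sin6_addr);	/* SSM group */
	src->sin6_family = AF_INET6;
	inet_pton(AF_INET6, "2001:db8::1", &src->sin6_addr);	/* allowed source */

	/* Ends up in ip6_mc_source() -> ip6_mc_add_src() in the kernel. */
	if (setsockopt(fd, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
		       &gsr, sizeof(gsr)) < 0)
		perror("MCAST_JOIN_SOURCE_GROUP");

	close(fd);
	return 0;
}
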
493 struct inet6_dev *idev;
508 idev = ip6_mc_find_dev_rtnl(net, group, gsf->gf_interface);
509 if (!idev)
544 mutex_lock(&idev->mc_lock);
545 err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
548 mutex_unlock(&idev->mc_lock);
553 mutex_unlock(&idev->mc_lock);
556 mutex_lock(&idev->mc_lock);
557 ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
558 mutex_unlock(&idev->mc_lock);
561 mutex_lock(&idev->mc_lock);
564 ip6_mc_del_src(idev, group, pmc->sfmode,
569 ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
572 mutex_unlock(&idev->mc_lock);
670 struct net_device *dev = mc->idev->dev;
686 if (mld_in_v1_mode(mc->idev)) {
697 mc->mca_crcount = mc->idev->mc_qrv;
699 mld_ifc_event(mc->idev);
705 struct net_device *dev = mc->idev->dev;
721 if (!mc->idev->dead)
732 static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
746 pmc->idev = im->idev;
747 in6_dev_hold(idev);
749 pmc->mca_crcount = idev->mc_qrv;
755 mc_dereference(im->mca_tomb, idev));
757 mc_dereference(im->mca_sources, idev));
765 rcu_assign_pointer(pmc->next, idev->mc_tomb);
766 rcu_assign_pointer(idev->mc_tomb, pmc);
770 static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
777 for_each_mc_tomb(idev, pmc) {
786 rcu_assign_pointer(idev->mc_tomb, pmc->next);
790 im->idev = pmc->idev;
793 mc_dereference(pmc->mca_tomb, pmc->idev),
794 lockdep_is_held(&im->idev->mc_lock));
798 mc_dereference(pmc->mca_sources, pmc->idev),
799 lockdep_is_held(&im->idev->mc_lock));
802 psf->sf_crcount = idev->mc_qrv;
804 im->mca_crcount = idev->mc_qrv;
806 in6_dev_put(pmc->idev);
813 static void mld_clear_delrec(struct inet6_dev *idev)
817 pmc = mc_dereference(idev->mc_tomb, idev);
818 RCU_INIT_POINTER(idev->mc_tomb, NULL);
821 nextpmc = mc_dereference(pmc->next, idev);
823 in6_dev_put(pmc->idev);
828 for_each_mc_mclock(idev, pmc) {
831 psf = mc_dereference(pmc->mca_tomb, idev);
834 psf_next = mc_dereference(psf->sf_next, idev);
840 static void mld_clear_query(struct inet6_dev *idev)
844 spin_lock_bh(&idev->mc_query_lock);
845 while ((skb = __skb_dequeue(&idev->mc_query_queue)))
847 spin_unlock_bh(&idev->mc_query_lock);
850 static void mld_clear_report(struct inet6_dev *idev)
854 spin_lock_bh(&idev->mc_report_lock);
855 while ((skb = __skb_dequeue(&idev->mc_report_queue)))
857 spin_unlock_bh(&idev->mc_report_lock);
868 in6_dev_put(mc->idev);
874 static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
887 mc->idev = idev; /* reference taken by caller */
910 struct inet6_dev *idev;
914 /* we need to take a reference on idev */
915 idev = in6_dev_get(dev);
917 if (!idev)
920 if (idev->dead) {
921 in6_dev_put(idev);
925 mutex_lock(&idev->mc_lock);
926 for_each_mc_mclock(idev, mc) {
929 ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
930 mutex_unlock(&idev->mc_lock);
931 in6_dev_put(idev);
936 mc = mca_alloc(idev, addr, mode);
938 mutex_unlock(&idev->mc_lock);
939 in6_dev_put(idev);
943 rcu_assign_pointer(mc->next, idev->mc_list);
944 rcu_assign_pointer(idev->mc_list, mc);
948 mld_del_delrec(idev, mc);
950 mutex_unlock(&idev->mc_lock);
964 int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
970 mutex_lock(&idev->mc_lock);
971 for (map = &idev->mc_list;
972 (ma = mc_dereference(*map, idev));
980 mutex_unlock(&idev->mc_lock);
985 mutex_unlock(&idev->mc_lock);
990 mutex_unlock(&idev->mc_lock);
996 struct inet6_dev *idev;
1001 idev = __in6_dev_get(dev);
1002 if (!idev)
1005 err = __ipv6_dev_mc_dec(idev, addr);
1017 struct inet6_dev *idev;
1022 idev = __in6_dev_get(dev);
1023 if (idev) {
1024 for_each_mc_rcu(idev, mc) {
1051 static void mld_gq_start_work(struct inet6_dev *idev)
1053 unsigned long tv = get_random_u32_below(idev->mc_maxdelay);
1055 idev->mc_gq_running = 1;
1056 if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2))
1057 in6_dev_hold(idev);
1061 static void mld_gq_stop_work(struct inet6_dev *idev)
1063 idev->mc_gq_running = 0;
1064 if (cancel_delayed_work(&idev->mc_gq_work))
1065 __in6_dev_put(idev);
1069 static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay)
1073 if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2))
1074 in6_dev_hold(idev);
1078 static void mld_ifc_stop_work(struct inet6_dev *idev)
1080 idev->mc_ifc_count = 0;
1081 if (cancel_delayed_work(&idev->mc_ifc_work))
1082 __in6_dev_put(idev);
1086 static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay)
1090 if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2))
1091 in6_dev_hold(idev);
1094 static void mld_dad_stop_work(struct inet6_dev *idev)
1096 if (cancel_delayed_work(&idev->mc_dad_work))
1097 __in6_dev_put(idev);
1100 static void mld_query_stop_work(struct inet6_dev *idev)
1102 spin_lock_bh(&idev->mc_query_lock);
1103 if (cancel_delayed_work(&idev->mc_query_work))
1104 __in6_dev_put(idev);
1105 spin_unlock_bh(&idev->mc_query_lock);
1108 static void mld_report_stop_work(struct inet6_dev *idev)
1110 if (cancel_delayed_work_sync(&idev->mc_report_work))
1111 __in6_dev_put(idev);
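
All of the *_start_work()/*_stop_work() helpers above (lines 1051-1111) share one refcount discipline: mod_delayed_work() returns false when the work was idle and has just been queued, so that is the only case in which a reference on idev is taken; cancel_delayed_work() returns true only when it removed a pending work, and only then is the matching reference dropped. A minimal userspace sketch of that pairing, assuming the delayed work is reduced to a pending flag:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct inet6_dev's refcount plus one delayed work. */
struct dev { int refcnt; bool pending; };

static void dev_hold(struct dev *d) { d->refcnt++; }
static void dev_put(struct dev *d)  { d->refcnt--; assert(d->refcnt >= 0); }

/* Analog of mod_delayed_work(): true if the work was already pending. */
static bool mod_work(struct dev *d)    { bool was = d->pending; d->pending = true;  return was; }
/* Analog of cancel_delayed_work(): true if it cancelled a pending work. */
static bool cancel_work(struct dev *d) { bool was = d->pending; d->pending = false; return was; }

static void start_work(struct dev *d)
{
	if (!mod_work(d))	/* idle -> pending: the work now owns a reference */
		dev_hold(d);
}

static void stop_work(struct dev *d)
{
	if (cancel_work(d))	/* pending -> idle: drop the work's reference */
		dev_put(d);
}

int main(void)
{
	struct dev d = { .refcnt = 1, .pending = false };

	start_work(&d);		/* takes a reference */
	start_work(&d);		/* re-arms only, no extra reference */
	stop_work(&d);		/* drops the single reference */
	stop_work(&d);		/* nothing pending, no put */
	printf("refcnt=%d\n", d.refcnt);	/* back to 1 */
	return 0;
}
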
1203 static int mld_force_mld_version(const struct inet6_dev *idev)
1210 if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
1211 return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;
1213 return idev->cnf.force_mld_version;
1216 static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
1218 return mld_force_mld_version(idev) == 2;
1221 static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
1223 return mld_force_mld_version(idev) == 1;
1226 static bool mld_in_v1_mode(const struct inet6_dev *idev)
1228 if (mld_in_v2_mode_only(idev))
1230 if (mld_in_v1_mode_only(idev))
1232 if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
1238 static void mld_set_v1_mode(struct inet6_dev *idev)
1248 switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;
1250 idev->mc_v1_seen = jiffies + switchback;
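
mld_in_v1_mode() (lines 1226-1232) and mld_set_v1_mode() (lines 1238-1250) implement the MLDv1 compatibility timer: receiving an MLDv1 query records a switchback deadline of robustness * query-interval + query-response-interval from now, and the device stays in v1 mode while jiffies has not reached mc_v1_seen, compared with the wraparound-safe time_before(). A small sketch of the same arithmetic, with jiffies modeled as an unsigned counter and illustrative values:

#include <stdio.h>

/* Wrap-safe "a is before b", same idea as the kernel's time_before(). */
#define time_before(a, b) ((long)((a) - (b)) < 0)

int main(void)
{
	unsigned long jiffies = 1000;			/* pretend current time */
	unsigned long qrv = 2, qi = 125, qri = 10;	/* illustrative values  */
	unsigned long mc_v1_seen;

	/* mld_set_v1_mode(): remember when to switch back to MLDv2. */
	mc_v1_seen = jiffies + (qrv * qi + qri);

	/* mld_in_v1_mode(): still in v1 mode while the deadline lies ahead. */
	printf("now:   v1 mode = %d\n", mc_v1_seen && time_before(jiffies, mc_v1_seen));

	jiffies += qrv * qi + qri + 1;			/* advance past the deadline */
	printf("later: v1 mode = %d\n", mc_v1_seen && time_before(jiffies, mc_v1_seen));
	return 0;
}
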
1253 static void mld_update_qrv(struct inet6_dev *idev,
1266 WARN_ON(idev->mc_qrv == 0);
1269 idev->mc_qrv = mlh2->mld2q_qrv;
1271 if (unlikely(idev->mc_qrv < min_qrv)) {
1273 idev->mc_qrv, min_qrv);
1274 idev->mc_qrv = min_qrv;
1278 static void mld_update_qi(struct inet6_dev *idev,
1300 idev->mc_qi = mc_qqi * HZ;
1303 static void mld_update_qri(struct inet6_dev *idev,
1310 idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
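
mld_update_qi() and mld_update_qri() (lines 1278-1310) turn the MLDv2 query's QQIC and Maximum Response Code fields into mc_qi and mc_qri. Per RFC 3810 both fields switch to a mantissa/exponent encoding once their high bit is set; a sketch of the 8-bit QQIC decode (mldv2_mrc() performs the analogous 16-bit decode for the response delay):

#include <stdint.h>
#include <stdio.h>

/* Decode an MLDv2 QQIC field into the Querier's Query Interval in seconds,
 * per RFC 3810: values below 128 are literal, otherwise
 * QQI = (mantissa | 0x10) << (exponent + 3). */
static unsigned int mldv2_qqic_to_secs(uint8_t qqic)
{
	if (qqic < 128)
		return qqic;
	return ((qqic & 0x0f) | 0x10) << (((qqic >> 4) & 0x07) + 3);
}

int main(void)
{
	printf("QQIC 125  -> %u s\n", mldv2_qqic_to_secs(125));	/* literal form     */
	printf("QQIC 0x8f -> %u s\n", mldv2_qqic_to_secs(0x8f));	/* exponential form */
	return 0;
}
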
1313 static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
1319 if (mld_in_v2_mode_only(idev))
1347 mld_set_v1_mode(idev);
1350 mld_gq_stop_work(idev);
1352 mld_ifc_stop_work(idev);
1354 mld_clear_delrec(idev);
1359 static void mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
1364 mld_update_qrv(idev, mld);
1365 mld_update_qi(idev, mld);
1366 mld_update_qri(idev, mld);
1368 idev->mc_maxdelay = *max_delay;
1376 struct inet6_dev *idev = __in6_dev_get(skb->dev);
1378 if (!idev || idev->dead)
1381 spin_lock_bh(&idev->mc_query_lock);
1382 if (skb_queue_len(&idev->mc_query_queue) < MLD_MAX_SKBS) {
1383 __skb_queue_tail(&idev->mc_query_queue, skb);
1384 if (!mod_delayed_work(mld_wq, &idev->mc_query_work, 0))
1385 in6_dev_hold(idev);
1388 spin_unlock_bh(&idev->mc_query_lock);
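
Lines 1376-1388 are the query receive hook: under mc_query_lock the skb is appended to mc_query_queue only while the queue holds fewer than MLD_MAX_SKBS packets, and mc_query_work is kicked with delay 0, taking an idev reference only when the work was idle (the same hold/put rule as the start/stop helpers above); parsing is deferred to the work item. A userspace sketch of that bounded enqueue-and-kick shape, with the skb queue reduced to a counter and the work to a flag:

#include <stdbool.h>
#include <stdio.h>

#define MAX_SKBS 32		/* analog of MLD_MAX_SKBS */

/* Hypothetical per-device receive state: a bounded queue and one work item. */
struct dev { int queued; bool work_pending; int refcnt; };

/* Returns true if the packet was accepted, false if it was dropped. */
static bool query_rcv(struct dev *d)
{
	if (d->queued >= MAX_SKBS)
		return false;		/* queue full: drop (kfree_skb() in the kernel) */
	d->queued++;
	if (!d->work_pending) {		/* idle -> pending: the work owns a reference */
		d->work_pending = true;
		d->refcnt++;
	}
	return true;
}

/* The deferred work: drain the queue, then drop the work's reference. */
static void query_work(struct dev *d)
{
	while (d->queued)
		d->queued--;		/* stand-in for parsing one queued query */
	d->work_pending = false;
	d->refcnt--;
}

int main(void)
{
	struct dev d = { 0, false, 1 };
	int i, accepted = 0;

	for (i = 0; i < 40; i++)	/* 40 arrivals, only MAX_SKBS fit */
		accepted += query_rcv(&d);
	printf("accepted %d of 40, refcnt %d\n", accepted, d.refcnt);
	query_work(&d);
	printf("after work: queued %d, refcnt %d\n", d.queued, d.refcnt);
	return 0;
}
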
1398 struct inet6_dev *idev;
1425 idev = in6_dev_get(skb->dev);
1426 if (!idev)
1439 } else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
1440 err = mld_process_v1(idev, mld, &max_delay,
1453 mld_process_v2(idev, mlh2, &max_delay);
1459 mld_gq_start_work(idev);
1476 for_each_mc_mclock(idev, ma) {
1480 for_each_mc_mclock(idev, ma) {
1502 in6_dev_put(idev);
1509 struct inet6_dev *idev = container_of(to_delayed_work(work),
1519 spin_lock_bh(&idev->mc_query_lock);
1520 while ((skb = __skb_dequeue(&idev->mc_query_queue))) {
1528 spin_unlock_bh(&idev->mc_query_lock);
1530 mutex_lock(&idev->mc_lock);
1533 mutex_unlock(&idev->mc_lock);
1535 if (rework && queue_delayed_work(mld_wq, &idev->mc_query_work, 0))
1538 in6_dev_put(idev);
1544 struct inet6_dev *idev = __in6_dev_get(skb->dev);
1546 if (!idev || idev->dead)
1549 spin_lock_bh(&idev->mc_report_lock);
1550 if (skb_queue_len(&idev->mc_report_queue) < MLD_MAX_SKBS) {
1551 __skb_queue_tail(&idev->mc_report_queue, skb);
1552 if (!mod_delayed_work(mld_wq, &idev->mc_report_work, 0))
1553 in6_dev_hold(idev);
1556 spin_unlock_bh(&idev->mc_report_lock);
1563 struct inet6_dev *idev;
1588 idev = in6_dev_get(skb->dev);
1589 if (!idev)
1596 for_each_mc_mclock(idev, ma) {
1606 in6_dev_put(idev);
1613 struct inet6_dev *idev = container_of(to_delayed_work(work),
1622 spin_lock_bh(&idev->mc_report_lock);
1623 while ((skb = __skb_dequeue(&idev->mc_report_queue))) {
1631 spin_unlock_bh(&idev->mc_report_lock);
1633 mutex_lock(&idev->mc_lock);
1636 mutex_unlock(&idev->mc_lock);
1638 if (rework && queue_delayed_work(mld_wq, &idev->mc_report_work, 0))
1641 in6_dev_put(idev);
1725 static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
1729 struct net_device *dev = idev->dev;
1784 struct inet6_dev *idev;
1791 idev = __in6_dev_get(skb->dev);
1792 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
1823 ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
1824 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1826 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1849 skb = mld_newpack(pmc->idev, mtu);
1874 struct inet6_dev *idev = pmc->idev;
1875 struct net_device *dev = idev->dev;
1907 skb = mld_newpack(idev, mtu);
1912 for (psf = mc_dereference(*psf_list, idev);
1917 psf_next = mc_dereference(psf->sf_next, idev);
1945 skb = mld_newpack(idev, mtu);
1965 mc_dereference(psf->sf_next, idev));
1968 mc_dereference(psf->sf_next, idev));
1999 static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
2005 for_each_mc_mclock(idev, pmc) {
2029 static void mld_clear_zeros(struct ip6_sf_list __rcu **ppsf, struct inet6_dev *idev)
2034 for (psf = mc_dereference(*ppsf, idev);
2037 psf_next = mc_dereference(psf->sf_next, idev);
2041 mc_dereference(psf->sf_next, idev));
2044 mc_dereference(psf->sf_next, idev));
2053 static void mld_send_cr(struct inet6_dev *idev)
2061 for (pmc = mc_dereference(idev->mc_tomb, idev);
2064 pmc_next = mc_dereference(pmc->next, idev);
2078 mld_clear_zeros(&pmc->mca_tomb, idev);
2079 mld_clear_zeros(&pmc->mca_sources, idev);
2088 rcu_assign_pointer(idev->mc_tomb, pmc_next);
2089 in6_dev_put(pmc->idev);
2096 for_each_mc_mclock(idev, pmc) {
2126 struct inet6_dev *idev;
2188 idev = __in6_dev_get(skb->dev);
2205 ICMP6MSGOUT_INC_STATS(net, idev, type);
2206 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
2208 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
2219 static void mld_send_initial_cr(struct inet6_dev *idev)
2225 if (mld_in_v1_mode(idev))
2229 for_each_mc_mclock(idev, pmc) {
2240 void ipv6_mc_dad_complete(struct inet6_dev *idev)
2242 mutex_lock(&idev->mc_lock);
2243 idev->mc_dad_count = idev->mc_qrv;
2244 if (idev->mc_dad_count) {
2245 mld_send_initial_cr(idev);
2246 idev->mc_dad_count--;
2247 if (idev->mc_dad_count)
2248 mld_dad_start_work(idev,
2249 unsolicited_report_interval(idev));
2251 mutex_unlock(&idev->mc_lock);
2256 struct inet6_dev *idev = container_of(to_delayed_work(work),
2259 mutex_lock(&idev->mc_lock);
2260 mld_send_initial_cr(idev);
2261 if (idev->mc_dad_count) {
2262 idev->mc_dad_count--;
2263 if (idev->mc_dad_count)
2264 mld_dad_start_work(idev,
2265 unsolicited_report_interval(idev));
2267 mutex_unlock(&idev->mc_lock);
2268 in6_dev_put(idev);
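
ipv6_mc_dad_complete() and mld_dad_work() (lines 2240-2268) implement the unsolicited-report retransmissions after DAD finishes: mc_dad_count is loaded with the robustness value (mc_qrv), one report goes out immediately, and the work re-arms itself at the unsolicited report interval until the count reaches zero. A minimal userspace sketch of that countdown, assuming stand-in send/interval helpers:

#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-ins: one report transmission and the report interval. */
static void send_initial_cr(void)              { puts("send unsolicited report"); }
static unsigned int report_interval_secs(void) { return 1; }

int main(void)
{
	int qrv = 2;			/* robustness: number of transmissions   */
	int dad_count = qrv;		/* analog of idev->mc_dad_count          */

	/* ipv6_mc_dad_complete(): first report right away... */
	send_initial_cr();
	dad_count--;

	/* ...then mld_dad_work() repeats until the count is used up. */
	while (dad_count) {
		sleep(report_interval_secs());
		send_initial_cr();
		dad_count--;
	}
	return 0;
}
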
2290 struct inet6_dev *idev = pmc->idev;
2295 mc_dereference(psf->sf_next, idev));
2298 mc_dereference(psf->sf_next, idev));
2301 !mld_in_v1_mode(idev)) {
2302 psf->sf_crcount = idev->mc_qrv;
2304 mc_dereference(pmc->mca_tomb, idev));
2315 static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2323 if (!idev)
2326 for_each_mc_mclock(idev, pmc) {
2355 pmc->mca_crcount = idev->mc_qrv;
2356 idev->mc_ifc_count = pmc->mca_crcount;
2359 mld_ifc_event(pmc->idev);
2361 mld_ifc_event(pmc->idev);
2420 int qrv = pmc->idev->mc_qrv;
2444 pmc->idev));
2448 pmc->idev));
2471 mc_dereference(pmc->mca_tomb, pmc->idev));
2485 static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2493 if (!idev)
2496 for_each_mc_mclock(idev, pmc) {
2530 pmc->mca_crcount = idev->mc_qrv;
2531 idev->mc_ifc_count = pmc->mca_crcount;
2534 mld_ifc_event(idev);
2536 mld_ifc_event(idev);
2546 for (psf = mc_dereference(pmc->mca_tomb, pmc->idev);
2549 nextpsf = mc_dereference(psf->sf_next, pmc->idev);
2553 for (psf = mc_dereference(pmc->mca_sources, pmc->idev);
2556 nextpsf = mc_dereference(psf->sf_next, pmc->idev);
2573 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2575 delay = get_random_u32_below(unsolicited_report_interval(ma->idev));
2588 struct inet6_dev *idev)
2595 if (idev)
2596 mutex_lock(&idev->mc_lock);
2600 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
2602 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
2610 if (idev)
2611 mutex_unlock(&idev->mc_lock);
2619 if (mld_in_v1_mode(ma->idev)) {
2621 igmp6_send(&ma->mca_addr, ma->idev->dev,
2625 mld_add_delrec(ma->idev, ma);
2626 mld_ifc_event(ma->idev);
2632 struct inet6_dev *idev = container_of(to_delayed_work(work),
2636 mutex_lock(&idev->mc_lock);
2637 mld_send_report(idev, NULL);
2638 idev->mc_gq_running = 0;
2639 mutex_unlock(&idev->mc_lock);
2641 in6_dev_put(idev);
2646 struct inet6_dev *idev = container_of(to_delayed_work(work),
2650 mutex_lock(&idev->mc_lock);
2651 mld_send_cr(idev);
2653 if (idev->mc_ifc_count) {
2654 idev->mc_ifc_count--;
2655 if (idev->mc_ifc_count)
2656 mld_ifc_start_work(idev,
2657 unsolicited_report_interval(idev));
2659 mutex_unlock(&idev->mc_lock);
2660 in6_dev_put(idev);
2664 static void mld_ifc_event(struct inet6_dev *idev)
2666 if (mld_in_v1_mode(idev))
2669 idev->mc_ifc_count = idev->mc_qrv;
2670 mld_ifc_start_work(idev, 1);
2678 mutex_lock(&ma->idev->mc_lock);
2679 if (mld_in_v1_mode(ma->idev))
2680 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2682 mld_send_report(ma->idev, ma);
2685 mutex_unlock(&ma->idev->mc_lock);
2692 void ipv6_mc_unmap(struct inet6_dev *idev)
2698 mutex_lock(&idev->mc_lock);
2699 for_each_mc_mclock(idev, i)
2701 mutex_unlock(&idev->mc_lock);
2704 void ipv6_mc_remap(struct inet6_dev *idev)
2706 ipv6_mc_up(idev);
2710 void ipv6_mc_down(struct inet6_dev *idev)
2714 mutex_lock(&idev->mc_lock);
2716 for_each_mc_mclock(idev, i)
2718 mutex_unlock(&idev->mc_lock);
2723 mld_query_stop_work(idev);
2724 mld_report_stop_work(idev);
2726 mutex_lock(&idev->mc_lock);
2727 mld_ifc_stop_work(idev);
2728 mld_gq_stop_work(idev);
2729 mutex_unlock(&idev->mc_lock);
2731 mld_dad_stop_work(idev);
2734 static void ipv6_mc_reset(struct inet6_dev *idev)
2736 idev->mc_qrv = sysctl_mld_qrv;
2737 idev->mc_qi = MLD_QI_DEFAULT;
2738 idev->mc_qri = MLD_QRI_DEFAULT;
2739 idev->mc_v1_seen = 0;
2740 idev->mc_maxdelay = unsolicited_report_interval(idev);
2745 void ipv6_mc_up(struct inet6_dev *idev)
2751 ipv6_mc_reset(idev);
2752 mutex_lock(&idev->mc_lock);
2753 for_each_mc_mclock(idev, i) {
2754 mld_del_delrec(idev, i);
2757 mutex_unlock(&idev->mc_lock);
2762 void ipv6_mc_init_dev(struct inet6_dev *idev)
2764 idev->mc_gq_running = 0;
2765 INIT_DELAYED_WORK(&idev->mc_gq_work, mld_gq_work);
2766 RCU_INIT_POINTER(idev->mc_tomb, NULL);
2767 idev->mc_ifc_count = 0;
2768 INIT_DELAYED_WORK(&idev->mc_ifc_work, mld_ifc_work);
2769 INIT_DELAYED_WORK(&idev->mc_dad_work, mld_dad_work);
2770 INIT_DELAYED_WORK(&idev->mc_query_work, mld_query_work);
2771 INIT_DELAYED_WORK(&idev->mc_report_work, mld_report_work);
2772 skb_queue_head_init(&idev->mc_query_queue);
2773 skb_queue_head_init(&idev->mc_report_queue);
2774 spin_lock_init(&idev->mc_query_lock);
2775 spin_lock_init(&idev->mc_report_lock);
2776 mutex_init(&idev->mc_lock);
2777 ipv6_mc_reset(idev);
2784 void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2789 ipv6_mc_down(idev);
2790 mutex_lock(&idev->mc_lock);
2791 mld_clear_delrec(idev);
2792 mutex_unlock(&idev->mc_lock);
2793 mld_clear_query(idev);
2794 mld_clear_report(idev);
2801 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);
2803 if (idev->cnf.forwarding)
2804 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);
2806 mutex_lock(&idev->mc_lock);
2807 while ((i = mc_dereference(idev->mc_list, idev))) {
2808 rcu_assign_pointer(idev->mc_list, mc_dereference(i->next, idev));
2813 mutex_unlock(&idev->mc_lock);
2816 static void ipv6_mc_rejoin_groups(struct inet6_dev *idev)
2822 mutex_lock(&idev->mc_lock);
2823 if (mld_in_v1_mode(idev)) {
2824 for_each_mc_mclock(idev, pmc)
2827 mld_send_report(idev, NULL);
2829 mutex_unlock(&idev->mc_lock);
2837 struct inet6_dev *idev = __in6_dev_get(dev);
2841 if (idev)
2842 ipv6_mc_rejoin_groups(idev);
2859 struct inet6_dev *idev;
2870 state->idev = NULL;
2872 struct inet6_dev *idev;
2873 idev = __in6_dev_get(state->dev);
2874 if (!idev)
2877 im = rcu_dereference(idev->mc_list);
2879 state->idev = idev;
2894 state->idev = NULL;
2897 state->idev = __in6_dev_get(state->dev);
2898 if (!state->idev)
2900 im = rcu_dereference(state->idev->mc_list);
2934 if (likely(state->idev))
2935 state->idev = NULL;
2965 struct inet6_dev *idev;
2978 state->idev = NULL;
2981 struct inet6_dev *idev;
2982 idev = __in6_dev_get(state->dev);
2983 if (unlikely(idev == NULL))
2986 im = rcu_dereference(idev->mc_list);
2991 state->idev = idev;
3009 state->idev = NULL;
3012 state->idev = __in6_dev_get(state->dev);
3013 if (!state->idev)
3015 state->im = rcu_dereference(state->idev->mc_list);
3059 if (likely(state->idev))
3060 state->idev = NULL;