Lines matching refs:idev — net/ipv6/mcast.c (Linux IPv6 multicast / MLD)
The leading number on each entry is the source line in mcast.c as reported by the cross-reference tool; the trailing tags (in func(), argument, local, member) are the tool's context annotations.
77 static void mld_ifc_event(struct inet6_dev *idev);
78 static bool mld_in_v1_mode(const struct inet6_dev *idev);
82 static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
85 static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
89 struct inet6_dev *idev);
111 #define mc_dereference(e, idev) \ argument
112 rcu_dereference_protected(e, lockdep_is_held(&(idev)->mc_lock))
128 for (psf = mc_dereference((mc)->mca_sources, mc->idev); \
130 psf = mc_dereference(psf->sf_next, mc->idev))
138 for (psf = mc_dereference((mc)->mca_tomb, mc->idev); \
140 psf = mc_dereference(psf->sf_next, mc->idev))
142 #define for_each_mc_mclock(idev, mc) \ argument
143 for (mc = mc_dereference((idev)->mc_list, idev); \
145 mc = mc_dereference(mc->next, idev))
147 #define for_each_mc_rcu(idev, mc) \ argument
148 for (mc = rcu_dereference((idev)->mc_list); \
152 #define for_each_mc_tomb(idev, mc) \ argument
153 for (mc = mc_dereference((idev)->mc_tomb, idev); \
155 mc = mc_dereference(mc->next, idev))
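These macros encode the locking contract for the multicast lists: mc_list, mc_tomb and each group's source lists are RCU pointers whose updaters hold idev->mc_lock, which mc_dereference() asserts through lockdep, while for_each_mc_rcu() is the read-side walk. A minimal usage sketch modeled on ipv6_chk_mcast_addr() further down (group_joined is a hypothetical helper, not kernel code):

        /* Hypothetical read-side lookup: walk idev->mc_list under RCU. */
        static bool group_joined(struct inet6_dev *idev,
                                 const struct in6_addr *addr)
        {
                struct ifmcaddr6 *mc;
                bool found = false;

                rcu_read_lock();
                for_each_mc_rcu(idev, mc) {
                        if (ipv6_addr_equal(&mc->mca_addr, addr)) {
                                found = true;
                                break;
                        }
                }
                rcu_read_unlock();
                return found;
        }

Updaters take the mutex instead and use for_each_mc_mclock(), as __ipv6_dev_mc_inc() does below (mutex_lock(&idev->mc_lock); for_each_mc_mclock(idev, mc) ...).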
157 static int unsolicited_report_interval(struct inet6_dev *idev) in unsolicited_report_interval() argument
161 if (mld_in_v1_mode(idev)) in unsolicited_report_interval()
162 iv = idev->cnf.mldv1_unsolicited_report_interval; in unsolicited_report_interval()
164 iv = idev->cnf.mldv2_unsolicited_report_interval; in unsolicited_report_interval()
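The tool elides this helper's tail; in mainline it appears to end by clamping to a minimum of one jiffy, so the whole function is roughly as follows (a reconstruction; the devconf values are stored in jiffies while the sysctl is exposed in milliseconds):

        static int unsolicited_report_interval(struct inet6_dev *idev)
        {
                int iv;

                if (mld_in_v1_mode(idev))
                        iv = idev->cnf.mldv1_unsolicited_report_interval;
                else
                        iv = idev->cnf.mldv2_unsolicited_report_interval;

                return iv > 0 ? iv : 1;         /* jiffies, never zero */
        }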
271 struct inet6_dev *idev = __in6_dev_get(dev); in ipv6_sock_mc_drop() local
273 ip6_mc_leave_src(sk, mc_lst, idev); in ipv6_sock_mc_drop()
274 if (idev) in ipv6_sock_mc_drop()
275 __ipv6_dev_mc_dec(idev, &mc_lst->addr); in ipv6_sock_mc_drop()
295 struct inet6_dev *idev = NULL; in ip6_mc_find_dev_rtnl() local
310 idev = __in6_dev_get(dev); in ip6_mc_find_dev_rtnl()
311 if (!idev) in ip6_mc_find_dev_rtnl()
313 if (idev->dead) in ip6_mc_find_dev_rtnl()
315 return idev; in ip6_mc_find_dev_rtnl()
333 struct inet6_dev *idev = __in6_dev_get(dev); in __ipv6_sock_mc_close() local
335 ip6_mc_leave_src(sk, mc_lst, idev); in __ipv6_sock_mc_close()
336 if (idev) in __ipv6_sock_mc_close()
337 __ipv6_dev_mc_dec(idev, &mc_lst->addr); in __ipv6_sock_mc_close()
366 struct inet6_dev *idev; in ip6_mc_source() local
380 idev = ip6_mc_find_dev_rtnl(net, group, pgsr->gsr_interface); in ip6_mc_source()
381 if (!idev) in ip6_mc_source()
386 mutex_lock(&idev->mc_lock); in ip6_mc_source()
405 ip6_mc_add_src(idev, group, omode, 0, NULL, 0); in ip6_mc_source()
406 ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0); in ip6_mc_source()
430 ip6_mc_del_src(idev, group, omode, 1, source, 1); in ip6_mc_source()
480 ip6_mc_add_src(idev, group, omode, 1, source, 1); in ip6_mc_source()
482 mutex_unlock(&idev->mc_lock); in ip6_mc_source()
493 struct inet6_dev *idev; in ip6_mc_msfilter() local
508 idev = ip6_mc_find_dev_rtnl(net, group, gsf->gf_interface); in ip6_mc_msfilter()
509 if (!idev) in ip6_mc_msfilter()
544 mutex_lock(&idev->mc_lock); in ip6_mc_msfilter()
545 err = ip6_mc_add_src(idev, group, gsf->gf_fmode, in ip6_mc_msfilter()
548 mutex_unlock(&idev->mc_lock); in ip6_mc_msfilter()
553 mutex_unlock(&idev->mc_lock); in ip6_mc_msfilter()
556 mutex_lock(&idev->mc_lock); in ip6_mc_msfilter()
557 ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0); in ip6_mc_msfilter()
558 mutex_unlock(&idev->mc_lock); in ip6_mc_msfilter()
561 mutex_lock(&idev->mc_lock); in ip6_mc_msfilter()
564 ip6_mc_del_src(idev, group, pmc->sfmode, in ip6_mc_msfilter()
570 ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0); in ip6_mc_msfilter()
572 mutex_unlock(&idev->mc_lock); in ip6_mc_msfilter()
670 struct net_device *dev = mc->idev->dev; in igmp6_group_added()
686 if (mld_in_v1_mode(mc->idev)) { in igmp6_group_added()
697 mc->mca_crcount = mc->idev->mc_qrv; in igmp6_group_added()
699 mld_ifc_event(mc->idev); in igmp6_group_added()
705 struct net_device *dev = mc->idev->dev; in igmp6_group_dropped()
721 if (!mc->idev->dead) in igmp6_group_dropped()
732 static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) in mld_add_delrec() argument
746 pmc->idev = im->idev; in mld_add_delrec()
747 in6_dev_hold(idev); in mld_add_delrec()
749 pmc->mca_crcount = idev->mc_qrv; in mld_add_delrec()
755 mc_dereference(im->mca_tomb, idev)); in mld_add_delrec()
757 mc_dereference(im->mca_sources, idev)); in mld_add_delrec()
765 rcu_assign_pointer(pmc->next, idev->mc_tomb); in mld_add_delrec()
766 rcu_assign_pointer(idev->mc_tomb, pmc); in mld_add_delrec()
770 static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) in mld_del_delrec() argument
777 for_each_mc_tomb(idev, pmc) { in mld_del_delrec()
786 rcu_assign_pointer(idev->mc_tomb, pmc->next); in mld_del_delrec()
790 im->idev = pmc->idev; in mld_del_delrec()
793 mc_dereference(pmc->mca_tomb, pmc->idev), in mld_del_delrec()
794 lockdep_is_held(&im->idev->mc_lock)); in mld_del_delrec()
798 mc_dereference(pmc->mca_sources, pmc->idev), in mld_del_delrec()
799 lockdep_is_held(&im->idev->mc_lock)); in mld_del_delrec()
802 psf->sf_crcount = idev->mc_qrv; in mld_del_delrec()
804 im->mca_crcount = idev->mc_qrv; in mld_del_delrec()
806 in6_dev_put(pmc->idev); in mld_del_delrec()
813 static void mld_clear_delrec(struct inet6_dev *idev) in mld_clear_delrec() argument
817 pmc = mc_dereference(idev->mc_tomb, idev); in mld_clear_delrec()
818 RCU_INIT_POINTER(idev->mc_tomb, NULL); in mld_clear_delrec()
821 nextpmc = mc_dereference(pmc->next, idev); in mld_clear_delrec()
823 in6_dev_put(pmc->idev); in mld_clear_delrec()
828 for_each_mc_mclock(idev, pmc) { in mld_clear_delrec()
831 psf = mc_dereference(pmc->mca_tomb, idev); in mld_clear_delrec()
834 psf_next = mc_dereference(psf->sf_next, idev); in mld_clear_delrec()
840 static void mld_clear_query(struct inet6_dev *idev) in mld_clear_query() argument
844 spin_lock_bh(&idev->mc_query_lock); in mld_clear_query()
845 while ((skb = __skb_dequeue(&idev->mc_query_queue))) in mld_clear_query()
847 spin_unlock_bh(&idev->mc_query_lock); in mld_clear_query()
850 static void mld_clear_report(struct inet6_dev *idev) in mld_clear_report() argument
854 spin_lock_bh(&idev->mc_report_lock); in mld_clear_report()
855 while ((skb = __skb_dequeue(&idev->mc_report_queue))) in mld_clear_report()
857 spin_unlock_bh(&idev->mc_report_lock); in mld_clear_report()
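Both clear helpers drain their skb queue under the matching _bh spinlock; the elided loop body just frees each buffer. Reconstructed (mld_clear_report() is identical modulo mc_report_queue/mc_report_lock):

        static void mld_clear_query(struct inet6_dev *idev)
        {
                struct sk_buff *skb;

                spin_lock_bh(&idev->mc_query_lock);
                while ((skb = __skb_dequeue(&idev->mc_query_queue)))
                        kfree_skb(skb);
                spin_unlock_bh(&idev->mc_query_lock);
        }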
868 in6_dev_put(mc->idev); in ma_put()
874 static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev, in mca_alloc() argument
887 mc->idev = idev; /* reference taken by caller */ in mca_alloc()
910 struct inet6_dev *idev; in __ipv6_dev_mc_inc() local
915 idev = in6_dev_get(dev); in __ipv6_dev_mc_inc()
917 if (!idev) in __ipv6_dev_mc_inc()
920 if (idev->dead) { in __ipv6_dev_mc_inc()
921 in6_dev_put(idev); in __ipv6_dev_mc_inc()
925 mutex_lock(&idev->mc_lock); in __ipv6_dev_mc_inc()
926 for_each_mc_mclock(idev, mc) { in __ipv6_dev_mc_inc()
929 ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0); in __ipv6_dev_mc_inc()
930 mutex_unlock(&idev->mc_lock); in __ipv6_dev_mc_inc()
931 in6_dev_put(idev); in __ipv6_dev_mc_inc()
936 mc = mca_alloc(idev, addr, mode); in __ipv6_dev_mc_inc()
938 mutex_unlock(&idev->mc_lock); in __ipv6_dev_mc_inc()
939 in6_dev_put(idev); in __ipv6_dev_mc_inc()
943 rcu_assign_pointer(mc->next, idev->mc_list); in __ipv6_dev_mc_inc()
944 rcu_assign_pointer(idev->mc_list, mc); in __ipv6_dev_mc_inc()
948 mld_del_delrec(idev, mc); in __ipv6_dev_mc_inc()
950 mutex_unlock(&idev->mc_lock); in __ipv6_dev_mc_inc()
964 int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr) in __ipv6_dev_mc_dec() argument
970 mutex_lock(&idev->mc_lock); in __ipv6_dev_mc_dec()
971 for (map = &idev->mc_list; in __ipv6_dev_mc_dec()
972 (ma = mc_dereference(*map, idev)); in __ipv6_dev_mc_dec()
980 mutex_unlock(&idev->mc_lock); in __ipv6_dev_mc_dec()
985 mutex_unlock(&idev->mc_lock); in __ipv6_dev_mc_dec()
990 mutex_unlock(&idev->mc_lock); in __ipv6_dev_mc_dec()
996 struct inet6_dev *idev; in ipv6_dev_mc_dec() local
1001 idev = __in6_dev_get(dev); in ipv6_dev_mc_dec()
1002 if (!idev) in ipv6_dev_mc_dec()
1005 err = __ipv6_dev_mc_dec(idev, addr); in ipv6_dev_mc_dec()
1017 struct inet6_dev *idev; in ipv6_chk_mcast_addr() local
1022 idev = __in6_dev_get(dev); in ipv6_chk_mcast_addr()
1023 if (idev) { in ipv6_chk_mcast_addr()
1024 for_each_mc_rcu(idev, mc) { in ipv6_chk_mcast_addr()
1051 static void mld_gq_start_work(struct inet6_dev *idev) in mld_gq_start_work() argument
1053 unsigned long tv = prandom_u32() % idev->mc_maxdelay; in mld_gq_start_work()
1055 idev->mc_gq_running = 1; in mld_gq_start_work()
1056 if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2)) in mld_gq_start_work()
1057 in6_dev_hold(idev); in mld_gq_start_work()
1061 static void mld_gq_stop_work(struct inet6_dev *idev) in mld_gq_stop_work() argument
1063 idev->mc_gq_running = 0; in mld_gq_stop_work()
1064 if (cancel_delayed_work(&idev->mc_gq_work)) in mld_gq_stop_work()
1065 __in6_dev_put(idev); in mld_gq_stop_work()
1069 static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay) in mld_ifc_start_work() argument
1073 if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2)) in mld_ifc_start_work()
1074 in6_dev_hold(idev); in mld_ifc_start_work()
1078 static void mld_ifc_stop_work(struct inet6_dev *idev) in mld_ifc_stop_work() argument
1080 idev->mc_ifc_count = 0; in mld_ifc_stop_work()
1081 if (cancel_delayed_work(&idev->mc_ifc_work)) in mld_ifc_stop_work()
1082 __in6_dev_put(idev); in mld_ifc_stop_work()
1086 static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay) in mld_dad_start_work() argument
1090 if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2)) in mld_dad_start_work()
1091 in6_dev_hold(idev); in mld_dad_start_work()
1094 static void mld_dad_stop_work(struct inet6_dev *idev) in mld_dad_stop_work() argument
1096 if (cancel_delayed_work(&idev->mc_dad_work)) in mld_dad_stop_work()
1097 __in6_dev_put(idev); in mld_dad_stop_work()
1100 static void mld_query_stop_work(struct inet6_dev *idev) in mld_query_stop_work() argument
1102 spin_lock_bh(&idev->mc_query_lock); in mld_query_stop_work()
1103 if (cancel_delayed_work(&idev->mc_query_work)) in mld_query_stop_work()
1104 __in6_dev_put(idev); in mld_query_stop_work()
1105 spin_unlock_bh(&idev->mc_query_lock); in mld_query_stop_work()
1108 static void mld_report_stop_work(struct inet6_dev *idev) in mld_report_stop_work() argument
1110 if (cancel_delayed_work_sync(&idev->mc_report_work)) in mld_report_stop_work()
1111 __in6_dev_put(idev); in mld_report_stop_work()
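All of these start/stop helpers share one reference rule: a queued delayed work pins the inet6_dev. mod_delayed_work() returns false only when the work was idle and has just been queued, which is exactly when a reference is taken; cancel_delayed_work() returns true only when it removed a pending work, which is exactly when the stop side drops that reference; the work functions themselves call in6_dev_put() when they complete (see mld_gq_work() and friends near the end of this listing). A sketch of the pattern, with the elided delay line filled in by analogy with mld_gq_start_work() above:

        static void mld_ifc_start_work(struct inet6_dev *idev,
                                       unsigned long delay)
        {
                unsigned long tv = prandom_u32() % delay;  /* randomize, per the MLD RFCs */

                /* false => was idle, now queued: take a reference */
                if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2))
                        in6_dev_hold(idev);
        }

        static void mld_ifc_stop_work(struct inet6_dev *idev)
        {
                idev->mc_ifc_count = 0;
                /* true => a pending work was cancelled: drop its reference */
                if (cancel_delayed_work(&idev->mc_ifc_work))
                        __in6_dev_put(idev);
        }

Note that mld_report_stop_work() is the odd one out: it uses cancel_delayed_work_sync(), since it must not return while mld_report_work() is still running.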
1203 static int mld_force_mld_version(const struct inet6_dev *idev) in mld_force_mld_version() argument
1210 if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0) in mld_force_mld_version()
1211 return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version; in mld_force_mld_version()
1213 return idev->cnf.force_mld_version; in mld_force_mld_version()
1216 static bool mld_in_v2_mode_only(const struct inet6_dev *idev) in mld_in_v2_mode_only() argument
1218 return mld_force_mld_version(idev) == 2; in mld_in_v2_mode_only()
1221 static bool mld_in_v1_mode_only(const struct inet6_dev *idev) in mld_in_v1_mode_only() argument
1223 return mld_force_mld_version(idev) == 1; in mld_in_v1_mode_only()
1226 static bool mld_in_v1_mode(const struct inet6_dev *idev) in mld_in_v1_mode() argument
1228 if (mld_in_v2_mode_only(idev)) in mld_in_v1_mode()
1230 if (mld_in_v1_mode_only(idev)) in mld_in_v1_mode()
1232 if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen)) in mld_in_v1_mode()
1238 static void mld_set_v1_mode(struct inet6_dev *idev) in mld_set_v1_mode() argument
1248 switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri; in mld_set_v1_mode()
1250 idev->mc_v1_seen = jiffies + switchback; in mld_set_v1_mode()
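Version selection with the elided returns filled in (a reconstruction): a forced version, per-device or devconf_all, always wins; otherwise hearing an MLDv1 query puts the interface into v1 compatibility mode until the RFC 3810 9.12 "Older Version Querier Present Timeout" expires, i.e. [Robustness Variable] * [Query Interval] + [Query Response Interval], which is exactly the switchback computed above:

        static bool mld_in_v1_mode(const struct inet6_dev *idev)
        {
                if (mld_in_v2_mode_only(idev))
                        return false;
                if (mld_in_v1_mode_only(idev))
                        return true;
                if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
                        return true;

                return false;
        }

        static void mld_set_v1_mode(struct inet6_dev *idev)
        {
                unsigned long switchback;

                if (mld_in_v2_mode_only(idev))
                        return;

                /* RFC 3810 9.12: QRV * QI + QRI (all in jiffies here) */
                switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;

                idev->mc_v1_seen = jiffies + switchback;
        }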
1253 static void mld_update_qrv(struct inet6_dev *idev, in mld_update_qrv() argument
1266 WARN_ON(idev->mc_qrv == 0); in mld_update_qrv()
1269 idev->mc_qrv = mlh2->mld2q_qrv; in mld_update_qrv()
1271 if (unlikely(idev->mc_qrv < min_qrv)) { in mld_update_qrv()
1273 idev->mc_qrv, min_qrv); in mld_update_qrv()
1274 idev->mc_qrv = min_qrv; in mld_update_qrv()
1278 static void mld_update_qi(struct inet6_dev *idev, in mld_update_qi() argument
1300 idev->mc_qi = mc_qqi * HZ; in mld_update_qi()
1303 static void mld_update_qri(struct inet6_dev *idev, in mld_update_qri() argument
1310 idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2)); in mld_update_qri()
1313 static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld, in mld_process_v1() argument
1319 if (mld_in_v2_mode_only(idev)) in mld_process_v1()
1347 mld_set_v1_mode(idev); in mld_process_v1()
1350 mld_gq_stop_work(idev); in mld_process_v1()
1352 mld_ifc_stop_work(idev); in mld_process_v1()
1354 mld_clear_delrec(idev); in mld_process_v1()
1359 static void mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld, in mld_process_v2() argument
1364 mld_update_qrv(idev, mld); in mld_process_v2()
1365 mld_update_qi(idev, mld); in mld_process_v2()
1366 mld_update_qri(idev, mld); in mld_process_v2()
1368 idev->mc_maxdelay = *max_delay; in mld_process_v2()
1376 struct inet6_dev *idev = __in6_dev_get(skb->dev); in igmp6_event_query() local
1378 if (!idev) in igmp6_event_query()
1381 if (idev->dead) { in igmp6_event_query()
1386 spin_lock_bh(&idev->mc_query_lock); in igmp6_event_query()
1387 if (skb_queue_len(&idev->mc_query_queue) < MLD_MAX_SKBS) { in igmp6_event_query()
1388 __skb_queue_tail(&idev->mc_query_queue, skb); in igmp6_event_query()
1389 if (!mod_delayed_work(mld_wq, &idev->mc_query_work, 0)) in igmp6_event_query()
1390 in6_dev_hold(idev); in igmp6_event_query()
1392 spin_unlock_bh(&idev->mc_query_lock); in igmp6_event_query()
1402 struct inet6_dev *idev; in __mld_query_work() local
1429 idev = in6_dev_get(skb->dev); in __mld_query_work()
1430 if (!idev) in __mld_query_work()
1443 } else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) { in __mld_query_work()
1444 err = mld_process_v1(idev, mld, &max_delay, in __mld_query_work()
1457 mld_process_v2(idev, mlh2, &max_delay); in __mld_query_work()
1463 mld_gq_start_work(idev); in __mld_query_work()
1480 for_each_mc_mclock(idev, ma) { in __mld_query_work()
1484 for_each_mc_mclock(idev, ma) { in __mld_query_work()
1506 in6_dev_put(idev); in __mld_query_work()
1513 struct inet6_dev *idev = container_of(to_delayed_work(work), in mld_query_work() local
1523 spin_lock_bh(&idev->mc_query_lock); in mld_query_work()
1524 while ((skb = __skb_dequeue(&idev->mc_query_queue))) { in mld_query_work()
1529 schedule_delayed_work(&idev->mc_query_work, 0); in mld_query_work()
1533 spin_unlock_bh(&idev->mc_query_lock); in mld_query_work()
1535 mutex_lock(&idev->mc_lock); in mld_query_work()
1538 mutex_unlock(&idev->mc_lock); in mld_query_work()
1541 in6_dev_put(idev); in mld_query_work()
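mld_query_work() drains mc_query_queue in bounded batches: skbs are moved to a private on-stack list under the _bh spinlock, and if the batch limit is reached the work reschedules itself immediately (the schedule_delayed_work(..., 0) above) rather than hogging the queue lock; parsing then happens under mc_lock via __mld_query_work(). A simplified sketch of that shape (MLD_WORK_BATCH is a stand-in name for the kernel's internal limit):

        static void mld_query_work(struct work_struct *work)
        {
                struct inet6_dev *idev = container_of(to_delayed_work(work),
                                                      struct inet6_dev,
                                                      mc_query_work);
                struct sk_buff_head q;
                struct sk_buff *skb;
                bool rework = false;
                int cnt = 0;

                skb_queue_head_init(&q);

                spin_lock_bh(&idev->mc_query_lock);
                while ((skb = __skb_dequeue(&idev->mc_query_queue))) {
                        __skb_queue_tail(&q, skb);
                        if (++cnt >= MLD_WORK_BATCH) {  /* stand-in limit */
                                rework = true;
                                schedule_delayed_work(&idev->mc_query_work, 0);
                                break;
                        }
                }
                spin_unlock_bh(&idev->mc_query_lock);

                mutex_lock(&idev->mc_lock);
                while ((skb = __skb_dequeue(&q)))
                        __mld_query_work(skb);
                mutex_unlock(&idev->mc_lock);

                if (!rework)
                        in6_dev_put(idev);  /* matches the hold taken at queue time */
        }

mld_report_work() follows the same drain/batch/process shape for mc_report_queue.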
1547 struct inet6_dev *idev = __in6_dev_get(skb->dev); in igmp6_event_report() local
1549 if (!idev) in igmp6_event_report()
1552 if (idev->dead) { in igmp6_event_report()
1557 spin_lock_bh(&idev->mc_report_lock); in igmp6_event_report()
1558 if (skb_queue_len(&idev->mc_report_queue) < MLD_MAX_SKBS) { in igmp6_event_report()
1559 __skb_queue_tail(&idev->mc_report_queue, skb); in igmp6_event_report()
1560 if (!mod_delayed_work(mld_wq, &idev->mc_report_work, 0)) in igmp6_event_report()
1561 in6_dev_hold(idev); in igmp6_event_report()
1563 spin_unlock_bh(&idev->mc_report_lock); in igmp6_event_report()
1570 struct inet6_dev *idev; in __mld_report_work() local
1595 idev = in6_dev_get(skb->dev); in __mld_report_work()
1596 if (!idev) in __mld_report_work()
1603 for_each_mc_mclock(idev, ma) { in __mld_report_work()
1613 in6_dev_put(idev); in __mld_report_work()
1620 struct inet6_dev *idev = container_of(to_delayed_work(work), in mld_report_work() local
1629 spin_lock_bh(&idev->mc_report_lock); in mld_report_work()
1630 while ((skb = __skb_dequeue(&idev->mc_report_queue))) { in mld_report_work()
1635 schedule_delayed_work(&idev->mc_report_work, 0); in mld_report_work()
1639 spin_unlock_bh(&idev->mc_report_lock); in mld_report_work()
1641 mutex_lock(&idev->mc_lock); in mld_report_work()
1644 mutex_unlock(&idev->mc_lock); in mld_report_work()
1647 in6_dev_put(idev); in mld_report_work()
1733 static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu) in mld_newpack() argument
1737 struct net_device *dev = idev->dev; in mld_newpack()
1762 if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) { in mld_newpack()
1792 struct inet6_dev *idev; in mld_sendpack() local
1799 idev = __in6_dev_get(skb->dev); in mld_sendpack()
1800 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); in mld_sendpack()
1831 ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT); in mld_sendpack()
1832 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); in mld_sendpack()
1834 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); in mld_sendpack()
1857 skb = mld_newpack(pmc->idev, mtu); in add_grhead()
1882 struct inet6_dev *idev = pmc->idev; in add_grec() local
1883 struct net_device *dev = idev->dev; in add_grec()
1915 skb = mld_newpack(idev, mtu); in add_grec()
1920 for (psf = mc_dereference(*psf_list, idev); in add_grec()
1925 psf_next = mc_dereference(psf->sf_next, idev); in add_grec()
1953 skb = mld_newpack(idev, mtu); in add_grec()
1973 mc_dereference(psf->sf_next, idev)); in add_grec()
1976 mc_dereference(psf->sf_next, idev)); in add_grec()
2007 static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc) in mld_send_report() argument
2013 for_each_mc_mclock(idev, pmc) { in mld_send_report()
2037 static void mld_clear_zeros(struct ip6_sf_list __rcu **ppsf, struct inet6_dev *idev) in mld_clear_zeros() argument
2042 for (psf = mc_dereference(*ppsf, idev); in mld_clear_zeros()
2045 psf_next = mc_dereference(psf->sf_next, idev); in mld_clear_zeros()
2049 mc_dereference(psf->sf_next, idev)); in mld_clear_zeros()
2052 mc_dereference(psf->sf_next, idev)); in mld_clear_zeros()
2061 static void mld_send_cr(struct inet6_dev *idev) in mld_send_cr() argument
2069 for (pmc = mc_dereference(idev->mc_tomb, idev); in mld_send_cr()
2072 pmc_next = mc_dereference(pmc->next, idev); in mld_send_cr()
2086 mld_clear_zeros(&pmc->mca_tomb, idev); in mld_send_cr()
2087 mld_clear_zeros(&pmc->mca_sources, idev); in mld_send_cr()
2096 rcu_assign_pointer(idev->mc_tomb, pmc_next); in mld_send_cr()
2097 in6_dev_put(pmc->idev); in mld_send_cr()
2104 for_each_mc_mclock(idev, pmc) { in mld_send_cr()
2134 struct inet6_dev *idev; in igmp6_send() local
2196 idev = __in6_dev_get(skb->dev); in igmp6_send()
2213 ICMP6MSGOUT_INC_STATS(net, idev, type); in igmp6_send()
2214 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); in igmp6_send()
2216 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); in igmp6_send()
2227 static void mld_send_initial_cr(struct inet6_dev *idev) in mld_send_initial_cr() argument
2233 if (mld_in_v1_mode(idev)) in mld_send_initial_cr()
2237 for_each_mc_mclock(idev, pmc) { in mld_send_initial_cr()
2248 void ipv6_mc_dad_complete(struct inet6_dev *idev) in ipv6_mc_dad_complete() argument
2250 mutex_lock(&idev->mc_lock); in ipv6_mc_dad_complete()
2251 idev->mc_dad_count = idev->mc_qrv; in ipv6_mc_dad_complete()
2252 if (idev->mc_dad_count) { in ipv6_mc_dad_complete()
2253 mld_send_initial_cr(idev); in ipv6_mc_dad_complete()
2254 idev->mc_dad_count--; in ipv6_mc_dad_complete()
2255 if (idev->mc_dad_count) in ipv6_mc_dad_complete()
2256 mld_dad_start_work(idev, in ipv6_mc_dad_complete()
2257 unsolicited_report_interval(idev)); in ipv6_mc_dad_complete()
2259 mutex_unlock(&idev->mc_lock); in ipv6_mc_dad_complete()
2264 struct inet6_dev *idev = container_of(to_delayed_work(work), in mld_dad_work() local
2267 mutex_lock(&idev->mc_lock); in mld_dad_work()
2268 mld_send_initial_cr(idev); in mld_dad_work()
2269 if (idev->mc_dad_count) { in mld_dad_work()
2270 idev->mc_dad_count--; in mld_dad_work()
2271 if (idev->mc_dad_count) in mld_dad_work()
2272 mld_dad_start_work(idev, in mld_dad_work()
2273 unsolicited_report_interval(idev)); in mld_dad_work()
2275 mutex_unlock(&idev->mc_lock); in mld_dad_work()
2276 in6_dev_put(idev); in mld_dad_work()
2298 struct inet6_dev *idev = pmc->idev; in ip6_mc_del1_src() local
2303 mc_dereference(psf->sf_next, idev)); in ip6_mc_del1_src()
2306 mc_dereference(psf->sf_next, idev)); in ip6_mc_del1_src()
2309 !mld_in_v1_mode(idev)) { in ip6_mc_del1_src()
2310 psf->sf_crcount = idev->mc_qrv; in ip6_mc_del1_src()
2312 mc_dereference(pmc->mca_tomb, idev)); in ip6_mc_del1_src()
2323 static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca, in ip6_mc_del_src() argument
2331 if (!idev) in ip6_mc_del_src()
2334 for_each_mc_mclock(idev, pmc) { in ip6_mc_del_src()
2363 pmc->mca_crcount = idev->mc_qrv; in ip6_mc_del_src()
2364 idev->mc_ifc_count = pmc->mca_crcount; in ip6_mc_del_src()
2367 mld_ifc_event(pmc->idev); in ip6_mc_del_src()
2369 mld_ifc_event(pmc->idev); in ip6_mc_del_src()
2428 int qrv = pmc->idev->mc_qrv; in sf_setstate()
2452 pmc->idev)); in sf_setstate()
2456 pmc->idev)); in sf_setstate()
2479 mc_dereference(pmc->mca_tomb, pmc->idev)); in sf_setstate()
2493 static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca, in ip6_mc_add_src() argument
2501 if (!idev) in ip6_mc_add_src()
2504 for_each_mc_mclock(idev, pmc) { in ip6_mc_add_src()
2538 pmc->mca_crcount = idev->mc_qrv; in ip6_mc_add_src()
2539 idev->mc_ifc_count = pmc->mca_crcount; in ip6_mc_add_src()
2542 mld_ifc_event(idev); in ip6_mc_add_src()
2544 mld_ifc_event(idev); in ip6_mc_add_src()
2554 for (psf = mc_dereference(pmc->mca_tomb, pmc->idev); in ip6_mc_clear_src()
2557 nextpsf = mc_dereference(psf->sf_next, pmc->idev); in ip6_mc_clear_src()
2561 for (psf = mc_dereference(pmc->mca_sources, pmc->idev); in ip6_mc_clear_src()
2564 nextpsf = mc_dereference(psf->sf_next, pmc->idev); in ip6_mc_clear_src()
2581 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT); in igmp6_join_group()
2583 delay = prandom_u32() % unsolicited_report_interval(ma->idev); in igmp6_join_group()
2596 struct inet6_dev *idev) in ip6_mc_leave_src() argument
2603 if (idev) in ip6_mc_leave_src()
2604 mutex_lock(&idev->mc_lock); in ip6_mc_leave_src()
2608 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0); in ip6_mc_leave_src()
2610 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, in ip6_mc_leave_src()
2618 if (idev) in ip6_mc_leave_src()
2619 mutex_unlock(&idev->mc_lock); in ip6_mc_leave_src()
2627 if (mld_in_v1_mode(ma->idev)) { in igmp6_leave_group()
2629 igmp6_send(&ma->mca_addr, ma->idev->dev, in igmp6_leave_group()
2633 mld_add_delrec(ma->idev, ma); in igmp6_leave_group()
2634 mld_ifc_event(ma->idev); in igmp6_leave_group()
2640 struct inet6_dev *idev = container_of(to_delayed_work(work), in mld_gq_work() local
2644 mutex_lock(&idev->mc_lock); in mld_gq_work()
2645 mld_send_report(idev, NULL); in mld_gq_work()
2646 idev->mc_gq_running = 0; in mld_gq_work()
2647 mutex_unlock(&idev->mc_lock); in mld_gq_work()
2649 in6_dev_put(idev); in mld_gq_work()
2654 struct inet6_dev *idev = container_of(to_delayed_work(work), in mld_ifc_work() local
2658 mutex_lock(&idev->mc_lock); in mld_ifc_work()
2659 mld_send_cr(idev); in mld_ifc_work()
2661 if (idev->mc_ifc_count) { in mld_ifc_work()
2662 idev->mc_ifc_count--; in mld_ifc_work()
2663 if (idev->mc_ifc_count) in mld_ifc_work()
2664 mld_ifc_start_work(idev, in mld_ifc_work()
2665 unsolicited_report_interval(idev)); in mld_ifc_work()
2667 mutex_unlock(&idev->mc_lock); in mld_ifc_work()
2668 in6_dev_put(idev); in mld_ifc_work()
2672 static void mld_ifc_event(struct inet6_dev *idev) in mld_ifc_event() argument
2674 if (mld_in_v1_mode(idev)) in mld_ifc_event()
2677 idev->mc_ifc_count = idev->mc_qrv; in mld_ifc_event()
2678 mld_ifc_start_work(idev, 1); in mld_ifc_event()
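With its elided lines restored, mld_ifc_event() is the trigger for the interface-change report machinery: nothing to do in MLDv1 mode (there are no v2 state-change records), otherwise it arms mc_ifc_count to the robustness value so mld_ifc_work() re-sends the change report mc_qrv times, spaced by unsolicited_report_interval():

        static void mld_ifc_event(struct inet6_dev *idev)
        {
                if (mld_in_v1_mode(idev))
                        return;

                idev->mc_ifc_count = idev->mc_qrv;
                mld_ifc_start_work(idev, 1);
        }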
2686 mutex_lock(&ma->idev->mc_lock); in mld_mca_work()
2687 if (mld_in_v1_mode(ma->idev)) in mld_mca_work()
2688 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT); in mld_mca_work()
2690 mld_send_report(ma->idev, ma); in mld_mca_work()
2693 mutex_unlock(&ma->idev->mc_lock); in mld_mca_work()
2700 void ipv6_mc_unmap(struct inet6_dev *idev) in ipv6_mc_unmap() argument
2706 mutex_lock(&idev->mc_lock); in ipv6_mc_unmap()
2707 for_each_mc_mclock(idev, i) in ipv6_mc_unmap()
2709 mutex_unlock(&idev->mc_lock); in ipv6_mc_unmap()
2712 void ipv6_mc_remap(struct inet6_dev *idev) in ipv6_mc_remap() argument
2714 ipv6_mc_up(idev); in ipv6_mc_remap()
2718 void ipv6_mc_down(struct inet6_dev *idev) in ipv6_mc_down() argument
2722 mutex_lock(&idev->mc_lock); in ipv6_mc_down()
2724 for_each_mc_mclock(idev, i) in ipv6_mc_down()
2726 mutex_unlock(&idev->mc_lock); in ipv6_mc_down()
2732 mld_query_stop_work(idev); in ipv6_mc_down()
2733 mld_report_stop_work(idev); in ipv6_mc_down()
2734 mld_ifc_stop_work(idev); in ipv6_mc_down()
2735 mld_gq_stop_work(idev); in ipv6_mc_down()
2736 mld_dad_stop_work(idev); in ipv6_mc_down()
2739 static void ipv6_mc_reset(struct inet6_dev *idev) in ipv6_mc_reset() argument
2741 idev->mc_qrv = sysctl_mld_qrv; in ipv6_mc_reset()
2742 idev->mc_qi = MLD_QI_DEFAULT; in ipv6_mc_reset()
2743 idev->mc_qri = MLD_QRI_DEFAULT; in ipv6_mc_reset()
2744 idev->mc_v1_seen = 0; in ipv6_mc_reset()
2745 idev->mc_maxdelay = unsolicited_report_interval(idev); in ipv6_mc_reset()
2750 void ipv6_mc_up(struct inet6_dev *idev) in ipv6_mc_up() argument
2756 ipv6_mc_reset(idev); in ipv6_mc_up()
2757 mutex_lock(&idev->mc_lock); in ipv6_mc_up()
2758 for_each_mc_mclock(idev, i) { in ipv6_mc_up()
2759 mld_del_delrec(idev, i); in ipv6_mc_up()
2762 mutex_unlock(&idev->mc_lock); in ipv6_mc_up()
2767 void ipv6_mc_init_dev(struct inet6_dev *idev) in ipv6_mc_init_dev() argument
2769 idev->mc_gq_running = 0; in ipv6_mc_init_dev()
2770 INIT_DELAYED_WORK(&idev->mc_gq_work, mld_gq_work); in ipv6_mc_init_dev()
2771 RCU_INIT_POINTER(idev->mc_tomb, NULL); in ipv6_mc_init_dev()
2772 idev->mc_ifc_count = 0; in ipv6_mc_init_dev()
2773 INIT_DELAYED_WORK(&idev->mc_ifc_work, mld_ifc_work); in ipv6_mc_init_dev()
2774 INIT_DELAYED_WORK(&idev->mc_dad_work, mld_dad_work); in ipv6_mc_init_dev()
2775 INIT_DELAYED_WORK(&idev->mc_query_work, mld_query_work); in ipv6_mc_init_dev()
2776 INIT_DELAYED_WORK(&idev->mc_report_work, mld_report_work); in ipv6_mc_init_dev()
2777 skb_queue_head_init(&idev->mc_query_queue); in ipv6_mc_init_dev()
2778 skb_queue_head_init(&idev->mc_report_queue); in ipv6_mc_init_dev()
2779 spin_lock_init(&idev->mc_query_lock); in ipv6_mc_init_dev()
2780 spin_lock_init(&idev->mc_report_lock); in ipv6_mc_init_dev()
2781 mutex_init(&idev->mc_lock); in ipv6_mc_init_dev()
2782 ipv6_mc_reset(idev); in ipv6_mc_init_dev()
2789 void ipv6_mc_destroy_dev(struct inet6_dev *idev) in ipv6_mc_destroy_dev() argument
2794 ipv6_mc_down(idev); in ipv6_mc_destroy_dev()
2795 mutex_lock(&idev->mc_lock); in ipv6_mc_destroy_dev()
2796 mld_clear_delrec(idev); in ipv6_mc_destroy_dev()
2797 mutex_unlock(&idev->mc_lock); in ipv6_mc_destroy_dev()
2798 mld_clear_query(idev); in ipv6_mc_destroy_dev()
2799 mld_clear_report(idev); in ipv6_mc_destroy_dev()
2806 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes); in ipv6_mc_destroy_dev()
2808 if (idev->cnf.forwarding) in ipv6_mc_destroy_dev()
2809 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters); in ipv6_mc_destroy_dev()
2811 mutex_lock(&idev->mc_lock); in ipv6_mc_destroy_dev()
2812 while ((i = mc_dereference(idev->mc_list, idev))) { in ipv6_mc_destroy_dev()
2813 rcu_assign_pointer(idev->mc_list, mc_dereference(i->next, idev)); in ipv6_mc_destroy_dev()
2818 mutex_unlock(&idev->mc_lock); in ipv6_mc_destroy_dev()
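The final loop pops each group off mc_list with the usual RCU publish pattern; the elided body clears the group's source lists and drops the list's reference (a reconstruction):

        while ((i = mc_dereference(idev->mc_list, idev))) {
                rcu_assign_pointer(idev->mc_list,
                                   mc_dereference(i->next, idev));

                ip6_mc_clear_src(i);    /* free mca_sources and mca_tomb */
                ma_put(i);              /* drop the final reference */
        }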
2821 static void ipv6_mc_rejoin_groups(struct inet6_dev *idev) in ipv6_mc_rejoin_groups() argument
2827 mutex_lock(&idev->mc_lock); in ipv6_mc_rejoin_groups()
2828 if (mld_in_v1_mode(idev)) { in ipv6_mc_rejoin_groups()
2829 for_each_mc_mclock(idev, pmc) in ipv6_mc_rejoin_groups()
2832 mld_send_report(idev, NULL); in ipv6_mc_rejoin_groups()
2834 mutex_unlock(&idev->mc_lock); in ipv6_mc_rejoin_groups()
2842 struct inet6_dev *idev = __in6_dev_get(dev); in ipv6_mc_netdev_event() local
2846 if (idev) in ipv6_mc_netdev_event()
2847 ipv6_mc_rejoin_groups(idev); in ipv6_mc_netdev_event()
2864 struct inet6_dev *idev; member
2875 state->idev = NULL; in igmp6_mc_get_first()
2877 struct inet6_dev *idev; in igmp6_mc_get_first() local
2878 idev = __in6_dev_get(state->dev); in igmp6_mc_get_first()
2879 if (!idev) in igmp6_mc_get_first()
2882 im = rcu_dereference(idev->mc_list); in igmp6_mc_get_first()
2884 state->idev = idev; in igmp6_mc_get_first()
2899 state->idev = NULL; in igmp6_mc_get_next()
2902 state->idev = __in6_dev_get(state->dev); in igmp6_mc_get_next()
2903 if (!state->idev) in igmp6_mc_get_next()
2905 im = rcu_dereference(state->idev->mc_list); in igmp6_mc_get_next()
2939 if (likely(state->idev)) in igmp6_mc_seq_stop()
2940 state->idev = NULL; in igmp6_mc_seq_stop()
2970 struct inet6_dev *idev; member
2983 state->idev = NULL; in igmp6_mcf_get_first()
2986 struct inet6_dev *idev; in igmp6_mcf_get_first() local
2987 idev = __in6_dev_get(state->dev); in igmp6_mcf_get_first()
2988 if (unlikely(idev == NULL)) in igmp6_mcf_get_first()
2991 im = rcu_dereference(idev->mc_list); in igmp6_mcf_get_first()
2996 state->idev = idev; in igmp6_mcf_get_first()
3014 state->idev = NULL; in igmp6_mcf_get_next()
3017 state->idev = __in6_dev_get(state->dev); in igmp6_mcf_get_next()
3018 if (!state->idev) in igmp6_mcf_get_next()
3020 state->im = rcu_dereference(state->idev->mc_list); in igmp6_mcf_get_next()
3064 if (likely(state->idev)) in igmp6_mcf_seq_stop()
3065 state->idev = NULL; in igmp6_mcf_seq_stop()