Lines Matching refs:mcast

74 struct ipoib_mcast *mcast, in __ipoib_mcast_schedule_join_thread() argument
85 if (mcast && delay) { in __ipoib_mcast_schedule_join_thread()
89 mcast->backoff *= 2; in __ipoib_mcast_schedule_join_thread()
90 if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) in __ipoib_mcast_schedule_join_thread()
91 mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; in __ipoib_mcast_schedule_join_thread()
92 mcast->delay_until = jiffies + (mcast->backoff * HZ); in __ipoib_mcast_schedule_join_thread()
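
The references above show the join thread's retry policy: a per-group backoff that doubles each time a join has to be deferred, is clamped at IPOIB_MAX_BACKOFF_SECONDS, and is turned into a jiffies deadline stored in delay_until. A minimal userspace sketch of that capped exponential backoff, using wall-clock seconds in place of jiffies (the struct and helper names below are illustrative, not the kernel's):

    #include <time.h>

    #define MAX_BACKOFF_SECONDS 16      /* stand-in for IPOIB_MAX_BACKOFF_SECONDS */

    struct mcast_state {
        unsigned int backoff;           /* seconds to wait before the next join attempt */
        time_t delay_until;             /* wall-clock stand-in for the jiffies deadline */
    };

    /* Hypothetical helper: double the backoff, clamp it, push the deadline out. */
    static void mcast_backoff_step(struct mcast_state *m)
    {
        m->backoff *= 2;
        if (m->backoff > MAX_BACKOFF_SECONDS)
            m->backoff = MAX_BACKOFF_SECONDS;
        m->delay_until = time(NULL) + m->backoff;
    }
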
112 static void ipoib_mcast_free(struct ipoib_mcast *mcast) in ipoib_mcast_free() argument
114 struct net_device *dev = mcast->dev; in ipoib_mcast_free()
118 mcast->mcmember.mgid.raw); in ipoib_mcast_free()
121 ipoib_del_neighs_by_gid(dev, mcast->mcmember.mgid.raw); in ipoib_mcast_free()
123 if (mcast->ah) in ipoib_mcast_free()
124 ipoib_put_ah(mcast->ah); in ipoib_mcast_free()
126 while (!skb_queue_empty(&mcast->pkt_queue)) { in ipoib_mcast_free()
128 dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); in ipoib_mcast_free()
135 kfree(mcast); in ipoib_mcast_free()
140 struct ipoib_mcast *mcast; in ipoib_mcast_alloc() local
142 mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC); in ipoib_mcast_alloc()
143 if (!mcast) in ipoib_mcast_alloc()
146 mcast->dev = dev; in ipoib_mcast_alloc()
147 mcast->created = jiffies; in ipoib_mcast_alloc()
148 mcast->delay_until = jiffies; in ipoib_mcast_alloc()
149 mcast->backoff = 1; in ipoib_mcast_alloc()
151 INIT_LIST_HEAD(&mcast->list); in ipoib_mcast_alloc()
152 INIT_LIST_HEAD(&mcast->neigh_list); in ipoib_mcast_alloc()
153 skb_queue_head_init(&mcast->pkt_queue); in ipoib_mcast_alloc()
155 return mcast; in ipoib_mcast_alloc()
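
ipoib_mcast_alloc() zero-allocates the per-group state with GFP_ATOMIC (it is called from the send path, see ipoib_mcast_send() further down) and initialises its list heads and packet queue; ipoib_mcast_free() drains whatever is still queued before kfree()ing the group. A simplified sketch of that allocate/drain/free lifecycle, with a plain singly linked queue standing in for the sk_buff queue (all names below are illustrative):

    #include <stdlib.h>

    struct pkt {
        struct pkt *next;
        /* payload omitted */
    };

    struct mcast_group {
        struct pkt *pkt_queue;          /* stand-in for the skb queue head */
        unsigned int backoff;
    };

    static struct mcast_group *mcast_group_alloc(void)
    {
        /* calloc() mirrors kzalloc(): the structure starts out zeroed. */
        struct mcast_group *m = calloc(1, sizeof(*m));
        if (!m)
            return NULL;
        m->backoff = 1;                 /* first retry after one backoff unit */
        return m;
    }

    static void mcast_group_free(struct mcast_group *m)
    {
        /* Drop anything still queued for this group, then free the group itself. */
        while (m->pkt_queue) {
            struct pkt *p = m->pkt_queue;
            m->pkt_queue = p->next;
            free(p);
        }
        free(m);
    }

The point mirrored from the kernel fragments is that every queued packet is accounted for before the group disappears, so nothing leaks when a join never completes.
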
164 struct ipoib_mcast *mcast; in __ipoib_mcast_find() local
167 mcast = rb_entry(n, struct ipoib_mcast, rb_node); in __ipoib_mcast_find()
169 ret = memcmp(mgid, mcast->mcmember.mgid.raw, in __ipoib_mcast_find()
176 return mcast; in __ipoib_mcast_find()
182 static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast) in __ipoib_mcast_add() argument
194 ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw, in __ipoib_mcast_add()
204 rb_link_node(&mcast->rb_node, pn, n); in __ipoib_mcast_add()
205 rb_insert_color(&mcast->rb_node, &priv->multicast_tree); in __ipoib_mcast_add()
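
__ipoib_mcast_find() and __ipoib_mcast_add() keep the groups in a red-black tree keyed on the raw 16-byte MGID, with memcmp() deciding the direction of descent. The sketch below shows the same comparison-driven lookup over a plain (unbalanced) binary search tree rather than the kernel's rb-tree, so the rebalancing machinery is omitted; the node type and function name are assumptions:

    #include <stddef.h>
    #include <string.h>

    struct mcast_node {
        unsigned char mgid[16];         /* InfiniBand MGIDs are 16 bytes */
        struct mcast_node *left, *right;
    };

    /* Descend the tree the way __ipoib_mcast_find() does: memcmp() on the
     * raw MGID decides whether to go left, go right, or stop. */
    static struct mcast_node *mcast_find(struct mcast_node *root,
                                         const unsigned char *mgid)
    {
        while (root) {
            int ret = memcmp(mgid, root->mgid, sizeof(root->mgid));
            if (ret < 0)
                root = root->left;
            else if (ret > 0)
                root = root->right;
            else
                return root;
        }
        return NULL;
    }

Insertion follows the identical descent, remembering the parent and the link that came back empty, which is exactly what the rb_link_node()/rb_insert_color() pair in __ipoib_mcast_add() is fed.
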
210 static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, in ipoib_mcast_join_finish() argument
213 struct net_device *dev = mcast->dev; in ipoib_mcast_join_finish()
222 mcast->mcmember = *mcmember; in ipoib_mcast_join_finish()
227 if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4, in ipoib_mcast_join_finish()
256 if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { in ipoib_mcast_join_finish()
257 if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { in ipoib_mcast_join_finish()
259 mcast->mcmember.mgid.raw); in ipoib_mcast_join_finish()
264 ret = rn->attach_mcast(dev, priv->ca, &mcast->mcmember.mgid, in ipoib_mcast_join_finish()
265 be16_to_cpu(mcast->mcmember.mlid), in ipoib_mcast_join_finish()
269 mcast->mcmember.mgid.raw); in ipoib_mcast_join_finish()
271 clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags); in ipoib_mcast_join_finish()
278 rdma_ah_set_dlid(&av, be16_to_cpu(mcast->mcmember.mlid)); in ipoib_mcast_join_finish()
280 rdma_ah_set_sl(&av, mcast->mcmember.sl); in ipoib_mcast_join_finish()
281 rdma_ah_set_static_rate(&av, mcast->mcmember.rate); in ipoib_mcast_join_finish()
283 rdma_ah_set_grh(&av, &mcast->mcmember.mgid, in ipoib_mcast_join_finish()
284 be32_to_cpu(mcast->mcmember.flow_label), in ipoib_mcast_join_finish()
285 0, mcast->mcmember.hop_limit, in ipoib_mcast_join_finish()
286 mcast->mcmember.traffic_class); in ipoib_mcast_join_finish()
296 mcast->ah = ah; in ipoib_mcast_join_finish()
300 mcast->mcmember.mgid.raw, in ipoib_mcast_join_finish()
301 mcast->ah->ah, in ipoib_mcast_join_finish()
302 be16_to_cpu(mcast->mcmember.mlid), in ipoib_mcast_join_finish()
303 mcast->mcmember.sl); in ipoib_mcast_join_finish()
307 while (!skb_queue_empty(&mcast->pkt_queue)) { in ipoib_mcast_join_finish()
308 struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue); in ipoib_mcast_join_finish()
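
ipoib_mcast_join_finish() copies the returned member record, attaches the QP to the group only for full members (send-only joins skip the attach), builds the address handle from the mlid/sl/rate/GRH fields, and finally flushes the packets that were queued while the join was outstanding. The attach is guarded by test_and_set_bit() on IPOIB_MCAST_FLAG_ATTACHED so it happens at most once, and the bit is cleared again if the attach fails. A small C11 sketch of that once-only flag guard (the flag word and helper names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Per-group flag word; ATTACHED mirrors IPOIB_MCAST_FLAG_ATTACHED. */
    enum { MCAST_FLAG_ATTACHED = 1u << 0 };

    struct mcast_flags {
        _Atomic unsigned int bits;
    };

    /* Attach at most once: the atomic fetch_or plays the role of
     * test_and_set_bit(); if the bit was already set, an earlier join
     * already attached the QP and we must not attach again. */
    static bool mcast_try_mark_attached(struct mcast_flags *f)
    {
        unsigned int old = atomic_fetch_or(&f->bits, MCAST_FLAG_ATTACHED);
        return !(old & MCAST_FLAG_ATTACHED);
    }

    /* On attach failure the bit is cleared so a later join can retry. */
    static void mcast_clear_attached(struct mcast_flags *f)
    {
        atomic_fetch_and(&f->bits, ~MCAST_FLAG_ATTACHED);
    }

The leave path in ipoib_mcast_leave() is the mirror image: test_and_clear_bit() on the same flag decides whether a detach is needed at all.
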
360 struct ipoib_mcast *mcast = multicast->context; in ipoib_mcast_join_complete() local
361 struct net_device *dev = mcast->dev; in ipoib_mcast_join_complete()
365 test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? in ipoib_mcast_join_complete()
367 mcast->mcmember.mgid.raw, status); in ipoib_mcast_join_complete()
376 status = ipoib_mcast_join_finish(mcast, &multicast->rec); in ipoib_mcast_join_complete()
379 mcast->backoff = 1; in ipoib_mcast_join_complete()
380 mcast->delay_until = jiffies; in ipoib_mcast_join_complete()
389 if (mcast == priv->broadcast) { in ipoib_mcast_join_complete()
397 test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) && in ipoib_mcast_join_complete()
400 if (mcast->logcount < 20) { in ipoib_mcast_join_complete()
404 test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "", in ipoib_mcast_join_complete()
405 mcast->mcmember.mgid.raw, status); in ipoib_mcast_join_complete()
408 test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "", in ipoib_mcast_join_complete()
409 mcast->mcmember.mgid.raw, status); in ipoib_mcast_join_complete()
413 mcast->logcount++; in ipoib_mcast_join_complete()
416 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) && in ipoib_mcast_join_complete()
417 mcast->backoff >= 2) { in ipoib_mcast_join_complete()
427 mcast->backoff = 1; in ipoib_mcast_join_complete()
429 while (!skb_queue_empty(&mcast->pkt_queue)) { in ipoib_mcast_join_complete()
431 dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); in ipoib_mcast_join_complete()
437 __ipoib_mcast_schedule_join_thread(priv, mcast, 1); in ipoib_mcast_join_complete()
449 mcast->mc = NULL; in ipoib_mcast_join_complete()
451 mcast->mc = multicast; in ipoib_mcast_join_complete()
452 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); in ipoib_mcast_join_complete()
454 complete(&mcast->done); in ipoib_mcast_join_complete()
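
ipoib_mcast_join_complete() is the SA layer's callback. A successful join resets the group's backoff and clears its delay so the next attempt, if one is ever needed, is immediate; a failure logs (rate-limited via logcount), drops the queued packets of a send-only group whose backoff has already reached 2, and otherwise reschedules the join thread with a delay. In either case the SA handle is recorded (or cleared), BUSY is dropped, and done is completed. Building on the mcast_state sketch further up, the backoff side of the status handling reduces to (names again illustrative):

    #include <time.h>

    /* Minimal stand-in for the completion handler's backoff bookkeeping:
     * a successful join resets the backoff, a failed one doubles it
     * (capped) and pushes the next attempt out by that many seconds. */
    static void mcast_join_done(struct mcast_state *m, int status)
    {
        if (!status) {                  /* joined: no delay on the next attempt */
            m->backoff = 1;
            m->delay_until = time(NULL);
        } else {                        /* failed: back off before retrying */
            mcast_backoff_step(m);      /* helper from the earlier sketch */
        }
    }
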
462 static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) in ipoib_mcast_join() argument
476 init_completion(&mcast->done); in ipoib_mcast_join()
477 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); in ipoib_mcast_join()
479 ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw); in ipoib_mcast_join()
481 rec.mgid = mcast->mcmember.mgid; in ipoib_mcast_join()
491 if (mcast != priv->broadcast) { in ipoib_mcast_join()
531 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) in ipoib_mcast_join()
538 ipoib_mcast_join_complete, mcast); in ipoib_mcast_join()
544 __ipoib_mcast_schedule_join_thread(priv, mcast, 1); in ipoib_mcast_join()
545 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); in ipoib_mcast_join()
547 complete(&mcast->done); in ipoib_mcast_join()
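
ipoib_mcast_join() prepares a group for an asynchronous join: it reinitialises the done completion and sets IPOIB_MCAST_FLAG_BUSY before handing the request to the SA layer, and if the request cannot even be issued it reschedules the join thread, clears BUSY, and completes done so that any waiter in ipoib_mcast_remove_list() is released. The done/complete pair is the kernel's completion primitive; a toy userspace equivalent built from a pthread mutex, condition variable, and flag looks like this (type and function names are assumptions):

    #include <pthread.h>
    #include <stdbool.h>

    /* Toy stand-in for struct completion / init_completion() / complete(). */
    struct toy_completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool done;
    };

    static void toy_completion_init(struct toy_completion *c)
    {
        pthread_mutex_init(&c->lock, NULL);
        pthread_cond_init(&c->cond, NULL);
        c->done = false;
    }

    /* complete(): record that the event happened and wake any waiter. */
    static void toy_complete(struct toy_completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = true;
        pthread_cond_broadcast(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

The wait side of this toy primitive is shown with the teardown path further down.
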
560 struct ipoib_mcast *mcast = NULL; in ipoib_mcast_join_task() local
613 mcast = priv->broadcast; in ipoib_mcast_join_task()
614 if (mcast->backoff > 1 && in ipoib_mcast_join_task()
615 time_before(jiffies, mcast->delay_until)) { in ipoib_mcast_join_task()
616 delay_until = mcast->delay_until; in ipoib_mcast_join_task()
617 mcast = NULL; in ipoib_mcast_join_task()
627 list_for_each_entry(mcast, &priv->multicast_list, list) { in ipoib_mcast_join_task()
628 if (IS_ERR_OR_NULL(mcast->mc) && in ipoib_mcast_join_task()
629 !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) && in ipoib_mcast_join_task()
630 (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) || in ipoib_mcast_join_task()
631 !skb_queue_empty(&mcast->pkt_queue))) { in ipoib_mcast_join_task()
632 if (mcast->backoff == 1 || in ipoib_mcast_join_task()
633 time_after_eq(jiffies, mcast->delay_until)) { in ipoib_mcast_join_task()
635 if (ipoib_mcast_join(dev, mcast)) { in ipoib_mcast_join_task()
640 time_before(mcast->delay_until, delay_until)) in ipoib_mcast_join_task()
641 delay_until = mcast->delay_until; in ipoib_mcast_join_task()
645 mcast = NULL; in ipoib_mcast_join_task()
654 if (mcast) in ipoib_mcast_join_task()
655 ipoib_mcast_join(dev, mcast); in ipoib_mcast_join_task()
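
ipoib_mcast_join_task() is the workqueue body. It handles priv->broadcast first, then walks multicast_list looking for entries that have no live SA handle (IS_ERR_OR_NULL(mcast->mc)), are not already BUSY, and, for send-only groups, actually have packets queued. A group whose backoff deadline has not yet passed is skipped, but its deadline is folded into delay_until so the task can requeue itself for the earliest pending retry. A simplified scan that captures that "join what is due, remember the earliest deadline of what is not" logic (the array-based list and all names are illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <time.h>

    struct group {
        time_t delay_until;             /* not before this time */
        bool busy;                      /* a join is already in flight */
        bool joined;                    /* already a member */
    };

    /* Hypothetical hook standing in for ipoib_mcast_join(); here it only
     * marks the group as having a join in flight. */
    static void start_join(struct group *g)
    {
        g->busy = true;
    }

    /* Start a join for every group that is due; return the earliest
     * deadline among the ones that must still wait (0 if none wait). */
    static time_t scan_groups(struct group *groups, size_t n)
    {
        time_t now = time(NULL), earliest = 0;

        for (size_t i = 0; i < n; i++) {
            struct group *g = &groups[i];

            if (g->joined || g->busy)
                continue;
            if (g->delay_until <= now)
                start_join(g);
            else if (!earliest || g->delay_until < earliest)
                earliest = g->delay_until;
        }
        return earliest;                /* caller re-arms the work for this time */
    }
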
681 static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) in ipoib_mcast_leave() argument
687 if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) in ipoib_mcast_leave()
690 if (!IS_ERR_OR_NULL(mcast->mc)) in ipoib_mcast_leave()
691 ib_sa_free_multicast(mcast->mc); in ipoib_mcast_leave()
693 if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { in ipoib_mcast_leave()
695 mcast->mcmember.mgid.raw); in ipoib_mcast_leave()
698 ret = rn->detach_mcast(dev, priv->ca, &mcast->mcmember.mgid, in ipoib_mcast_leave()
699 be16_to_cpu(mcast->mcmember.mlid)); in ipoib_mcast_leave()
702 } else if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) in ipoib_mcast_leave()
718 struct ipoib_mcast *mcast = __ipoib_mcast_find(priv->dev, mgid); in ipoib_check_and_add_mcast_sendonly() local
720 if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { in ipoib_check_and_add_mcast_sendonly()
721 list_del(&mcast->list); in ipoib_check_and_add_mcast_sendonly()
722 rb_erase(&mcast->rb_node, &priv->multicast_tree); in ipoib_check_and_add_mcast_sendonly()
723 list_add_tail(&mcast->list, remove_list); in ipoib_check_and_add_mcast_sendonly()
730 struct ipoib_mcast *mcast, *tmcast; in ipoib_mcast_remove_list() local
736 list_for_each_entry_safe(mcast, tmcast, remove_list, list) in ipoib_mcast_remove_list()
737 if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) in ipoib_mcast_remove_list()
738 wait_for_completion(&mcast->done); in ipoib_mcast_remove_list()
740 list_for_each_entry_safe(mcast, tmcast, remove_list, list) { in ipoib_mcast_remove_list()
741 ipoib_mcast_leave(mcast->dev, mcast); in ipoib_mcast_remove_list()
742 ipoib_mcast_free(mcast); in ipoib_mcast_remove_list()
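
ipoib_check_and_add_mcast_sendonly() moves a matching send-only group onto a caller-supplied remove_list, and ipoib_mcast_remove_list() tears such a list down in two passes: first wait_for_completion() on every group still marked BUSY, so no SA callback is running when the group is freed, then ipoib_mcast_leave() and ipoib_mcast_free() for each entry. The wait side of the toy completion from the earlier sketch makes that ordering explicit:

    /* wait_for_completion(): block until toy_complete() has been called. */
    static void toy_wait_for_completion(struct toy_completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

Waiting before freeing is what makes it safe for ipoib_mcast_join_complete() to touch the group right up to the moment it calls complete(&mcast->done).
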
750 struct ipoib_mcast *mcast; in ipoib_mcast_send() local
764 mcast = __ipoib_mcast_find(dev, mgid); in ipoib_mcast_send()
765 if (!mcast || !mcast->ah) { in ipoib_mcast_send()
766 if (!mcast) { in ipoib_mcast_send()
771 mcast = ipoib_mcast_alloc(dev); in ipoib_mcast_send()
772 if (!mcast) { in ipoib_mcast_send()
780 set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags); in ipoib_mcast_send()
781 memcpy(mcast->mcmember.mgid.raw, mgid, in ipoib_mcast_send()
783 __ipoib_mcast_add(dev, mcast); in ipoib_mcast_send()
784 list_add_tail(&mcast->list, &priv->multicast_list); in ipoib_mcast_send()
786 if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) { in ipoib_mcast_send()
789 skb_queue_tail(&mcast->pkt_queue, skb); in ipoib_mcast_send()
794 if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) { in ipoib_mcast_send()
809 kref_get(&mcast->ah->ref); in ipoib_mcast_send()
810 neigh->ah = mcast->ah; in ipoib_mcast_send()
812 list_add_tail(&neigh->list, &mcast->neigh_list); in ipoib_mcast_send()
816 mcast->ah->last_send = rn->send(dev, skb, mcast->ah->ah, in ipoib_mcast_send()
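
ipoib_mcast_send() is the transmit path: it looks the group up by MGID, creates a send-only entry on the fly if none exists, kicks the join thread if no join is BUSY, and, while the group still has no address handle, parks the skb on pkt_queue, but only up to IPOIB_MAX_MCAST_QUEUE packets, dropping anything beyond that. Once an address handle exists the packet goes straight out via rn->send() and a looked-up neigh borrows the group's address handle. The bounded-backlog part reduces to the following sketch (queue type, limit value, and names are illustrative):

    #include <stdbool.h>
    #include <stddef.h>

    #define MAX_MCAST_QUEUE 3           /* stand-in for IPOIB_MAX_MCAST_QUEUE */

    struct packet {
        struct packet *next;
    };

    struct backlog {
        struct packet *head, *tail;
        size_t len;
        unsigned long dropped;          /* mirrors the tx_dropped accounting */
    };

    /* Queue a packet while the group is not yet joined; refuse (and count
     * the drop) once the per-group backlog limit is reached. */
    static bool backlog_enqueue(struct backlog *q, struct packet *p)
    {
        if (q->len >= MAX_MCAST_QUEUE) {
            q->dropped++;
            return false;               /* caller frees the packet */
        }
        p->next = NULL;
        if (q->tail)
            q->tail->next = p;
        else
            q->head = p;
        q->tail = p;
        q->len++;
        return true;
    }
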
831 struct ipoib_mcast *mcast, *tmcast; in ipoib_mcast_dev_flush() local
839 list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) { in ipoib_mcast_dev_flush()
840 list_del(&mcast->list); in ipoib_mcast_dev_flush()
841 rb_erase(&mcast->rb_node, &priv->multicast_tree); in ipoib_mcast_dev_flush()
842 list_add_tail(&mcast->list, &remove_list); in ipoib_mcast_dev_flush()
874 struct ipoib_mcast *mcast, *tmcast; in ipoib_mcast_restart_task() local
897 list_for_each_entry(mcast, &priv->multicast_list, list) in ipoib_mcast_restart_task()
898 clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags); in ipoib_mcast_restart_task()
909 mcast = __ipoib_mcast_find(dev, &mgid); in ipoib_mcast_restart_task()
910 if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { in ipoib_mcast_restart_task()
935 if (mcast) { in ipoib_mcast_restart_task()
937 list_move_tail(&mcast->list, &remove_list); in ipoib_mcast_restart_task()
939 rb_replace_node(&mcast->rb_node, in ipoib_mcast_restart_task()
948 if (mcast) in ipoib_mcast_restart_task()
949 set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags); in ipoib_mcast_restart_task()
953 list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) { in ipoib_mcast_restart_task()
954 if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) && in ipoib_mcast_restart_task()
955 !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { in ipoib_mcast_restart_task()
957 mcast->mcmember.mgid.raw); in ipoib_mcast_restart_task()
959 rb_erase(&mcast->rb_node, &priv->multicast_tree); in ipoib_mcast_restart_task()
962 list_move_tail(&mcast->list, &remove_list); in ipoib_mcast_restart_task()
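
ipoib_mcast_dev_flush() empties multicast_list and the rb-tree wholesale, while ipoib_mcast_restart_task() reconciles the driver's view with the device's current address list using a classic mark-and-sweep: clear IPOIB_MCAST_FLAG_FOUND on every known group, mark the ones that are still present (replacing an rb-tree node and moving the old group onto remove_list where needed), then sweep any full-member group left unmarked onto remove_list. A compact sketch of the mark-and-sweep skeleton (the array and field names are illustrative):

    #include <stdbool.h>
    #include <stddef.h>

    struct known_group {
        bool found;                     /* mirrors IPOIB_MCAST_FLAG_FOUND */
        bool send_only;
        bool remove;                    /* stand-in for moving onto remove_list */
    };

    /* Phase 1: forget the marks from the previous reconciliation. */
    static void clear_marks(struct known_group *groups, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            groups[i].found = false;
    }

    /* Phase 3: anything not re-marked while scanning the device's address
     * list, and not a send-only group, is scheduled for removal. */
    static void sweep_unmarked(struct known_group *groups, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            if (!groups[i].found && !groups[i].send_only)
                groups[i].remove = true;
    }
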
1006 struct ipoib_mcast *mcast; in ipoib_mcast_iter_next() local
1014 mcast = rb_entry(n, struct ipoib_mcast, rb_node); in ipoib_mcast_iter_next()
1016 if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw, in ipoib_mcast_iter_next()
1018 iter->mgid = mcast->mcmember.mgid; in ipoib_mcast_iter_next()
1019 iter->created = mcast->created; in ipoib_mcast_iter_next()
1020 iter->queuelen = skb_queue_len(&mcast->pkt_queue); in ipoib_mcast_iter_next()
1021 iter->complete = !!mcast->ah; in ipoib_mcast_iter_next()
1022 iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY)); in ipoib_mcast_iter_next()
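
ipoib_mcast_iter_next() walks the multicast rb-tree for the driver's debug listing and copies a snapshot of each group (MGID, creation time, queue length, whether an address handle exists, and the send-only flag) into the iterator, so the caller can format the output without holding on to the live group. A trimmed-down snapshot struct and copy step (field and function names are illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>
    #include <time.h>

    struct group_snapshot {
        unsigned char mgid[16];
        time_t created;
        size_t queuelen;
        bool complete;                  /* address handle present, i.e. join finished */
        bool send_only;
    };

    /* Copy everything the debug output needs out of the live group so it
     * can be formatted after the lock protecting the tree is released. */
    static void snapshot_group(struct group_snapshot *s,
                               const unsigned char *mgid, time_t created,
                               size_t queuelen, bool has_ah, bool send_only)
    {
        memcpy(s->mgid, mgid, sizeof(s->mgid));
        s->created = created;
        s->queuelen = queuelen;
        s->complete = has_ah;
        s->send_only = send_only;
    }
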