Lines matching refs:group (drivers/infiniband/hw/mlx4/mcg.c)

50 #define mcg_warn_group(group, format, arg...) \
52 (group)->name, group->demux->port, ## arg)
54 #define mcg_debug_group(group, format, arg...) \
56 (group)->name, (group)->demux->port, ## arg)
58 #define mcg_error_group(group, format, arg...) \
59 pr_err(" %16s: " format, (group)->name, ## arg)
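
The three macros above (lines 50-59) prefix every message with the group's 32-hex-digit name, and mcg_warn_group/mcg_debug_group add the demux port, so any log line can be traced back to a specific multicast group. A minimal userspace analogue of the same GNU named-varargs pattern, with purely illustrative names:

    /* Sketch of the per-group logging pattern; names are illustrative,
     * not the driver's.  "## arg" swallows the comma when no varargs
     * are passed. */
    #include <stdio.h>

    struct demo_group {
        const char *name;
        int port;
    };

    #define demo_warn_group(group, format, arg...) \
        fprintf(stderr, "%16s (port %d): WARNING: " format, \
                (group)->name, (group)->port, ## arg)

    int main(void)
    {
        struct demo_group g = {
            .name = "ff12401bffff00000000000000000001",  /* made-up MGID name */
            .port = 1,
        };

        demo_warn_group(&g, "refcount is %d\n", 0);
        demo_warn_group(&g, "no-varargs case\n");
        return 0;
    }
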
136 struct mcast_group *group;
144 mcg_warn_group(group, "did not expect to reach zero\n"); \
166 struct mcast_group *group;
170 group = rb_entry(node, struct mcast_group, node);
171 ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
173 return group;
184 struct mcast_group *group)
195 ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
196 sizeof group->rec.mgid);
204 rb_link_node(&group->node, parent, link);
205 rb_insert_color(&group->node, &ctx->mcg_table);
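
mcast_find() (lines 166-173) and mcast_insert() (lines 184-205) keep all groups of a port in an rb-tree keyed by the raw MGID bytes, descending with memcmp() and returning the existing node on a duplicate insert. A self-contained sketch of that memcmp-keyed descent, with a plain unbalanced binary tree standing in for struct rb_root/rb_node so it compiles in userspace; all names here are illustrative:

    #include <stddef.h>
    #include <string.h>

    struct demo_group {
        unsigned char mgid[16];             /* tree key: raw MGID bytes */
        struct demo_group *left, *right;
    };

    static struct demo_group *demo_find(struct demo_group *root,
                                        const unsigned char *mgid)
    {
        while (root) {
            int ret = memcmp(mgid, root->mgid, 16);

            if (!ret)
                return root;
            root = ret < 0 ? root->left : root->right;
        }
        return NULL;
    }

    /* Returns NULL on success, or the existing node when the key is
     * already present -- mirroring how mcast_insert() reports a
     * duplicate MGID. */
    static struct demo_group *demo_insert(struct demo_group **root,
                                          struct demo_group *grp)
    {
        struct demo_group **link = root;

        while (*link) {
            int ret = memcmp(grp->mgid, (*link)->mgid, 16);

            if (!ret)
                return *link;               /* duplicate MGID */
            link = ret < 0 ? &(*link)->left : &(*link)->right;
        }
        grp->left = grp->right = NULL;
        *link = grp;
        return NULL;
    }

The driver additionally rebalances via rb_link_node()/rb_insert_color() after linking the new node; the comparison logic is the same.
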
252 static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad)
262 sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0];
265 mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
266 group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */
268 ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
272 queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
279 static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
292 mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
293 group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */
301 *sa_data = group->rec;
304 ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
306 group->state = MCAST_IDLE;
311 queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
318 static int send_reply_to_slave(int slave, struct mcast_group *group,
341 *sa_data = group->rec;
345 sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f);
348 ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad);
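
send_join_to_wire() and send_leave_to_wire() both allocate a fresh TID, remember it in group->last_req_tid, and arm timeout_work before handing the MAD to the wire, so a later response can be matched against the single outstanding request. A hedged sketch of just that TID bookkeeping, with the MAD construction and the delayed work reduced to comments and all names illustrative:

    #include <stdint.h>
    #include <stdio.h>

    struct demo_group {
        uint64_t last_req_tid;
    };

    static uint64_t next_tid = 1;

    static void demo_send_request(struct demo_group *g)
    {
        g->last_req_tid = next_tid++;   /* keep it for later validation */
        /* ...build the SA MAD and hand it to the wire here... */
        /* the driver also arms a delayed work as a response timeout */
    }

    static int demo_handle_response(struct demo_group *g, uint64_t resp_tid)
    {
        if (resp_tid != g->last_req_tid) {
            fprintf(stderr, "wrong TID %llx, expected %llx: dropping\n",
                    (unsigned long long)resp_tid,
                    (unsigned long long)g->last_req_tid);
            return -1;
        }
        return 0;   /* response matches the outstanding request */
    }
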
386 /* src is group record, dst is request record */
435 /* release group, return 1 if this was last release and group is destroyed
437 static int release_group(struct mcast_group *group, int from_timeout_handler)
439 struct mlx4_ib_demux_ctx *ctx = group->demux;
443 mutex_lock(&group->lock);
444 if (atomic_dec_and_test(&group->refcount)) {
446 if (group->state != MCAST_IDLE &&
447 !cancel_delayed_work(&group->timeout_work)) {
448 atomic_inc(&group->refcount);
449 mutex_unlock(&group->lock);
455 nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0);
457 del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
458 if (!list_empty(&group->pending_list))
459 mcg_warn_group(group, "releasing a group with non empty pending list\n");
461 rb_erase(&group->node, &ctx->mcg_table);
462 list_del_init(&group->mgid0_list);
463 mutex_unlock(&group->lock);
465 kfree(group);
468 mutex_unlock(&group->lock);
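
release_group() (lines 437-468) drops the reference under the group lock; if the count reaches zero while a join/leave timeout is still armed and cannot be cancelled, the reference is handed back so the timeout handler performs the final teardown itself. A sketch of that discipline, assuming a pthread mutex and a plain int in place of the kernel mutex and atomic_t, with the sysfs/rb-tree teardown reduced to a comment:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    enum demo_state { DEMO_IDLE, DEMO_JOIN_SENT, DEMO_LEAVE_SENT };

    struct demo_group {
        pthread_mutex_t lock;
        int refcount;
        enum demo_state state;
        bool timeout_handler_running;
    };

    /* Mirrors cancel_delayed_work(): fails once the handler has started. */
    static bool demo_cancel_timeout(struct demo_group *g)
    {
        return !g->timeout_handler_running;
    }

    /* Returns 1 iff this was the last reference and the group was freed. */
    static int demo_release_group(struct demo_group *g, int from_timeout_handler)
    {
        pthread_mutex_lock(&g->lock);
        if (--g->refcount == 0) {
            if (!from_timeout_handler &&
                g->state != DEMO_IDLE && !demo_cancel_timeout(g)) {
                /* the timeout handler is already running and will call
                 * release itself: hand our reference over to it */
                g->refcount++;
                pthread_mutex_unlock(&g->lock);
                return 0;
            }
            /* last user: in the driver this is where the sysfs entry is
             * removed and the node erased from the rb-tree */
            pthread_mutex_unlock(&g->lock);
            pthread_mutex_destroy(&g->lock);
            free(g);
            return 1;
        }
        pthread_mutex_unlock(&g->lock);
        return 0;
    }
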
474 static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
480 group->members[i] += inc;
483 static u8 get_leave_state(struct mcast_group *group)
489 if (!group->members[i])
492 return leave_state & (group->rec.scope_join_state & 0xf);
495 static int join_group(struct mcast_group *group, int slave, u8 join_mask)
501 join_state = join_mask & (~group->func[slave].join_state);
502 adjust_membership(group, join_state, 1);
503 group->func[slave].join_state |= join_state;
504 if (group->func[slave].state != MCAST_MEMBER && join_state) {
505 group->func[slave].state = MCAST_MEMBER;
511 static int leave_group(struct mcast_group *group, int slave, u8 leave_state)
515 adjust_membership(group, leave_state, -1);
516 group->func[slave].join_state &= ~leave_state;
517 if (!group->func[slave].join_state) {
518 group->func[slave].state = MCAST_NOT_MEMBER;
524 static int check_leave(struct mcast_group *group, int slave, u8 leave_mask)
526 if (group->func[slave].state != MCAST_MEMBER)
530 if (~group->func[slave].join_state & leave_mask)
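
adjust_membership() through check_leave() (lines 474-530) keep one counter per join-state bit (full, non, and send-only member) across all functions; a bit may only be left toward the SM once no function holds it any more, and a function may only leave bits it actually joined with. A small illustrative sketch of that per-bit accounting:

    #include <stdint.h>

    #define NUM_JOIN_BITS 3

    struct demo_group {
        int members[NUM_JOIN_BITS];   /* how many functions hold each bit */
        uint8_t scope_join_state;     /* low nibble: aggregated join state */
    };

    static void demo_adjust_membership(struct demo_group *g,
                                       uint8_t join_state, int inc)
    {
        for (int i = 0; i < NUM_JOIN_BITS; i++)
            if (join_state & (1 << i))
                g->members[i] += inc;
    }

    /* Bits that can now go to the SM in a leave request: joined in the
     * group record, but no longer held by any local function. */
    static uint8_t demo_get_leave_state(const struct demo_group *g)
    {
        uint8_t leave_state = 0;

        for (int i = 0; i < NUM_JOIN_BITS; i++)
            if (!g->members[i])
                leave_state |= 1 << i;

        return leave_state & (g->scope_join_state & 0xf);
    }
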
542 struct mcast_group *group;
545 group = container_of(delay, typeof(*group), timeout_work);
547 mutex_lock(&group->lock);
548 if (group->state == MCAST_JOIN_SENT) {
549 if (!list_empty(&group->pending_list)) {
550 req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
553 --group->func[req->func].num_pend_reqs;
554 mutex_unlock(&group->lock);
556 if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) {
557 if (release_group(group, 1))
560 kfree(group);
563 mutex_lock(&group->lock);
565 mcg_warn_group(group, "DRIVER BUG\n");
566 } else if (group->state == MCAST_LEAVE_SENT) {
567 if (group->rec.scope_join_state & 0xf)
568 group->rec.scope_join_state &= 0xf0;
569 group->state = MCAST_IDLE;
570 mutex_unlock(&group->lock);
571 if (release_group(group, 1))
573 mutex_lock(&group->lock);
575 mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state));
576 group->state = MCAST_IDLE;
577 atomic_inc(&group->refcount);
578 if (!queue_work(group->demux->mcg_wq, &group->work))
579 safe_atomic_dec(&group->refcount);
581 mutex_unlock(&group->lock);
584 static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
590 leave_mask = group->func[req->func].join_state;
592 status = check_leave(group, req->func, leave_mask);
594 leave_group(group, req->func, leave_mask);
597 send_reply_to_slave(req->func, group, &req->sa_mad, status);
598 --group->func[req->func].num_pend_reqs;
605 static int handle_join_req(struct mcast_group *group, u8 join_mask,
608 u8 group_join_state = group->rec.scope_join_state & 0xf;
615 status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask);
617 join_group(group, req->func, join_mask);
619 --group->func[req->func].num_pend_reqs;
620 send_reply_to_slave(req->func, group, &req->sa_mad, status);
627 group->prev_state = group->state;
628 if (send_join_to_wire(group, &req->sa_mad)) {
629 --group->func[req->func].num_pend_reqs;
634 group->state = group->prev_state;
636 group->state = MCAST_JOIN_SENT;
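
handle_join_req() (lines 605-636) answers a join locally when every requested join-state bit is already present in the group record (after cmp_rec() validates the request against it), and otherwise forwards the join to the SM and moves the group to MCAST_JOIN_SENT. A sketch of just that decision, with illustrative names:

    #include <stdint.h>

    enum demo_action { DEMO_REPLY_LOCALLY, DEMO_SEND_TO_SM };

    static enum demo_action demo_join_action(uint8_t group_join_state,
                                             uint8_t join_mask)
    {
        /* all requested bits already present in the group's join state? */
        if ((group_join_state & join_mask) == join_mask)
            return DEMO_REPLY_LOCALLY;
        return DEMO_SEND_TO_SM;
    }
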
644 struct mcast_group *group;
652 group = container_of(work, typeof(*group), work);
654 mutex_lock(&group->lock);
656 /* First, let's see if a response from SM is waiting regarding this group.
657 * If so, we need to update the group's REC. If this is a bad response, we
660 if (group->state == MCAST_RESP_READY) {
662 cancel_delayed_work(&group->timeout_work);
663 status = be16_to_cpu(group->response_sa_mad.mad_hdr.status);
664 method = group->response_sa_mad.mad_hdr.method;
665 if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) {
666 mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n",
667 be64_to_cpu(group->response_sa_mad.mad_hdr.tid),
668 be64_to_cpu(group->last_req_tid));
669 group->state = group->prev_state;
673 if (!list_empty(&group->pending_list))
674 req = list_first_entry(&group->pending_list,
678 send_reply_to_slave(req->func, group, &req->sa_mad, status);
679 --group->func[req->func].num_pend_reqs;
685 mcg_warn_group(group, "no request for failed join\n");
686 } else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing)
693 group->response_sa_mad.data)->scope_join_state & 0xf;
694 cur_join_state = group->rec.scope_join_state & 0xf;
702 memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec);
704 group->state = MCAST_IDLE;
709 while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) {
710 req = list_first_entry(&group->pending_list, struct mcast_req,
719 rc += handle_leave_req(group, req_join_state, req);
721 rc += handle_join_req(group, req_join_state, req);
725 if (group->state == MCAST_IDLE) {
726 req_join_state = get_leave_state(group);
728 group->rec.scope_join_state &= ~req_join_state;
729 group->prev_state = group->state;
730 if (send_leave_to_wire(group, req_join_state)) {
731 group->state = group->prev_state;
734 group->state = MCAST_LEAVE_SENT;
738 if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE)
740 mutex_unlock(&group->lock);
743 release_group(group, 0);
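
The work handler's dispatch loop (line 709 above) consumes pending requests only while the group is MCAST_IDLE; as soon as a join or leave goes out on the wire the state changes and processing stops until the SM response, or the timeout, returns the group to idle. A reduced sketch of that gating, with a singly linked list standing in for pending_list and all names illustrative:

    enum demo_state { DEMO_IDLE, DEMO_JOIN_SENT, DEMO_LEAVE_SENT };

    struct demo_req {
        int is_leave;
        struct demo_req *next;
    };

    struct demo_group {
        enum demo_state state;
        struct demo_req *pending;     /* stands in for pending_list */
    };

    /* Returns the number of requests retired in this pass. */
    static int demo_process_pending(struct demo_group *g)
    {
        int done = 0;

        while (g->pending && g->state == DEMO_IDLE) {
            struct demo_req *req = g->pending;

            if (req->is_leave) {
                /* leaves are answered to the slave locally ... */
                g->pending = req->next;
                done++;
            } else {
                /* ... while a join may have to go to the SM, which
                 * blocks further processing until the reply arrives */
                g->state = DEMO_JOIN_SENT;
            }
        }
        return done;
    }

In the driver, the leave toward the SM is then issued after this loop, once get_leave_state() reports bits that no function holds any more (lines 725-734 above).
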
750 struct mcast_group *group = NULL, *cur_group, *n;
754 list_for_each_entry_safe(group, n, &ctx->mcg_mgid0_list, mgid0_list) {
755 mutex_lock(&group->lock);
756 if (group->last_req_tid == tid) {
758 group->rec.mgid = *new_mgid;
759 sprintf(group->name, "%016llx%016llx",
760 be64_to_cpu(group->rec.mgid.global.subnet_prefix),
761 be64_to_cpu(group->rec.mgid.global.interface_id));
762 list_del_init(&group->mgid0_list);
763 cur_group = mcast_insert(ctx, group);
766 req = list_first_entry(&group->pending_list,
768 --group->func[req->func].num_pend_reqs;
772 mutex_unlock(&group->lock);
774 release_group(group, 0);
778 atomic_inc(&group->refcount);
779 add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
780 mutex_unlock(&group->lock);
782 return group;
786 list_del(&group->mgid0_list);
787 if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE)
788 cancel_delayed_work_sync(&group->timeout_work);
790 list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) {
794 mutex_unlock(&group->lock);
796 kfree(group);
800 mutex_unlock(&group->lock);
813 struct mcast_group *group, *cur_group;
819 group = mcast_find(ctx, mgid);
820 if (group)
827 group = kzalloc(sizeof(*group), GFP_KERNEL);
828 if (!group)
831 group->demux = ctx;
832 group->rec.mgid = *mgid;
833 INIT_LIST_HEAD(&group->pending_list);
834 INIT_LIST_HEAD(&group->mgid0_list);
836 INIT_LIST_HEAD(&group->func[i].pending);
837 INIT_WORK(&group->work, mlx4_ib_mcg_work_handler);
838 INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler);
839 mutex_init(&group->lock);
840 sprintf(group->name, "%016llx%016llx",
841 be64_to_cpu(group->rec.mgid.global.subnet_prefix),
842 be64_to_cpu(group->rec.mgid.global.interface_id));
843 sysfs_attr_init(&group->dentry.attr);
844 group->dentry.show = sysfs_show_group;
845 group->dentry.store = NULL;
846 group->dentry.attr.name = group->name;
847 group->dentry.attr.mode = 0400;
848 group->state = MCAST_IDLE;
851 list_add(&group->mgid0_list, &ctx->mcg_mgid0_list);
855 cur_group = mcast_insert(ctx, group);
857 mcg_warn("group just showed up %s - confused\n", cur_group->name);
858 kfree(group);
862 add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
865 atomic_inc(&group->refcount);
866 return group;
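
acquire_group() (lines 813-866) is a find-or-create: look the MGID up in the table, allocate and initialize a new group when it is absent and creation is allowed, and return with an extra reference either way. A sketch of that pattern, with a linked list standing in for the rb-tree, the work/sysfs setup reduced to a comment, and illustrative names (the driver returns ERR_PTR codes where this sketch returns NULL):

    #include <stdlib.h>
    #include <string.h>

    struct demo_group {
        unsigned char mgid[16];
        int refcount;
        struct demo_group *next;
    };

    static struct demo_group *demo_acquire_group(struct demo_group **head,
                                                 const unsigned char *mgid,
                                                 int may_create)
    {
        struct demo_group *g;

        for (g = *head; g; g = g->next)
            if (!memcmp(g->mgid, mgid, 16))
                goto found;

        if (!may_create)
            return NULL;

        g = calloc(1, sizeof(*g));
        if (!g)
            return NULL;
        memcpy(g->mgid, mgid, 16);
        /* the driver also inits the pending lists, work items and mutex,
         * and registers a sysfs attribute named after the MGID here */
        g->next = *head;
        *head = g;

    found:
        g->refcount++;      /* caller owns a reference; drop via release */
        return g;
    }
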
871 struct mcast_group *group = req->group;
873 atomic_inc(&group->refcount); /* for the request */
874 atomic_inc(&group->refcount); /* for scheduling the work */
875 list_add_tail(&req->group_list, &group->pending_list);
876 list_add_tail(&req->func_list, &group->func[req->func].pending);
878 if (!queue_work(group->demux->mcg_wq, &group->work))
879 safe_atomic_dec(&group->refcount);
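
queue_req() (lines 871-879) takes one reference to pin the group for the lifetime of the request and a second one for the scheduled work, then drops the second again if queue_work() reports the work was already pending (which is what safe_atomic_dec() at line 144 guards). A sketch of that reference discipline, with a bool standing in for the work_struct state and illustrative names:

    #include <stdbool.h>

    struct demo_group {
        int refcount;
        bool work_queued;     /* stands in for the group's work_struct */
    };

    /* Mirrors queue_work(): returns false if the work was already pending. */
    static bool demo_queue_work(struct demo_group *g)
    {
        if (g->work_queued)
            return false;
        g->work_queued = true;
        return true;
    }

    static void demo_queue_req(struct demo_group *g)
    {
        g->refcount++;            /* for the request itself */
        g->refcount++;            /* for scheduling the work */
        /* ...link the request onto the group's and the function's
         * pending lists here... */
        if (!demo_queue_work(g))
            g->refcount--;        /* work already queued: give the
                                   * scheduling reference back */
    }
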
888 struct mcast_group *group;
894 group = acquire_group(ctx, &rec->mgid, 0);
896 if (IS_ERR(group)) {
899 *(u8 *)(&tid) = (u8)slave; /* in group we kept the modified TID */
900 group = search_relocate_mgid0_group(ctx, tid, &rec->mgid);
902 group = NULL;
905 if (!group)
908 mutex_lock(&group->lock);
909 group->response_sa_mad = *mad;
910 group->prev_state = group->state;
911 group->state = MCAST_RESP_READY;
913 atomic_inc(&group->refcount);
914 if (!queue_work(ctx->mcg_wq, &group->work))
915 safe_atomic_dec(&group->refcount);
916 mutex_unlock(&group->lock);
917 release_group(group, 0);
937 struct mcast_group *group;
957 group = acquire_group(ctx, &rec->mgid, may_create);
959 if (IS_ERR(group)) {
961 return PTR_ERR(group);
963 mutex_lock(&group->lock);
964 if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) {
965 mutex_unlock(&group->lock);
966 mcg_debug_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
968 release_group(group, 0);
972 ++group->func[slave].num_pend_reqs;
973 req->group = group;
975 mutex_unlock(&group->lock);
976 release_group(group, 0);
993 struct mcast_group *group =
1002 if (group->state == MCAST_IDLE)
1004 get_state_string(group->state));
1007 get_state_string(group->state),
1008 be64_to_cpu(group->last_req_tid));
1010 if (list_empty(&group->pending_list)) {
1013 req = list_first_entry(&group->pending_list, struct mcast_req,
1020 group->rec.scope_join_state & 0xf,
1021 group->members[2],
1022 group->members[1],
1023 group->members[0],
1024 atomic_read(&group->refcount),
1029 if (group->func[i].state == MCAST_MEMBER)
1031 group->func[i].join_state);
1034 hoplimit = be32_to_cpu(group->rec.sl_flowlabel_hoplimit);
1037 be16_to_cpu(group->rec.pkey),
1038 be32_to_cpu(group->rec.qkey),
1039 (group->rec.mtusel_mtu & 0xc0) >> 6,
1040 (group->rec.mtusel_mtu & 0x3f),
1041 group->rec.tclass,
1042 (group->rec.ratesel_rate & 0xc0) >> 6,
1043 (group->rec.ratesel_rate & 0x3f),
1047 group->rec.proxy_join);
1070 static void force_clean_group(struct mcast_group *group)
1074 list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) {
1078 del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr);
1079 rb_erase(&group->node, &group->demux->mcg_table);
1080 kfree(group);
1087 struct mcast_group *group;
1113 group = rb_entry(p, struct mcast_group, node);
1114 if (atomic_read(&group->refcount))
1115 mcg_debug_group(group, "group refcount %d!!! (pointer %p)\n",
1116 atomic_read(&group->refcount), group);
1118 force_clean_group(group);
1173 static void clear_pending_reqs(struct mcast_group *group, int vf)
1179 if (!list_empty(&group->pending_list))
1180 group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list);
1182 list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) {
1185 (group->state == MCAST_JOIN_SENT ||
1186 group->state == MCAST_LEAVE_SENT)) {
1187 clear = cancel_delayed_work(&group->timeout_work);
1189 group->state = MCAST_IDLE;
1192 --group->func[vf].num_pend_reqs;
1196 atomic_dec(&group->refcount);
1200 if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) {
1201 mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n",
1202 list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs);
1206 static int push_deleteing_req(struct mcast_group *group, int slave)
1211 if (!group->func[slave].join_state)
1218 if (!list_empty(&group->func[slave].pending)) {
1219 pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list);
1228 req->group = group;
1229 ++group->func[slave].num_pend_reqs;
1237 struct mcast_group *group;
1242 group = rb_entry(p, struct mcast_group, node);
1243 mutex_lock(&group->lock);
1244 if (atomic_read(&group->refcount)) {
1246 clear_pending_reqs(group, slave);
1247 push_deleteing_req(group, slave);
1249 mutex_unlock(&group->lock);
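
clear_pending_reqs() and push_deleteing_req() (lines 1173-1229) handle a disappearing VF: its queued requests are cancelled, and if it still holds membership a driver-generated leave request carrying exactly its remaining join-state bits is queued like any other request (this pairs with line 590 above, where handle_leave_req() recomputes the leave mask for such cleanup requests). A sketch of the idea, with the fabricated SA MAD reduced to a comment and all names illustrative:

    #include <stdint.h>
    #include <stdlib.h>

    struct demo_req {
        int func;
        int clean;            /* marks a driver-generated cleanup request */
        uint8_t leave_mask;
    };

    static struct demo_req *demo_push_deleting_req(int slave,
                                                   uint8_t join_state)
    {
        struct demo_req *req;

        if (!join_state)      /* nothing joined: nothing to clean up */
            return NULL;

        req = calloc(1, sizeof(*req));
        if (!req)
            return NULL;
        req->func = slave;
        req->clean = 1;
        req->leave_mask = join_state;   /* leave exactly what was joined */
        /* ...the driver builds a delete SA MAD here and links the request
         * onto the group's pending lists like any slave request... */
        return req;
    }
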