Lines Matching defs:sriov

86 guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
102 memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
117 return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
136 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
137 if (dev->sriov.alias_guid.ports_guid[port_index].state_flags &
141 curr_guid = *(__be64 *)&dev->sriov.
154 *(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index].
157 dev->sriov.alias_guid.ports_guid[port_index].
160 dev->sriov.alias_guid.ports_guid[port_index].
164 dev->sriov.alias_guid.ports_guid[port_index].
166 dev->sriov.alias_guid.ports_guid[port_index].
171 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
204 rec = &dev->sriov.alias_guid.ports_guid[port_num - 1].
206 guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
236 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
248 spin_unlock_irqrestore(&dev->sriov.
253 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock,
307 rec = &dev->sriov.alias_guid.ports_guid[port_index].
327 rec = &dev->sriov.alias_guid.ports_guid[port_index].
330 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
424 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
434 spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
435 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
436 if (!dev->sriov.is_going_down) {
438 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
439 &dev->sriov.alias_guid.ports_guid[port_index].
448 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
449 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
458 dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
464 *(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
477 dev->sriov.alias_guid.ports_guid[port - 1].
479 if (dev->sriov.alias_guid.ports_guid[port - 1].
481 dev->sriov.alias_guid.ports_guid[port - 1].
500 &dev->sriov.alias_guid.ports_guid[port - 1].cb_list;
540 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
542 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
545 ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
555 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
558 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
567 spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
568 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
570 if (!dev->sriov.is_going_down) {
571 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
572 &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
575 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
576 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
596 *(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
613 spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
614 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
616 if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags &
619 dev->sriov.alias_guid.ports_guid[port - 1].state_flags &=
625 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
631 cancel_delayed_work(&dev->sriov.alias_guid.
633 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
634 &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
637 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
638 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
651 &dev->sriov.alias_guid.ports_guid[port].
700 rec = dev->sriov.alias_guid.ports_guid[port].
730 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
740 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
756 struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);
781 spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
782 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
783 if (!dev->sriov.is_going_down) {
788 cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port].
790 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
791 &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
793 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
794 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
800 struct mlx4_ib_sriov *sriov = &dev->sriov;
807 det = &sriov->alias_guid.ports_guid[i];
809 spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
817 spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
821 spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
823 spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
826 flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
827 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
829 ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
830 kfree(dev->sriov.alias_guid.sa_client);
842 dev->sriov.alias_guid.sa_client =
843 kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
844 if (!dev->sriov.alias_guid.sa_client)
847 ib_sa_register_client(dev->sriov.alias_guid.sa_client);
849 spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);
859 memset(&dev->sriov.alias_guid.ports_guid[i], 0,
861 dev->sriov.alias_guid.ports_guid[i].state_flags |=
865 memset(dev->sriov.alias_guid.ports_guid[i].
867 sizeof(dev->sriov.alias_guid.ports_guid[i].
870 INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
878 dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
879 dev->sriov.alias_guid.ports_guid[i].port = i;
882 dev->sriov.alias_guid.ports_guid[i].wq =
884 if (!dev->sriov.alias_guid.ports_guid[i].wq) {
888 INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
895 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
896 dev->sriov.alias_guid.ports_guid[i].wq = NULL;
900 ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
901 kfree(dev->sriov.alias_guid.sa_client);
902 dev->sriov.alias_guid.sa_client = NULL;
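
A pattern that recurs across the matches above (e.g. file lines 434-449, 567-576, and 781-794) is the lock ordering used before queueing the per-port alias GUID work: sriov.going_down_lock is taken first, then alias_guid.ag_work_lock, and the delayed work is queued only while sriov.is_going_down is clear. The sketch below is illustrative only: the field names are taken from the matched lines (apparently the mlx4 IB driver's alias GUID handling), it assumes the driver's struct mlx4_ib_dev definitions, and the function name and body are a hypothetical reconstruction rather than the file's actual code.

static void example_queue_alias_guid_work(struct mlx4_ib_dev *dev, int port)
{
	unsigned long flags, flags1;

	/* Lock order seen in the matches: going_down_lock, then ag_work_lock. */
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down) {
		/* Re-arm the per-port delayed work only while the device is up. */
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   0);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

The teardown matches (file lines 826-830 and 895-902) mirror the setup ones (842-888): the sa_client is kzalloc'd and registered with ib_sa_register_client(), each port gets a workqueue and a delayed work item, and cleanup unregisters the client, frees it, and destroys the per-port workqueues in the reverse order.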