Lines Matching defs:sriov

85 guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
101 memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
116 return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
135 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
136 if (dev->sriov.alias_guid.ports_guid[port_index].state_flags &
140 curr_guid = *(__be64 *)&dev->sriov.
153 *(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index].
156 dev->sriov.alias_guid.ports_guid[port_index].
159 dev->sriov.alias_guid.ports_guid[port_index].
163 dev->sriov.alias_guid.ports_guid[port_index].
165 dev->sriov.alias_guid.ports_guid[port_index].
170 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
203 rec = &dev->sriov.alias_guid.ports_guid[port_num - 1].
205 guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
235 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
247 spin_unlock_irqrestore(&dev->sriov.
252 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock,
306 rec = &dev->sriov.alias_guid.ports_guid[port_index].
326 rec = &dev->sriov.alias_guid.ports_guid[port_index].
329 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
423 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
433 spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
434 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
435 if (!dev->sriov.is_going_down) {
437 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
438 &dev->sriov.alias_guid.ports_guid[port_index].
447 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
448 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
457 dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
463 *(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
476 dev->sriov.alias_guid.ports_guid[port - 1].
478 if (dev->sriov.alias_guid.ports_guid[port - 1].
480 dev->sriov.alias_guid.ports_guid[port - 1].
499 &dev->sriov.alias_guid.ports_guid[port - 1].cb_list;
539 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
541 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
544 ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
554 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
557 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
566 spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
567 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
569 if (!dev->sriov.is_going_down) {
570 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
571 &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
574 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
575 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
595 *(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
612 spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
613 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
615 if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags &
618 dev->sriov.alias_guid.ports_guid[port - 1].state_flags &=
624 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
630 cancel_delayed_work(&dev->sriov.alias_guid.
632 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
633 &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
636 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
637 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
650 &dev->sriov.alias_guid.ports_guid[port].
699 rec = dev->sriov.alias_guid.ports_guid[port].
729 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
739 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
755 struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);
780 spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
781 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
782 if (!dev->sriov.is_going_down) {
787 cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port].
789 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
790 &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
792 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
793 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
799 struct mlx4_ib_sriov *sriov = &dev->sriov;
806 det = &sriov->alias_guid.ports_guid[i];
808 spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
816 spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
820 spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
822 spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
825 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
826 ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
827 kfree(dev->sriov.alias_guid.sa_client);
839 dev->sriov.alias_guid.sa_client =
840 kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
841 if (!dev->sriov.alias_guid.sa_client)
844 ib_sa_register_client(dev->sriov.alias_guid.sa_client);
846 spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);
856 memset(&dev->sriov.alias_guid.ports_guid[i], 0,
858 dev->sriov.alias_guid.ports_guid[i].state_flags |=
862 memset(dev->sriov.alias_guid.ports_guid[i].
864 sizeof(dev->sriov.alias_guid.ports_guid[i].
867 INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
875 dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
876 dev->sriov.alias_guid.ports_guid[i].port = i;
879 dev->sriov.alias_guid.ports_guid[i].wq =
881 if (!dev->sriov.alias_guid.ports_guid[i].wq) {
885 INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
892 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
893 dev->sriov.alias_guid.ports_guid[i].wq = NULL;
897 ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
898 kfree(dev->sriov.alias_guid.sa_client);
899 dev->sriov.alias_guid.sa_client = NULL;
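
Taken together, the matches show one recurring ordering around the per-port alias-GUID work: going_down_lock is taken first, ag_work_lock second, and the delayed work is queued only while is_going_down is clear (see file lines 433-448, 566-575 and 780-793 above). The following is a minimal sketch of that pattern, assuming the mlx4_ib_dev/mlx4_ib_sriov fields named in the matches; the function name and signature are illustrative only and are not a helper taken from the driver itself.

static void queue_alias_guid_work_if_active(struct mlx4_ib_dev *dev, int port)
{
	unsigned long flags, flags1;

	/* going_down_lock is always taken before ag_work_lock in the matches */
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down)
		/* queue this port's delayed work on its own workqueue */
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   0);
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

Teardown (file lines 799-827) takes ag_work_lock in the same way before touching the per-port state, then destroys the per-port workqueues and unregisters and frees the SA client, mirroring in reverse the init sequence at lines 839-899 (allocate and register sa_client, init ag_work_lock, set up each port's cb_list, workqueue and delayed work).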