Lines matching nets in drivers/net/rionet.c: definition (line 79) and all references

79 static struct rionet_net nets[RIONET_MAX_NETS];
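
The fields referenced throughout this listing (ndev, peers, lock, active, nact) suggest a per-mport bookkeeping structure along the lines of the sketch below. This is a reconstruction inferred from the references, not a verbatim copy; the field comments are assumptions.

struct rionet_net {
	struct net_device *ndev;	/* shared netdev for this mport */
	struct list_head   peers;	/* discovered RapidIO peer devices */
	spinlock_t         lock;	/* protects peers, active[] and nact */
	struct rio_dev   **active;	/* destid-indexed table of joined peers */
	int                nact;	/* number of entries currently in active[] */
};
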
182 add_num = nets[rnet->mport->id].nact;
197 if (nets[rnet->mport->id].active[i]) {
199 nets[rnet->mport->id].active[i]);
206 if (nets[rnet->mport->id].active[destid])
208 nets[rnet->mport->id].active[destid]);
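
Lines 182-208 belong to the transmit path: a broadcast frame is handed to every peer recorded in active[], while a unicast frame is queued only if its destination ID has an active entry. A minimal sketch of that decision follows; rionet_queue_tx_msg() and rionet_get_destid() are placeholder names, and skb reference counting for the extra broadcast copies is omitted.

static netdev_tx_t rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct rionet_private *rnet = netdev_priv(ndev);
	struct rionet_net *net = &nets[rnet->mport->id];
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	int i;

	if (is_multicast_ether_addr(eth->h_dest)) {
		/* broadcast: queue one copy per active peer (lines 182-199) */
		int add_num = net->nact;

		for (i = 0; add_num && i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size); i++)
			if (net->active[i]) {
				rionet_queue_tx_msg(skb, ndev, net->active[i]);
				add_num--;
			}
	} else {
		/* unicast: destid recovered from the destination MAC (lines 206-208) */
		u16 destid = rionet_get_destid(eth->h_dest);

		if (net->active[destid])
			rionet_queue_tx_msg(skb, ndev, net->active[destid]);
	}

	return NETDEV_TX_OK;
}
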
239 if (!nets[netid].active[sid]) {
240 spin_lock(&nets[netid].lock);
241 list_for_each_entry(peer, &nets[netid].peers, node) {
243 nets[netid].active[sid] = peer->rdev;
244 nets[netid].nact++;
247 spin_unlock(&nets[netid].lock);
253 spin_lock(&nets[netid].lock);
254 if (nets[netid].active[sid]) {
255 nets[netid].active[sid] = NULL;
256 nets[netid].nact--;
258 spin_unlock(&nets[netid].lock);
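
Lines 239-258 are the inbound doorbell handler: a JOIN doorbell from a source that is not yet active promotes the matching peer into active[] and bumps nact under the per-net lock, while a LEAVE doorbell clears the entry and decrements nact. A sketch under those assumptions (the handler name, the rionet_peer type and the JOIN echo are inferred, not quoted):

static void rionet_dbell_event(struct rio_mport *mport, void *dev_id,
			       u16 sid, u16 tid, u16 info)
{
	struct rionet_private *rnet = netdev_priv((struct net_device *)dev_id);
	unsigned char netid = rnet->mport->id;
	struct rionet_peer *peer;

	if (info == RIONET_DOORBELL_JOIN) {
		if (!nets[netid].active[sid]) {			/* line 239 */
			spin_lock(&nets[netid].lock);
			list_for_each_entry(peer, &nets[netid].peers, node)
				if (peer->rdev->destid == sid) {
					nets[netid].active[sid] = peer->rdev;
					nets[netid].nact++;	/* lines 243-244 */
				}
			spin_unlock(&nets[netid].lock);
			/* answer so the peer marks us active as well (assumed) */
			rio_mport_send_doorbell(mport, sid, RIONET_DOORBELL_JOIN);
		}
	} else if (info == RIONET_DOORBELL_LEAVE) {
		spin_lock(&nets[netid].lock);
		if (nets[netid].active[sid]) {			/* lines 254-256 */
			nets[netid].active[sid] = NULL;
			nets[netid].nact--;
		}
		spin_unlock(&nets[netid].lock);
	}
}
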
354 spin_lock_irqsave(&nets[netid].lock, flags);
355 list_for_each_entry(peer, &nets[netid].peers, node) {
359 spin_unlock_irqrestore(&nets[netid].lock, flags);
384 spin_lock_irqsave(&nets[netid].lock, flags);
385 list_for_each_entry(peer, &nets[netid].peers, node) {
386 if (nets[netid].active[peer->rdev->destid]) {
388 nets[netid].active[peer->rdev->destid] = NULL;
393 spin_unlock_irqrestore(&nets[netid].lock, flags);
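
Lines 354-359 and 384-393 iterate the peer list with the IRQ-safe lock variant, which fits an open path that announces itself to every known peer and a close path that tears down the active[] entries. A sketch under that assumption (the JOIN/LEAVE doorbell values follow the convention above; netid is the mport id as elsewhere in the listing):

unsigned long flags;
struct rionet_peer *peer;

/* open: tell every known peer we are up (lines 354-359) */
spin_lock_irqsave(&nets[netid].lock, flags);
list_for_each_entry(peer, &nets[netid].peers, node)
	rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
spin_unlock_irqrestore(&nets[netid].lock, flags);

/* close: drop every peer that had joined (lines 384-393) */
spin_lock_irqsave(&nets[netid].lock, flags);
list_for_each_entry(peer, &nets[netid].peers, node)
	if (nets[netid].active[peer->rdev->destid]) {
		rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
		nets[netid].active[peer->rdev->destid] = NULL;
	}
spin_unlock_irqrestore(&nets[netid].lock, flags);
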
414 spin_lock_irqsave(&nets[netid].lock, flags);
415 list_for_each_entry(peer, &nets[netid].peers, node) {
418 if (nets[netid].active[rdev->destid]) {
425 nets[netid].active[rdev->destid] = NULL;
426 nets[netid].nact--;
432 spin_unlock_irqrestore(&nets[netid].lock, flags);
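
Lines 414-432 handle removal of a single RapidIO device: its peer entry is taken off the list and, if the device had joined, its active[] slot is cleared and nact is decremented, all inside the IRQ-safe lock. A sketch of that shape (list_del()/kfree() of the peer entry are assumed, not shown in the listing):

unsigned long flags;
struct rionet_peer *peer;

spin_lock_irqsave(&nets[netid].lock, flags);
list_for_each_entry(peer, &nets[netid].peers, node) {
	if (peer->rdev == rdev) {
		list_del(&peer->node);
		if (nets[netid].active[rdev->destid]) {	/* lines 418-426 */
			nets[netid].active[rdev->destid] = NULL;
			nets[netid].nact--;
		}
		kfree(peer);
		break;		/* safe: iteration stops right after the delete */
	}
}
spin_unlock_irqrestore(&nets[netid].lock, flags);
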
489 nets[mport->id].active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
491 if (!nets[mport->id].active) {
495 memset((void *)nets[mport->id].active, 0, rionet_active_bytes);
527 free_pages((unsigned long)nets[mport->id].active,
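
Lines 489-495 allocate the destid-indexed active[] table as whole pages and zero it; line 527 is the matching free on an error or teardown path. A sketch, assuming rionet_active_bytes is one pointer per possible destination ID (rc/out belong to the surrounding probe context):

size_t rionet_active_bytes = sizeof(void *) *
			     RIO_MAX_ROUTE_ENTRIES(mport->sys_size);

nets[mport->id].active = (struct rio_dev **)
	__get_free_pages(GFP_KERNEL, get_order(rionet_active_bytes));
if (!nets[mport->id].active) {
	rc = -ENOMEM;
	goto out;
}
memset((void *)nets[mport->id].active, 0, rionet_active_bytes);

/* unwind (line 527) */
free_pages((unsigned long)nets[mport->id].active,
	   get_order(rionet_active_bytes));
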
561 if (!nets[netid].ndev) {
588 INIT_LIST_HEAD(&nets[netid].peers);
589 spin_lock_init(&nets[netid].lock);
590 nets[netid].nact = 0;
591 nets[netid].ndev = ndev;
602 rnet = netdev_priv(nets[netid].ndev);
620 spin_lock_irqsave(&nets[netid].lock, flags);
621 list_add_tail(&peer->node, &nets[netid].peers);
622 spin_unlock_irqrestore(&nets[netid].lock, flags);
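
Lines 561-622 are the per-device probe path: the first device seen on a mport sets up the shared net_device and the per-net state (lines 588-591), and every device is then remembered as a peer on the list (lines 620-622). A sketch with netdev allocation and registration omitted (kzalloc of the peer entry is an assumption):

if (!nets[netid].ndev) {
	/* first device on this mport: initialize the shared per-net state */
	INIT_LIST_HEAD(&nets[netid].peers);
	spin_lock_init(&nets[netid].lock);
	nets[netid].nact = 0;
	nets[netid].ndev = ndev;	/* the freshly set-up netdev */
}

rnet = netdev_priv(nets[netid].ndev);

/* record this rio_dev as a potential peer */
peer = kzalloc(sizeof(*peer), GFP_KERNEL);
if (!peer)
	return -ENOMEM;
peer->rdev = rdev;

spin_lock_irqsave(&nets[netid].lock, flags);
list_add_tail(&peer->node, &nets[netid].peers);
spin_unlock_irqrestore(&nets[netid].lock, flags);
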
646 if (!nets[i].ndev)
649 spin_lock_irqsave(&nets[i].lock, flags);
650 list_for_each_entry(peer, &nets[i].peers, node) {
651 if (nets[i].active[peer->rdev->destid]) {
654 nets[i].active[peer->rdev->destid] = NULL;
657 spin_unlock_irqrestore(&nets[i].lock, flags);
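
Lines 646-657 walk every configured net on shutdown and drop all active peers; the lines between 651 and 654 that do not mention nets most likely send the LEAVE doorbell, so the sketch below includes that call as an assumption:

for (i = 0; i < RIONET_MAX_NETS; i++) {
	if (!nets[i].ndev)
		continue;

	spin_lock_irqsave(&nets[i].lock, flags);
	list_for_each_entry(peer, &nets[i].peers, node)
		if (nets[i].active[peer->rdev->destid]) {
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
			nets[i].active[peer->rdev->destid] = NULL;
		}
	spin_unlock_irqrestore(&nets[i].lock, flags);
}
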
672 WARN(nets[id].nact, "%s called when connected to %d peers\n",
673 __func__, nets[id].nact);
674 WARN(!nets[id].ndev, "%s called for mport without NDEV\n",
677 if (nets[id].ndev) {
678 ndev = nets[id].ndev;
682 free_pages((unsigned long)nets[id].active,
685 nets[id].active = NULL;
687 nets[id].ndev = NULL;
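
Lines 672-687 tear down a mport: two WARNs check that no peers are still connected and that an ndev actually exists, then the netdev and the active[] pages are released and the slots reset. The unregister_netdev()/free_netdev() calls below are assumptions based on the visible ndev handling:

WARN(nets[id].nact, "%s called when connected to %d peers\n",
     __func__, nets[id].nact);
WARN(!nets[id].ndev, "%s called for mport without NDEV\n", __func__);

if (nets[id].ndev) {
	ndev = nets[id].ndev;
	unregister_netdev(ndev);

	free_pages((unsigned long)nets[id].active,
		   get_order(rionet_active_bytes));
	nets[id].active = NULL;

	free_netdev(ndev);
	nets[id].ndev = NULL;
}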