Lines matching refs:rnet — every use of the rnet private-data pointer (struct rionet_private) in the RapidIO Ethernet driver, drivers/net/rionet.c:
96 struct rionet_private *rnet = netdev_priv(ndev);
99 i = rnet->rx_slot;
102 if (!rnet->rx_skb[i])
105 if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
108 rnet->rx_skb[i]->data = data;
109 skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
110 rnet->rx_skb[i]->protocol =
111 eth_type_trans(rnet->rx_skb[i], ndev);
112 error = netif_rx(rnet->rx_skb[i]);
121 } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);
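
The matches at 96-121 are the receive-drain loop of rionet_rx_clean(): starting from rnet->rx_slot, each filled slot is handed to netif_rx() and the index wraps with a modulo until it comes back around, with an early break once rio_get_inb_message() runs dry. A minimal userspace sketch of that loop shape, under illustrative names (ring[], get_message(), and drain() are stand-ins, not the driver's API):

    #include <stdio.h>

    #define RING_SIZE 8                 /* stand-in for RIONET_RX_RING_SIZE */

    static void *ring[RING_SIZE];       /* stand-in for rnet->rx_skb[] */
    static int pending = 3;             /* fake "messages waiting" count */

    /* Stand-in for rio_get_inb_message(): NULL once nothing is queued. */
    static void *get_message(void)
    {
        static char payload;
        return pending-- > 0 ? &payload : NULL;
    }

    static int drain(int start)
    {
        int i = start;
        do {
            if (!ring[i])
                continue;               /* continue in a do/while re-tests
                                           the condition, so i still advances */
            if (!get_message())
                break;                  /* mailbox empty: stop early */
            printf("consumed slot %d\n", i);
            ring[i] = NULL;
        } while ((i = (i + 1) % RING_SIZE) != start);
        return i;
    }

    int main(void)
    {
        static char bufs[RING_SIZE];
        for (int i = 0; i < RING_SIZE; i++)
            ring[i] = &bufs[i];         /* all slots start filled */
        printf("stopped at slot %d\n", drain(0));
        return 0;
    }

The slot where drain() stops is what the refill pass below takes as its end marker.
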
129 struct rionet_private *rnet = netdev_priv(ndev);
131 i = rnet->rx_slot;
133 rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);
135 if (!rnet->rx_skb[i])
138 rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
139 rnet->rx_skb[i]->data);
142 rnet->rx_slot = i;
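
Lines 129-142 are the matching refill pass: buffers are reposted from rnet->rx_slot up to the slot the drain returned, stopping early if allocation fails, and the slot actually reached is stored back so the next pass resumes there. Sketched with the same illustrative names (malloc() standing in for dev_alloc_skb(); refill() is not a driver function):

    #include <stdio.h>
    #include <stdlib.h>

    #define RING_SIZE 8                 /* stand-in for RIONET_RX_RING_SIZE */

    static void *ring[RING_SIZE];       /* stand-in for rnet->rx_skb[] */
    static int rx_slot;                 /* stand-in for rnet->rx_slot */

    static void refill(int end)
    {
        int i = rx_slot;
        do {
            ring[i] = malloc(64);       /* stand-in for dev_alloc_skb() */
            if (!ring[i])
                break;                  /* out of memory: resume here later */
        } while ((i = (i + 1) % RING_SIZE) != end);
        rx_slot = i;                    /* record where the refill stopped */
    }

    int main(void)
    {
        refill(5);                      /* repost buffers for slots 0..4 */
        printf("next rx slot: %d\n", rx_slot);
        return 0;
    }
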
148 struct rionet_private *rnet = netdev_priv(ndev);
150 rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
151 rnet->tx_skb[rnet->tx_slot] = skb;
156 if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
159 ++rnet->tx_slot;
160 rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);
162 if (netif_msg_tx_queued(rnet))
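
Lines 148-162 queue one outbound message and advance the transmit slot with a mask rather than a modulo; that only works because RIONET_TX_RING_SIZE is a power of two, so (i + 1) & (SIZE - 1) equals (i + 1) % SIZE. A sketch of just that accounting (queue_one() is illustrative; the driver calls netif_stop_queue() where this returns 1):

    #include <stdio.h>

    #define TX_RING_SIZE 16             /* must be a power of two */

    static int tx_slot, tx_cnt;         /* stand-ins for rnet->tx_slot/tx_cnt */

    /* Returns 1 when the ring just became full and the caller should
     * stop the queue. */
    static int queue_one(void)
    {
        int full = (++tx_cnt == TX_RING_SIZE);
        ++tx_slot;
        tx_slot &= (TX_RING_SIZE - 1);  /* wrap via mask, as at line 160 */
        return full;
    }

    int main(void)
    {
        for (int i = 0; i < TX_RING_SIZE; i++)
            if (queue_one())
                printf("ring full, stop the queue (slot wrapped to %d)\n",
                       tx_slot);
        return 0;
    }
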
173 struct rionet_private *rnet = netdev_priv(ndev);
179 spin_lock_irqsave(&rnet->tx_lock, flags);
182 add_num = nets[rnet->mport->id].nact;
184 if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
186 spin_unlock_irqrestore(&rnet->tx_lock, flags);
195 for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
197 if (nets[rnet->mport->id].active[i]) {
199 nets[rnet->mport->id].active[i]);
206 if (nets[rnet->mport->id].active[destid])
208 nets[rnet->mport->id].active[destid]);
222 spin_unlock_irqrestore(&rnet->tx_lock, flags);
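
Lines 173-222 are the transmit entry point: under the irq-saving tx lock, a broadcast frame is queued once per active peer, so the capacity check at line 184 must reserve nets[id].nact slots up front rather than one. A userspace model of that reservation, with a pthread mutex standing in for spin_lock_irqsave() and a made-up peer table:

    #include <pthread.h>
    #include <stdio.h>

    #define TX_RING_SIZE 16
    #define MAX_PEERS    8              /* stand-in for the route-entry bound */

    static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
    static int tx_cnt;
    static int active[MAX_PEERS] = { 1, 0, 1, 1 };  /* fake active[] table */
    static int nact = 3;                /* count of nonzero entries above */

    static int xmit(int is_broadcast, int destid)
    {
        int need = is_broadcast ? nact : 1;

        pthread_mutex_lock(&tx_lock);   /* spin_lock_irqsave() in the driver */
        if (tx_cnt + need > TX_RING_SIZE) {
            pthread_mutex_unlock(&tx_lock);
            return -1;                  /* no room: reject, queue stays busy */
        }
        if (is_broadcast) {
            for (int i = 0; i < MAX_PEERS; i++)
                if (active[i])
                    tx_cnt++;           /* one copy per active peer */
        } else if (active[destid]) {
            tx_cnt++;                   /* unicast: a single message */
        }
        pthread_mutex_unlock(&tx_lock);
        return 0;
    }

    int main(void)
    {
        printf("broadcast: %d, unicast: %d\n", xmit(1, 0), xmit(0, 2));
        return 0;
    }

Reserving nact slots before queueing anything is what keeps the per-peer loop from overrunning the ring partway through a broadcast.
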
231 struct rionet_private *rnet = netdev_priv(ndev);
233 unsigned char netid = rnet->mport->id;
235 if (netif_msg_intr(rnet))
260 if (netif_msg_intr(rnet))
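
The netif_msg_intr()/netif_msg_ifup() guards that recur through this listing are single-bit tests against the msg_enable word kept in the private struct (initialized at line 523). A small model, with made-up flag values standing in for the kernel's NETIF_MSG_* constants:

    #include <stdio.h>

    enum { MSG_IFUP = 0x0020, MSG_INTR = 0x0200 };  /* illustrative values */

    struct priv { unsigned msg_enable; };

    /* shape of the netif_msg_intr() macro: one bit, one test */
    #define msg_intr(p) ((p)->msg_enable & MSG_INTR)

    int main(void)
    {
        struct priv p = { .msg_enable = MSG_IFUP | MSG_INTR };
        if (msg_intr(&p))
            printf("interrupt-path logging enabled\n");
        return 0;
    }
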
270 struct rionet_private *rnet = netdev_priv(ndev);
272 if (netif_msg_intr(rnet))
276 spin_lock(&rnet->lock);
277 if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
279 spin_unlock(&rnet->lock);
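
Lines 270-279 tie the two receive halves together: under rnet->lock, the handler drains first and only refills when rionet_rx_clean() actually advanced past rnet->rx_slot. The shape of that handshake, with a stubbed-out drain (lock elided; drain() and refill() shaped like the earlier sketches, not driver functions):

    #include <stdio.h>

    #define RING_SIZE 8

    static int rx_slot;                 /* stand-in for rnet->rx_slot */

    static int drain(int start)
    {
        return (start + 3) % RING_SIZE; /* pretend three slots were consumed */
    }

    static void refill(int end)
    {
        printf("refill slots %d..%d\n", rx_slot, end - 1);
        rx_slot = end;
    }

    static void inb_event(void)
    {
        /* drain first; refill only if the drain actually moved the slot */
        int n = drain(rx_slot);
        if (n != rx_slot)
            refill(n);
    }

    int main(void)
    {
        inb_event();
        return 0;
    }
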
285 struct rionet_private *rnet = netdev_priv(ndev);
287 spin_lock(&rnet->tx_lock);
289 if (netif_msg_intr(rnet))
294 while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
296 dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
297 rnet->tx_skb[rnet->ack_slot] = NULL;
298 ++rnet->ack_slot;
299 rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);
300 rnet->tx_cnt--;
303 if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
306 spin_unlock(&rnet->tx_lock);
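
Lines 285-306 are the transmit-completion side: everything between rnet->ack_slot and the slot the mailbox reports is freed with dev_kfree_skb_irq(), the ack index wraps with the same power-of-two mask, and the queue is woken once the ring has room again. A runnable model of that loop (tx_complete() and the malloc'd buffers are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    #define TX_RING_SIZE 16             /* power of two, as in the driver */

    static void *tx_ring[TX_RING_SIZE]; /* stand-in for rnet->tx_skb[] */
    static int tx_cnt, ack_slot;

    static void tx_complete(int slot)   /* slot: where the hardware got to */
    {
        while (tx_cnt && ack_slot != slot) {
            free(tx_ring[ack_slot]);    /* dev_kfree_skb_irq() in the driver */
            tx_ring[ack_slot] = NULL;
            ++ack_slot;
            ack_slot &= (TX_RING_SIZE - 1);
            tx_cnt--;
        }
        if (tx_cnt < TX_RING_SIZE)      /* room again: wake the queue */
            printf("queue awake, %d in flight\n", tx_cnt);
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++, tx_cnt++)
            tx_ring[i] = malloc(32);
        tx_complete(4);                 /* hardware consumed slots 0..3 */
        return 0;
    }
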
313 struct rionet_private *rnet = netdev_priv(ndev);
314 unsigned char netid = rnet->mport->id;
317 if (netif_msg_ifup(rnet))
320 if ((rc = rio_request_inb_dbell(rnet->mport,
327 if ((rc = rio_request_inb_mbox(rnet->mport,
334 if ((rc = rio_request_outb_mbox(rnet->mport,
343 rnet->rx_skb[i] = NULL;
344 rnet->rx_slot = 0;
347 rnet->tx_slot = 0;
348 rnet->tx_cnt = 0;
349 rnet->ack_slot = 0;
360 rnet->open = true;
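
Lines 313-360 stage the open path: the inbound doorbell range, the inbound mailbox, and the outbound mailbox are requested in turn, each rc checked before moving on, and only once all three are held are the rx/tx ring indices zeroed and rnet->open set. The shape of that staging, with acquire() as an illustrative stand-in for the rio_request_*() calls:

    #include <stdio.h>

    static int acquire(const char *what)
    {
        printf("request %s\n", what);
        return 0;                       /* pretend each request succeeds */
    }

    static int open_dev(void)
    {
        int rc;

        if ((rc = acquire("inbound doorbell range")) < 0)
            goto out;
        if ((rc = acquire("inbound mailbox")) < 0)
            goto out;
        if ((rc = acquire("outbound mailbox")) < 0)
            goto out;

        /* all three held: reset rx_slot/tx_slot/tx_cnt/ack_slot and mark
         * the interface open, as lines 343-360 do */
        printf("rings reset, interface marked open\n");
    out:
        return rc;
    }

    int main(void) { return open_dev(); }
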
368 struct rionet_private *rnet = netdev_priv(ndev);
370 unsigned char netid = rnet->mport->id;
374 if (netif_msg_ifup(rnet))
379 rnet->open = false;
382 kfree_skb(rnet->rx_skb[i]);
395 rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
397 rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
398 rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);
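
Lines 368-398 undo open: the flag is cleared, every still-posted receive buffer is freed, and the doorbell range and both mailboxes are handed back. A minimal model of the buffer sweep (free() standing in for kfree_skb(), which likewise accepts NULL):

    #include <stdlib.h>

    #define RX_RING_SIZE 8

    static void *rx_ring[RX_RING_SIZE]; /* stand-in for rnet->rx_skb[] */

    static void close_dev(void)
    {
        /* free every still-posted receive buffer; empty slots hold NULL
         * and need no special case */
        for (int i = 0; i < RX_RING_SIZE; i++) {
            free(rx_ring[i]);
            rx_ring[i] = NULL;
        }
        /* then hand back what open acquired: the doorbell range and both
         * mailboxes (the three rio_release_*() calls at lines 395-398) */
    }

    int main(void) { close_dev(); return 0; }
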
444 struct rionet_private *rnet = netdev_priv(ndev);
449 strlcpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info));
454 struct rionet_private *rnet = netdev_priv(ndev);
456 return rnet->msg_enable;
461 struct rionet_private *rnet = netdev_priv(ndev);
463 rnet->msg_enable = value;
484 struct rionet_private *rnet;
498 rnet = netdev_priv(ndev);
499 rnet->mport = mport;
500 rnet->open = false;
520 spin_lock_init(&rnet->lock);
521 spin_lock_init(&rnet->tx_lock);
523 rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;
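
Lines 484-523 run at setup time: netdev_priv() hands back driver-private memory that was allocated together with the net_device itself, and that is where the locks and default message level live. A userspace model of that co-allocation trick (alignment handling elided; the struct names here are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct dev  { char name[16]; };     /* stand-in for struct net_device */
    struct priv { unsigned msg_enable; int open; };

    static struct dev *alloc_dev(void)
    {
        /* one allocation covers the generic header and the private area */
        return calloc(1, sizeof(struct dev) + sizeof(struct priv));
    }

    static struct priv *dev_priv(struct dev *d)
    {
        return (struct priv *)(d + 1);  /* private struct sits right after */
    }

    int main(void)
    {
        struct dev *d = alloc_dev();
        if (!d)
            return 1;
        struct priv *p = dev_priv(d);
        p->msg_enable = 0x7;            /* stand-in for RIONET_DEFAULT_MSGLEVEL */
        printf("msg_enable=%#x\n", p->msg_enable);
        free(d);
        return 0;
    }
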
599 struct rionet_private *rnet;
602 rnet = netdev_priv(nets[netid].ndev);
627 if (rnet->open)
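
The final matches, 599-627, are the hot-remove path: rnet->open, set at line 360 and cleared at line 379, lets removal skip the interface-level notification when the device was never brought up. A small model of that guard (notify_leave() is an illustrative stand-in for the driver's doorbell send):

    #include <stdbool.h>
    #include <stdio.h>

    static bool open_flag;              /* stand-in for rnet->open */

    static void notify_leave(void) { printf("send LEAVE doorbell\n"); }

    static void remove_dev(void)
    {
        if (open_flag)                  /* only a running interface notifies */
            notify_leave();
    }

    int main(void)
    {
        open_flag = true;               /* as the open path does */
        remove_dev();
        return 0;
    }
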