Cross-reference listing: lines matching references to the identifier `dr` (each entry shows the original source line number and a fragment of that line; not compilable source).
49 static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
51 return vio_dring_avail(dr, VNET_TX_RING_SIZE);
263 struct vio_dring_state *dr;
265 dr = &vio->drings[VIO_DRIVER_RX_RING];
266 dr->rcv_nxt = 1;
267 dr->snd_nxt = 1;
269 dr = &vio->drings[VIO_DRIVER_TX_RING];
270 dr->rcv_nxt = 1;
271 dr->snd_nxt = 1;
470 static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
480 .dring_ident = dr->ident,
488 hdr.seq = dr->snd_nxt;
493 dr->snd_nxt++;
520 struct vio_dring_state *dr,
526 err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
527 (index * dr->entry_size),
528 dr->cookies, dr->ncookies);
536 struct vio_dring_state *dr,
542 err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
543 (index * dr->entry_size),
544 dr->cookies, dr->ncookies);
552 struct vio_dring_state *dr,
555 struct vio_net_desc *desc = get_rx_desc(port, dr, index);
580 err = put_rx_desc(port, dr, desc, index);
587 static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
594 end = (end == (u32)-1) ? vio_dring_prev(dr, start)
595 : vio_dring_next(dr, end);
600 int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
610 start = vio_dring_next(dr, start);
612 err = vnet_send_ack(port, dr, ack_start, ack_end,
624 ack_end = vio_dring_prev(dr, start);
632 return vnet_send_ack(port, dr, ack_start, ack_end,
648 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
652 pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);
656 if (unlikely(pkt->seq != dr->rcv_nxt)) {
658 pkt->seq, dr->rcv_nxt);
663 dr->rcv_nxt++;
667 return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
671 static int idx_is_pending(struct vio_dring_state *dr, u32 end)
673 u32 idx = dr->cons;
676 while (idx != dr->prod) {
681 idx = vio_dring_next(dr, idx);
688 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
701 if (unlikely(!idx_is_pending(dr, end))) {
711 dr->cons = vio_dring_next(dr, end);
712 desc = vio_dring_entry(dr, dr->cons);
718 if (__vnet_tx_trigger(port, dr->cons) > 0)
729 vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
845 struct vio_dring_state *dr =
851 pkt->seq = dr->rcv_nxt;
852 pkt->start_idx = vio_dring_next(dr,
942 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
950 .dring_ident = dr->ident,
969 hdr.seq = dr->snd_nxt;
974 dr->snd_nxt++;
992 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
998 txi = dr->prod;
1006 d = vio_dring_entry(dr, txi);
1226 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1257 if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
1262 if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
1329 struct vio_dring_state *dr;
1378 dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1381 if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
1393 d = vio_dring_cur(dr);
1395 txi = dr->prod;
1452 /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
1455 * a DRING_STOPPED is received from the consumer. The dr->cons field
1474 port->vio._peer_sid, dr->cons);
1478 err = __vnet_tx_trigger(port, dr->cons);
1496 dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
1497 if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
1500 if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
1673 struct vio_dring_state *dr;
1676 dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1678 if (!dr->base)
1688 d = vio_dring_entry(dr, i);
1697 ldc_free_exp_dring(port->vio.lp, dr->base,
1698 (dr->entry_size * dr->num_entries),
1699 dr->cookies, dr->ncookies);
1700 dr->base = NULL;
1701 dr->entry_size = 0;
1702 dr->num_entries = 0;
1703 dr->pending = 0;
1704 dr->ncookies = 0;
1720 struct vio_dring_state *dr;
1725 dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1735 dr->cookies, &ncookies,
1744 dr->base = dring;
1745 dr->entry_size = elen;
1746 dr->num_entries = VNET_TX_RING_SIZE;
1747 dr->prod = 0;
1748 dr->cons = 0;
1750 dr->pending = VNET_TX_RING_SIZE;
1751 dr->ncookies = ncookies;
1756 d = vio_dring_entry(dr, i);