Lines matching refs: vp
104 static void vector_reset_stats(struct vector_private *vp)
106 vp->estats.rx_queue_max = 0;
107 vp->estats.rx_queue_running_average = 0;
108 vp->estats.tx_queue_max = 0;
109 vp->estats.tx_queue_running_average = 0;
110 vp->estats.rx_encaps_errors = 0;
111 vp->estats.tx_timeout_count = 0;
112 vp->estats.tx_restart_queue = 0;
113 vp->estats.tx_kicks = 0;
114 vp->estats.tx_flow_control_xon = 0;
115 vp->estats.tx_flow_control_xoff = 0;
116 vp->estats.sg_ok = 0;
117 vp->estats.sg_linearized = 0;
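The fragment above (file lines 104-117) zeroes the driver's extended statistics. A minimal sketch of the counter structure those assignments imply is given below; the real definition lives in the driver's private header, and the u64 field widths are an assumption.

    /* Sketch only: fields inferred from the resets above, plus the
     * rx_csum_offload_good counter bumped later in both RX paths.
     * Field widths are assumed, not copied from the source. */
    struct vector_estats {
            u64 rx_queue_max;
            u64 rx_queue_running_average;
            u64 tx_queue_max;
            u64 tx_queue_running_average;
            u64 rx_encaps_errors;
            u64 rx_csum_offload_good;
            u64 tx_timeout_count;
            u64 tx_restart_queue;
            u64 tx_kicks;
            u64 tx_flow_control_xon;
            u64 tx_flow_control_xoff;
            u64 sg_ok;
            u64 sg_linearized;
    };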
293 static int prep_msg(struct vector_private *vp,
306 if (vp->header_size > 0) {
307 iov[iov_index].iov_len = vp->header_size;
308 vp->form_header(iov[iov_index].iov_base, skb, vp);
314 vp->estats.sg_ok++;
336 struct vector_private *vp = netdev_priv(qi->dev);
355 vp,
362 mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr;
363 mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size;
410 struct vector_private *vp = netdev_priv(qi->dev);
430 vp->fds->tx_fd,
435 vp->in_write_poll =
445 netdev_err(vp->dev, "sendmmsg err=%i\n",
447 vp->in_error = true;
457 if (result > vp->estats.tx_queue_max)
458 vp->estats.tx_queue_max = result;
459 vp->estats.tx_queue_running_average =
460 (vp->estats.tx_queue_running_average + result) >> 1;
468 vp->estats.tx_restart_queue++;
475 tasklet_schedule(&vp->tx_poll);
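File lines 457-460 above record the largest sendmmsg batch and keep a running average of batch size. The update at 459-460 is a simple halving filter; a hedged standalone restatement (the helper name is illustrative only):

    /* Equivalent of the update at file lines 459-460: each new sample gets
     * weight 1/2 and older samples decay geometrically. Sketch, not the
     * driver's code verbatim. */
    static inline u64 halving_avg(u64 avg, u64 sample)
    {
            return (avg + sample) >> 1;
    }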
488 struct vector_private *vp = netdev_priv(qi->dev);
509 if ((vp->header_size > 0) &&
525 struct vector_private *vp,
539 result->dev = vp->dev;
563 if (vp->header_size > 0)
582 if (vp->header_size > 0) {
620 struct vector_private *vp,
623 int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN;
630 if (vp->req_size <= linear)
633 len = vp->req_size;
636 len - vp->max_packet,
641 if (vp->header_size > 0)
648 skb_reserve(result, vp->headroom);
649 result->dev = vp->dev;
650 skb_put(result, vp->max_packet);
651 result->data_len = len - vp->max_packet;
652 result->len += len - vp->max_packet;
656 iov[iov_index].iov_len = vp->max_packet;
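prep_skb (file lines 620-656) sizes the linear area to max_packet plus headroom and accounts any remainder of req_size as paged data via data_len. A hedged reminder of the generic sk_buff invariant that the two assignments at 650-651 maintain (the helper name is illustrative):

    #include <linux/skbuff.h>

    /* General sk_buff invariant, not driver-specific: skb->len is the total
     * payload, skb->data_len the part held in page fragments, so the linear
     * head carries skb->len - skb->data_len bytes. */
    static inline unsigned int linear_bytes(const struct sk_buff *skb)
    {
            return skb->len - skb->data_len;        /* == skb_headlen(skb) */
    }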
679 struct vector_private *vp = netdev_priv(qi->dev);
692 *skbuff_vector = prep_skb(vp, &mmsg_vector->msg_hdr);
801 struct vector_private *vp;
807 vp = netdev_priv(dev);
808 if (vp->fds != NULL)
842 static int vector_legacy_rx(struct vector_private *vp)
858 if (vp->header_size > 0) {
859 iov[0].iov_base = vp->header_rxbuffer;
860 iov[0].iov_len = vp->header_size;
863 skb = prep_skb(vp, &hdr);
872 vp->dev->stats.rx_dropped++;
875 pkt_len = uml_vector_recvmsg(vp->fds->rx_fd, &hdr, 0);
877 vp->in_error = true;
882 if (pkt_len > vp->header_size) {
883 if (vp->header_size > 0) {
884 header_check = vp->verify_header(
885 vp->header_rxbuffer, skb, vp);
888 vp->dev->stats.rx_dropped++;
889 vp->estats.rx_encaps_errors++;
893 vp->estats.rx_csum_offload_good++;
897 pskb_trim(skb, pkt_len - vp->rx_header_size);
899 vp->dev->stats.rx_bytes += skb->len;
900 vp->dev->stats.rx_packets++;
916 static int writev_tx(struct vector_private *vp, struct sk_buff *skb)
921 iov[0].iov_base = vp->header_txbuffer;
922 iov_count = prep_msg(vp, skb, (struct iovec *) &iov);
928 vp->fds->tx_fd,
936 netif_trans_update(vp->dev);
937 netif_wake_queue(vp->dev);
940 vp->dev->stats.tx_bytes += skb->len;
941 vp->dev->stats.tx_packets++;
943 vp->dev->stats.tx_dropped++;
948 vp->dev->stats.tx_dropped++;
951 vp->in_error = true;
960 static int vector_mmsg_rx(struct vector_private *vp)
963 struct vector_queue *qi = vp->rx_queue;
978 vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);
981 vp->in_error = true;
995 if (mmsg_vector->msg_len > vp->header_size) {
996 if (vp->header_size > 0) {
997 header_check = vp->verify_header(
1000 vp
1009 vp->estats.rx_encaps_errors++;
1013 vp->estats.rx_csum_offload_good++;
1018 mmsg_vector->msg_len - vp->rx_header_size);
1024 vp->dev->stats.rx_bytes += skb->len;
1025 vp->dev->stats.rx_packets++;
1041 if (vp->estats.rx_queue_max < packet_count)
1042 vp->estats.rx_queue_max = packet_count;
1043 vp->estats.rx_queue_running_average =
1044 (vp->estats.rx_queue_running_average + packet_count) >> 1;
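vector_mmsg_rx (file lines 960-1044) pulls a whole batch of packets with one call into uml_vector_recvmmsg and then updates the RX queue-depth statistics with the same halving filter as the TX side. Below is a userspace sketch of the recvmmsg(2) batching it builds on; the wrapper name rx_batch and the MSG_DONTWAIT flag are illustrative assumptions, not the driver's host-side code.

    #define _GNU_SOURCE
    #include <sys/socket.h>

    /* One recvmmsg(2) call fills up to vlen message headers; the return
     * value is the number of packets read and vec[i].msg_len the size of
     * each one. Sketch only. */
    static int rx_batch(int fd, struct mmsghdr *vec, unsigned int vlen)
    {
            return recvmmsg(fd, vec, vlen, MSG_DONTWAIT, NULL);
    }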
1049 static void vector_rx(struct vector_private *vp)
1054 if ((vp->options & VECTOR_RX) > 0)
1055 while (((err = vector_mmsg_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
1058 while (((err = vector_legacy_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
1061 netdev_err(vp->dev, "vector_rx: error(%d)\n", err);
1063 netdev_err(vp->dev, "vector_rx: device stuck, remote end may have closed the connection\n");
1068 struct vector_private *vp = netdev_priv(dev);
1071 if (vp->in_error) {
1072 deactivate_fd(vp->fds->rx_fd, vp->rx_irq);
1073 if ((vp->fds->rx_fd != vp->fds->tx_fd) && (vp->tx_irq != 0))
1074 deactivate_fd(vp->fds->tx_fd, vp->tx_irq);
1078 if ((vp->options & VECTOR_TX) == 0) {
1079 writev_tx(vp, skb);
1087 netdev_sent_queue(vp->dev, skb->len);
1088 queue_depth = vector_enqueue(vp->tx_queue, skb);
1094 if (queue_depth >= vp->tx_queue->max_depth - 1) {
1095 vp->estats.tx_kicks++;
1097 vector_send(vp->tx_queue);
1101 mod_timer(&vp->tl, vp->coalesce);
1105 vp->estats.tx_kicks++;
1106 vector_send(vp->tx_queue);
1108 tasklet_schedule(&vp->tx_poll);
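The transmit path above (file lines 1068-1108) bails out if the device is in an error state, enqueues the skb otherwise, and then either kicks vector_send() immediately when the queue is nearly full or arms the coalescing timer so the batch is flushed later (see the timer at 1545-1548). A condensed sketch of that decision; the helper name and the exact shape of the branches are assumptions.

    /* Condensed sketch of the enqueue/kick decision; only the near-full
     * test and the timer arm are taken from the fragment above. */
    static void tx_enqueue_or_kick(struct vector_private *vp, struct sk_buff *skb)
    {
            int depth = vector_enqueue(vp->tx_queue, skb);

            if (depth >= vp->tx_queue->max_depth - 1) {
                    vp->estats.tx_kicks++;            /* queue almost full: flush now */
                    vector_send(vp->tx_queue);
            } else {
                    mod_timer(&vp->tl, vp->coalesce); /* flush when the timer fires */
            }
    }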
1115 struct vector_private *vp = netdev_priv(dev);
1119 vector_rx(vp);
1127 struct vector_private *vp = netdev_priv(dev);
1138 if (vp->in_write_poll)
1139 tasklet_schedule(&vp->tx_poll);
1148 struct vector_private *vp = netdev_priv(dev);
1152 del_timer(&vp->tl);
1154 if (vp->fds == NULL)
1158 if (vp->rx_irq > 0) {
1159 um_free_irq(vp->rx_irq, dev);
1160 vp->rx_irq = 0;
1162 if (vp->tx_irq > 0) {
1163 um_free_irq(vp->tx_irq, dev);
1164 vp->tx_irq = 0;
1166 tasklet_kill(&vp->tx_poll);
1167 if (vp->fds->rx_fd > 0) {
1168 if (vp->bpf)
1169 uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
1170 os_close_file(vp->fds->rx_fd);
1171 vp->fds->rx_fd = -1;
1173 if (vp->fds->tx_fd > 0) {
1174 os_close_file(vp->fds->tx_fd);
1175 vp->fds->tx_fd = -1;
1177 if (vp->bpf != NULL)
1178 kfree(vp->bpf->filter);
1179 kfree(vp->bpf);
1180 vp->bpf = NULL;
1181 kfree(vp->fds->remote_addr);
1182 kfree(vp->transport_data);
1183 kfree(vp->header_rxbuffer);
1184 kfree(vp->header_txbuffer);
1185 if (vp->rx_queue != NULL)
1186 destroy_queue(vp->rx_queue);
1187 if (vp->tx_queue != NULL)
1188 destroy_queue(vp->tx_queue);
1189 kfree(vp->fds);
1190 vp->fds = NULL;
1191 spin_lock_irqsave(&vp->lock, flags);
1192 vp->opened = false;
1193 vp->in_error = false;
1194 spin_unlock_irqrestore(&vp->lock, flags);
1202 struct vector_private *vp = (struct vector_private *)data;
1204 vp->estats.tx_kicks++;
1205 vector_send(vp->tx_queue);
1209 struct vector_private *vp =
1211 netdev_reset_queue(vp->dev);
1212 netif_start_queue(vp->dev);
1213 netif_wake_queue(vp->dev);
1218 struct vector_private *vp = netdev_priv(dev);
1223 spin_lock_irqsave(&vp->lock, flags);
1224 if (vp->opened) {
1225 spin_unlock_irqrestore(&vp->lock, flags);
1228 vp->opened = true;
1229 spin_unlock_irqrestore(&vp->lock, flags);
1231 vp->bpf = uml_vector_user_bpf(get_bpf_file(vp->parsed));
1233 vp->fds = uml_vector_user_open(vp->unit, vp->parsed);
1235 if (vp->fds == NULL)
1238 if (build_transport_data(vp) < 0)
1241 if ((vp->options & VECTOR_RX) > 0) {
1242 vp->rx_queue = create_queue(
1243 vp,
1244 get_depth(vp->parsed),
1245 vp->rx_header_size,
1248 vp->rx_queue->queue_depth = get_depth(vp->parsed);
1250 vp->header_rxbuffer = kmalloc(
1251 vp->rx_header_size,
1254 if (vp->header_rxbuffer == NULL)
1257 if ((vp->options & VECTOR_TX) > 0) {
1258 vp->tx_queue = create_queue(
1259 vp,
1260 get_depth(vp->parsed),
1261 vp->header_size,
1265 vp->header_txbuffer = kmalloc(vp->header_size, GFP_KERNEL);
1266 if (vp->header_txbuffer == NULL)
1272 irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd,
1280 vp->rx_irq = irq_rr + VECTOR_BASE_IRQ;
1285 if ((vp->options & VECTOR_TX) > 0) {
1287 irq_rr + VECTOR_BASE_IRQ, vp->fds->tx_fd,
1296 vp->tx_irq = irq_rr + VECTOR_BASE_IRQ;
1300 if ((vp->options & VECTOR_QDISC_BYPASS) != 0) {
1301 if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd))
1302 vp->options |= VECTOR_BPF;
1304 if (((vp->options & VECTOR_BPF) != 0) && (vp->bpf == NULL))
1305 vp->bpf = uml_vector_default_bpf(dev->dev_addr);
1307 if (vp->bpf != NULL)
1308 uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
1317 vector_rx(vp);
1319 vector_reset_stats(vp);
1320 vdevice = find_device(vp->unit);
1323 if ((vp->options & VECTOR_TX) != 0)
1324 add_timer(&vp->tl);
1340 struct vector_private *vp = netdev_priv(dev);
1342 vp->estats.tx_timeout_count++;
1344 schedule_work(&vp->reset_tx);
1357 struct vector_private *vp = netdev_priv(dev);
1364 vp->req_size = 65536;
1367 vp->req_size = vp->max_packet + vp->headroom + SAFETY_MARGIN;
1389 struct vector_private *vp = netdev_priv(dev);
1394 if (!(vp->options & VECTOR_BPF_FLASH)) {
1399 spin_lock(&vp->lock);
1401 if (vp->bpf != NULL) {
1402 if (vp->opened)
1403 uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
1404 kfree(vp->bpf->filter);
1405 vp->bpf->filter = NULL;
1407 vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_ATOMIC);
1408 if (vp->bpf == NULL) {
1414 vdevice = find_device(vp->unit);
1419 vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC);
1420 if (!vp->bpf->filter)
1423 vp->bpf->len = fw->size / sizeof(struct sock_filter);
1426 if (vp->opened)
1427 result = uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
1429 spin_unlock(&vp->lock);
1437 spin_unlock(&vp->lock);
1438 if (vp->bpf != NULL)
1439 kfree(vp->bpf->filter);
1440 kfree(vp->bpf);
1441 vp->bpf = NULL;
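The BPF flash path (file lines 1389-1441) copies a firmware blob into vp->bpf->filter and derives the instruction count by dividing the blob size by sizeof(struct sock_filter), i.e. 8 bytes per classic-BPF instruction (code/jt/jf/k, per the UAPI <linux/filter.h> layout). A small sketch of that size math; the helper name is illustrative.

    #include <linux/filter.h>

    /* The division at file line 1423: a firmware image of fw_size bytes is
     * treated as an array of classic-BPF instructions, each
     * sizeof(struct sock_filter) == 8 bytes. Sketch only. */
    static unsigned short bpf_insn_count(size_t fw_size)
    {
            return fw_size / sizeof(struct sock_filter);
    }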
1448 struct vector_private *vp = netdev_priv(netdev);
1450 ring->rx_max_pending = vp->rx_queue->max_depth;
1451 ring->tx_max_pending = vp->tx_queue->max_depth;
1452 ring->rx_pending = vp->rx_queue->max_depth;
1453 ring->tx_pending = vp->tx_queue->max_depth;
1487 struct vector_private *vp = netdev_priv(dev);
1489 memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats));
1495 struct vector_private *vp = netdev_priv(netdev);
1497 ec->tx_coalesce_usecs = (vp->coalesce * 1000000) / HZ;
1504 struct vector_private *vp = netdev_priv(netdev);
1506 vp->coalesce = (ec->tx_coalesce_usecs * HZ) / 1000000;
1507 if (vp->coalesce == 0)
1508 vp->coalesce = 1;
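The ethtool coalesce handlers (file lines 1495-1508) keep the TX coalescing interval in timer ticks: get reports it back in microseconds, set converts microseconds to ticks and clamps the result to at least one tick. A worked restatement of that arithmetic, assuming HZ == 100 for the numbers in the comment; the helper name is illustrative.

    /* Mirror of file lines 1506-1508. With HZ == 100 (10 ms per tick),
     * 10000 us * 100 / 1000000 = 1 jiffy, while anything under one tick
     * (e.g. 5000 us -> 0) is rounded up to 1. */
    static unsigned long coalesce_usecs_to_jiffies(unsigned int usecs)
    {
            unsigned long j = ((unsigned long)usecs * HZ) / 1000000;

            return j ? j : 1;
    }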
1545 struct vector_private *vp = from_timer(vp, t, tl);
1547 vp->estats.tx_kicks++;
1548 vector_send(vp->tx_queue);
1558 struct vector_private *vp;
1585 vp = netdev_priv(dev);
1602 *vp = ((struct vector_private)
1604 .list = LIST_HEAD_INIT(vp->list),
1633 tasklet_init(&vp->tx_poll, vector_tx_poll, (unsigned long)vp);
1634 INIT_WORK(&vp->reset_tx, vector_reset_tx);
1636 timer_setup(&vp->tl, vector_timer_expire, 0);
1637 spin_lock_init(&vp->lock);