Lines matching refs:vp (references to the struct vector_private *vp pointer)
70 static int vector_mmsg_rx(struct vector_private *vp, int budget);
103 static void vector_reset_stats(struct vector_private *vp)
105 vp->estats.rx_queue_max = 0;
106 vp->estats.rx_queue_running_average = 0;
107 vp->estats.tx_queue_max = 0;
108 vp->estats.tx_queue_running_average = 0;
109 vp->estats.rx_encaps_errors = 0;
110 vp->estats.tx_timeout_count = 0;
111 vp->estats.tx_restart_queue = 0;
112 vp->estats.tx_kicks = 0;
113 vp->estats.tx_flow_control_xon = 0;
114 vp->estats.tx_flow_control_xoff = 0;
115 vp->estats.sg_ok = 0;
116 vp->estats.sg_linearized = 0;
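Taken together, the refs at 105-116 cover the body of vector_reset_stats() (declared at 103): it zeroes the driver's extended statistics. A reconstruction from just these fragments, with only the braces added; any original line that does not mention vp would be missing from this listing, so treat it as a sketch rather than the verbatim source:

static void vector_reset_stats(struct vector_private *vp)
{
        vp->estats.rx_queue_max = 0;
        vp->estats.rx_queue_running_average = 0;
        vp->estats.tx_queue_max = 0;
        vp->estats.tx_queue_running_average = 0;
        vp->estats.rx_encaps_errors = 0;
        vp->estats.tx_timeout_count = 0;
        vp->estats.tx_restart_queue = 0;
        vp->estats.tx_kicks = 0;
        vp->estats.tx_flow_control_xon = 0;
        vp->estats.tx_flow_control_xoff = 0;
        vp->estats.sg_ok = 0;
        vp->estats.sg_linearized = 0;
}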
292 static int prep_msg(struct vector_private *vp,
305 if (vp->header_size > 0) {
306 iov[iov_index].iov_len = vp->header_size;
307 vp->form_header(iov[iov_index].iov_base, skb, vp);
313 vp->estats.sg_ok++;
335 struct vector_private *vp = netdev_priv(qi->dev);
354 vp,
361 mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr;
362 mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size;
409 struct vector_private *vp = netdev_priv(qi->dev);
429 vp->fds->tx_fd,
434 vp->in_write_poll =
444 netdev_err(vp->dev, "sendmmsg err=%i\n",
446 vp->in_error = true;
456 if (result > vp->estats.tx_queue_max)
457 vp->estats.tx_queue_max = result;
458 vp->estats.tx_queue_running_average =
459 (vp->estats.tx_queue_running_average + result) >> 1;
466 vp->estats.tx_restart_queue++;
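Lines 456-459 (and their RX counterparts at 1040-1043) show how the driver tracks queue depth: a high-water mark plus a cheap running average in which the previous average and the newest sample are weighted equally via a right shift. A small self-contained illustration of the same accounting; the helper name is hypothetical, not from the driver:

#include <stdio.h>

/* Hypothetical helper mirroring the accounting at lines 456-459:
 * remember the deepest queue seen and keep (old_average + sample) >> 1
 * as a crude exponential running average. */
static void account_queue_depth(unsigned int depth,
                                unsigned int *queue_max,
                                unsigned int *running_average)
{
        if (depth > *queue_max)
                *queue_max = depth;
        *running_average = (*running_average + depth) >> 1;
}

int main(void)
{
        unsigned int max = 0, avg = 10;

        account_queue_depth(30, &max, &avg);
        printf("max=%u avg=%u\n", max, avg);    /* max=30, avg=(10+30)>>1=20 */
        return 0;
}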
484 struct vector_private *vp = netdev_priv(qi->dev);
505 if ((vp->header_size > 0) &&
521 struct vector_private *vp,
535 result->dev = vp->dev;
559 if (vp->header_size > 0)
578 if (vp->header_size > 0) {
616 struct vector_private *vp,
619 int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN;
626 if (vp->req_size <= linear)
629 len = vp->req_size;
632 len - vp->max_packet,
637 if (vp->header_size > 0)
644 skb_reserve(result, vp->headroom);
645 result->dev = vp->dev;
646 skb_put(result, vp->max_packet);
647 result->data_len = len - vp->max_packet;
648 result->len += len - vp->max_packet;
652 iov[iov_index].iov_len = vp->max_packet;
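The prep_skb() refs at 616-652 describe how receive buffers are sized: the linear area is one max_packet plus headroom and a safety margin, and anything the configured request size (vp->req_size) wants beyond max_packet is carried in paged fragments. A partial reconstruction; lines of the original that do not mention vp are missing from the listing, so the first branch body, the allocator call and the error handling are assumptions:

int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN;
int len;

if (vp->req_size <= linear)
        len = linear;                   /* assumed: the request fits in the linear area */
else
        len = vp->req_size;             /* the excess will live in paged fragments */

/* 'result' is assumed to come from an skb allocator asked for 'linear'
 * bytes of linear space plus (len - vp->max_packet) bytes in frags */

if (vp->header_size > 0)
        iov_index++;                    /* presumably reserves iov[0] for the transport header */

skb_reserve(result, vp->headroom);      /* keep room for the header in front of the data */
result->dev = vp->dev;
skb_put(result, vp->max_packet);        /* linear part: one full-sized packet */
result->data_len = len - vp->max_packet;        /* paged part */
result->len += len - vp->max_packet;

iov[iov_index].iov_len = vp->max_packet;        /* iovec entry for the linear payload */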
675 struct vector_private *vp = netdev_priv(qi->dev);
688 *skbuff_vector = prep_skb(vp, &mmsg_vector->msg_hdr);
797 struct vector_private *vp;
803 vp = netdev_priv(dev);
804 if (vp->fds != NULL)
838 static int vector_legacy_rx(struct vector_private *vp)
854 if (vp->header_size > 0) {
855 iov[0].iov_base = vp->header_rxbuffer;
856 iov[0].iov_len = vp->header_size;
859 skb = prep_skb(vp, &hdr);
868 vp->dev->stats.rx_dropped++;
871 pkt_len = uml_vector_recvmsg(vp->fds->rx_fd, &hdr, 0);
873 vp->in_error = true;
878 if (pkt_len > vp->header_size) {
879 if (vp->header_size > 0) {
880 header_check = vp->verify_header(
881 vp->header_rxbuffer, skb, vp);
884 vp->dev->stats.rx_dropped++;
885 vp->estats.rx_encaps_errors++;
889 vp->estats.rx_csum_offload_good++;
893 pskb_trim(skb, pkt_len - vp->rx_header_size);
895 vp->dev->stats.rx_bytes += skb->len;
896 vp->dev->stats.rx_packets++;
897 napi_gro_receive(&vp->napi, skb);
912 static int writev_tx(struct vector_private *vp, struct sk_buff *skb)
917 iov[0].iov_base = vp->header_txbuffer;
918 iov_count = prep_msg(vp, skb, (struct iovec *) &iov);
924 vp->fds->tx_fd,
932 netif_trans_update(vp->dev);
933 netif_wake_queue(vp->dev);
936 vp->dev->stats.tx_bytes += skb->len;
937 vp->dev->stats.tx_packets++;
939 vp->dev->stats.tx_dropped++;
944 vp->dev->stats.tx_dropped++;
947 vp->in_error = true;
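Lines 912-947 cover the packet-at-a-time transmit helper writev_tx(): prep_msg() builds an iovec whose first slot points at the preformatted header in vp->header_txbuffer, the vector is written to vp->fds->tx_fd in a single call, and either the byte/packet counters or the drop counter is bumped; a failed write also latches vp->in_error. A condensed sketch; the write helper and the return-value checks around it are assumptions:

iov[0].iov_base = vp->header_txbuffer;
iov_count = prep_msg(vp, skb, (struct iovec *) &iov);

/* assumed: a writev-style call pushes iov_count entries to
 * vp->fds->tx_fd and returns the byte count in pkt_len */

netif_trans_update(vp->dev);
netif_wake_queue(vp->dev);

if (pkt_len > 0) {
        vp->dev->stats.tx_bytes += skb->len;
        vp->dev->stats.tx_packets++;
} else {
        vp->dev->stats.tx_dropped++;    /* nothing written: count a drop */
}

/* on a negative return the drop counter is bumped as well and
 * vp->in_error is set so later transmits shut the interface down */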
956 static int vector_mmsg_rx(struct vector_private *vp, int budget)
959 struct vector_queue *qi = vp->rx_queue;
977 vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);
980 vp->in_error = true;
994 if (mmsg_vector->msg_len > vp->header_size) {
995 if (vp->header_size > 0) {
996 header_check = vp->verify_header(
999 vp
1008 vp->estats.rx_encaps_errors++;
1012 vp->estats.rx_csum_offload_good++;
1017 mmsg_vector->msg_len - vp->rx_header_size);
1023 vp->dev->stats.rx_bytes += skb->len;
1024 vp->dev->stats.rx_packets++;
1025 napi_gro_receive(&vp->napi, skb);
1040 if (vp->estats.rx_queue_max < packet_count)
1041 vp->estats.rx_queue_max = packet_count;
1042 vp->estats.rx_queue_running_average =
1043 (vp->estats.rx_queue_running_average + packet_count) >> 1;
1050 struct vector_private *vp = netdev_priv(dev);
1053 if (vp->in_error) {
1054 deactivate_fd(vp->fds->rx_fd, vp->rx_irq);
1055 if ((vp->fds->rx_fd != vp->fds->tx_fd) && (vp->tx_irq != 0))
1056 deactivate_fd(vp->fds->tx_fd, vp->tx_irq);
1060 if ((vp->options & VECTOR_TX) == 0) {
1061 writev_tx(vp, skb);
1069 netdev_sent_queue(vp->dev, skb->len);
1070 queue_depth = vector_enqueue(vp->tx_queue, skb);
1072 if (queue_depth < vp->tx_queue->max_depth && netdev_xmit_more()) {
1073 mod_timer(&vp->tl, vp->coalesce);
1076 queue_depth = vector_send(vp->tx_queue);
1078 napi_schedule(&vp->napi);
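The transmit entry point at 1050-1078 makes three decisions: an interface already flagged in_error has its descriptors' IRQs deactivated; without the VECTOR_TX option the packet goes straight out through writev_tx(); otherwise it is BQL-accounted, enqueued on vp->tx_queue, and either left for the coalescing timer (more packets on the way and the queue still has room) or flushed immediately with NAPI kicked to finish the work. A condensed sketch; the returns between the branches are assumptions:

if (vp->in_error) {
        deactivate_fd(vp->fds->rx_fd, vp->rx_irq);
        if ((vp->fds->rx_fd != vp->fds->tx_fd) && (vp->tx_irq != 0))
                deactivate_fd(vp->fds->tx_fd, vp->tx_irq);
        /* assumed: bail out here */
}

if ((vp->options & VECTOR_TX) == 0) {
        writev_tx(vp, skb);                     /* legacy single-write path */
        /* assumed: done, return */
}

netdev_sent_queue(vp->dev, skb->len);           /* BQL accounting */
queue_depth = vector_enqueue(vp->tx_queue, skb);

if (queue_depth < vp->tx_queue->max_depth && netdev_xmit_more()) {
        mod_timer(&vp->tl, vp->coalesce);       /* batch with the packets that follow */
} else {
        queue_depth = vector_send(vp->tx_queue);
        napi_schedule(&vp->napi);               /* assumed: scheduled when packets remain queued */
}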
1087 struct vector_private *vp = netdev_priv(dev);
1091 napi_schedule(&vp->napi);
1099 struct vector_private *vp = netdev_priv(dev);
1110 napi_schedule(&vp->napi);
1119 struct vector_private *vp = netdev_priv(dev);
1123 del_timer(&vp->tl);
1125 if (vp->fds == NULL)
1129 if (vp->rx_irq > 0) {
1130 um_free_irq(vp->rx_irq, dev);
1131 vp->rx_irq = 0;
1133 if (vp->tx_irq > 0) {
1134 um_free_irq(vp->tx_irq, dev);
1135 vp->tx_irq = 0;
1137 napi_disable(&vp->napi);
1138 netif_napi_del(&vp->napi);
1139 if (vp->fds->rx_fd > 0) {
1140 if (vp->bpf)
1141 uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
1142 os_close_file(vp->fds->rx_fd);
1143 vp->fds->rx_fd = -1;
1145 if (vp->fds->tx_fd > 0) {
1146 os_close_file(vp->fds->tx_fd);
1147 vp->fds->tx_fd = -1;
1149 if (vp->bpf != NULL)
1150 kfree(vp->bpf->filter);
1151 kfree(vp->bpf);
1152 vp->bpf = NULL;
1153 kfree(vp->fds->remote_addr);
1154 kfree(vp->transport_data);
1155 kfree(vp->header_rxbuffer);
1156 kfree(vp->header_txbuffer);
1157 if (vp->rx_queue != NULL)
1158 destroy_queue(vp->rx_queue);
1159 if (vp->tx_queue != NULL)
1160 destroy_queue(vp->tx_queue);
1161 kfree(vp->fds);
1162 vp->fds = NULL;
1163 spin_lock_irqsave(&vp->lock, flags);
1164 vp->opened = false;
1165 vp->in_error = false;
1166 spin_unlock_irqrestore(&vp->lock, flags);
1172 struct vector_private *vp = container_of(napi, struct vector_private, napi);
1177 if ((vp->options & VECTOR_TX) != 0)
1178 tx_enqueued = (vector_send(vp->tx_queue) > 0);
1179 if ((vp->options & VECTOR_RX) > 0)
1180 err = vector_mmsg_rx(vp, budget);
1182 err = vector_legacy_rx(vp);
1198 struct vector_private *vp =
1200 netdev_reset_queue(vp->dev);
1201 netif_start_queue(vp->dev);
1202 netif_wake_queue(vp->dev);
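Lines 1198-1202 are the TX reset work item that the timeout handler schedules (see 1337): it clears the byte-queue-limit accounting and restarts the queue. A reconstruction; the work member and function name are taken from the INIT_WORK at 1633, the container_of line itself is assumed:

static void vector_reset_tx(struct work_struct *work)
{
        struct vector_private *vp =
                container_of(work, struct vector_private, reset_tx);

        netdev_reset_queue(vp->dev);    /* forget stale byte-queue-limit state */
        netif_start_queue(vp->dev);
        netif_wake_queue(vp->dev);
}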
1207 struct vector_private *vp = netdev_priv(dev);
1212 spin_lock_irqsave(&vp->lock, flags);
1213 if (vp->opened) {
1214 spin_unlock_irqrestore(&vp->lock, flags);
1217 vp->opened = true;
1218 spin_unlock_irqrestore(&vp->lock, flags);
1220 vp->bpf = uml_vector_user_bpf(get_bpf_file(vp->parsed));
1222 vp->fds = uml_vector_user_open(vp->unit, vp->parsed);
1224 if (vp->fds == NULL)
1227 if (build_transport_data(vp) < 0)
1230 if ((vp->options & VECTOR_RX) > 0) {
1231 vp->rx_queue = create_queue(
1232 vp,
1233 get_depth(vp->parsed),
1234 vp->rx_header_size,
1237 vp->rx_queue->queue_depth = get_depth(vp->parsed);
1239 vp->header_rxbuffer = kmalloc(
1240 vp->rx_header_size,
1243 if (vp->header_rxbuffer == NULL)
1246 if ((vp->options & VECTOR_TX) > 0) {
1247 vp->tx_queue = create_queue(
1248 vp,
1249 get_depth(vp->parsed),
1250 vp->header_size,
1254 vp->header_txbuffer = kmalloc(vp->header_size, GFP_KERNEL);
1255 if (vp->header_txbuffer == NULL)
1259 netif_napi_add_weight(vp->dev, &vp->napi, vector_poll,
1260 get_depth(vp->parsed));
1261 napi_enable(&vp->napi);
1265 irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd,
1273 vp->rx_irq = irq_rr + VECTOR_BASE_IRQ;
1278 if ((vp->options & VECTOR_TX) > 0) {
1280 irq_rr + VECTOR_BASE_IRQ, vp->fds->tx_fd,
1289 vp->tx_irq = irq_rr + VECTOR_BASE_IRQ;
1293 if ((vp->options & VECTOR_QDISC_BYPASS) != 0) {
1294 if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd))
1295 vp->options |= VECTOR_BPF;
1297 if (((vp->options & VECTOR_BPF) != 0) && (vp->bpf == NULL))
1298 vp->bpf = uml_vector_default_bpf(dev->dev_addr);
1300 if (vp->bpf != NULL)
1301 uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
1304 vector_reset_stats(vp);
1311 napi_schedule(&vp->napi);
1313 vdevice = find_device(vp->unit);
1316 if ((vp->options & VECTOR_TX) != 0)
1317 add_timer(&vp->tl);
1333 struct vector_private *vp = netdev_priv(dev);
1335 vp->estats.tx_timeout_count++;
1337 schedule_work(&vp->reset_tx);
1350 struct vector_private *vp = netdev_priv(dev);
1357 vp->req_size = 65536;
1360 vp->req_size = vp->max_packet + vp->headroom + SAFETY_MARGIN;
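The pair of assignments at 1357 and 1360 are the two receive-buffer sizing policies: a fixed 64 KiB request size for the large-receive case, or just enough for one maximum-sized packet plus headroom and a safety margin. The condition selecting between them does not reference vp and is therefore missing from the listing; the sketch below marks it as an assumption:

/* 'wants_large_buffers' stands in for the real condition, which is
 * not visible in this listing. */
if (wants_large_buffers)
        vp->req_size = 65536;           /* one 64 KiB buffer per receive slot */
else
        vp->req_size = vp->max_packet + vp->headroom + SAFETY_MARGIN;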
1382 struct vector_private *vp = netdev_priv(dev);
1387 if (!(vp->options & VECTOR_BPF_FLASH)) {
1392 spin_lock(&vp->lock);
1394 if (vp->bpf != NULL) {
1395 if (vp->opened)
1396 uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
1397 kfree(vp->bpf->filter);
1398 vp->bpf->filter = NULL;
1400 vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_ATOMIC);
1401 if (vp->bpf == NULL) {
1407 vdevice = find_device(vp->unit);
1412 vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC);
1413 if (!vp->bpf->filter)
1416 vp->bpf->len = fw->size / sizeof(struct sock_filter);
1419 if (vp->opened)
1420 result = uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
1422 spin_unlock(&vp->lock);
1430 spin_unlock(&vp->lock);
1431 if (vp->bpf != NULL)
1432 kfree(vp->bpf->filter);
1433 kfree(vp->bpf);
1434 vp->bpf = NULL;
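Lines 1382-1434 are the ethtool flash handler, which uses firmware loading to install a classic BPF socket filter: the request is refused unless the transport allows it (VECTOR_BPF_FLASH), any existing filter is detached and freed under vp->lock, the firmware blob is duplicated as the new filter program, and the filter is attached immediately if the device is open. A condensed sketch; the firmware request/release calls, return values, labels and some of the branch structure are assumptions:

if (!(vp->options & VECTOR_BPF_FLASH))
        return -1;                              /* exact return value is an assumption */

spin_lock(&vp->lock);

if (vp->bpf != NULL) {
        if (vp->opened)
                uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
        kfree(vp->bpf->filter);
        vp->bpf->filter = NULL;
} else {
        vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_ATOMIC);
        if (vp->bpf == NULL)
                goto flash_fail;                /* label name is an assumption */
}

/* assumed: the firmware named by the request is loaded into 'fw' here */

vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC);
if (!vp->bpf->filter)
        goto flash_fail;
vp->bpf->len = fw->size / sizeof(struct sock_filter);

if (vp->opened)
        result = uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);

spin_unlock(&vp->lock);
return result;

flash_fail:
spin_unlock(&vp->lock);
if (vp->bpf != NULL)
        kfree(vp->bpf->filter);
kfree(vp->bpf);
vp->bpf = NULL;
return -1;                                      /* exact return value is an assumption */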
1443 struct vector_private *vp = netdev_priv(netdev);
1445 ring->rx_max_pending = vp->rx_queue->max_depth;
1446 ring->tx_max_pending = vp->tx_queue->max_depth;
1447 ring->rx_pending = vp->rx_queue->max_depth;
1448 ring->tx_pending = vp->tx_queue->max_depth;
1482 struct vector_private *vp = netdev_priv(dev);
1484 memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats));
1492 struct vector_private *vp = netdev_priv(netdev);
1494 ec->tx_coalesce_usecs = (vp->coalesce * 1000000) / HZ;
1503 struct vector_private *vp = netdev_priv(netdev);
1505 vp->coalesce = (ec->tx_coalesce_usecs * HZ) / 1000000;
1506 if (vp->coalesce == 0)
1507 vp->coalesce = 1;
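Lines 1494 and 1505-1507 give both directions of the ethtool TX coalescing conversion: vp->coalesce is held in jiffies, reported to user space in microseconds, and clamped to a minimum of one jiffy when set. A small self-contained program exercising the same arithmetic (HZ is fixed to an example value here; in the kernel it is the configured tick rate):

#include <stdio.h>

#define HZ 100  /* example: one jiffy = 10 ms = 10000 us */

int main(void)
{
        unsigned long coalesce = 2;                             /* jiffies, as stored in vp->coalesce */
        unsigned long usecs = (coalesce * 1000000) / HZ;        /* reported value: 20000 us */
        unsigned long back = (usecs * HZ) / 1000000;            /* set path: back to jiffies */

        if (back == 0)
                back = 1;       /* never allow a zero-jiffy coalescing interval */

        printf("%lu jiffies -> %lu us -> %lu jiffies\n", coalesce, usecs, back);
        return 0;
}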
1543 struct vector_private *vp = from_timer(vp, t, tl);
1545 vp->estats.tx_kicks++;
1546 napi_schedule(&vp->napi);
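The coalescing timer armed from the transmit path (1073) ends up here: lines 1543-1546 show that its callback only counts a TX kick and hands the actual queue flush to NAPI. A reconstruction; only the signature line is assumed, and the callback name comes from the timer_setup at 1635:

static void vector_timer_expire(struct timer_list *t)
{
        struct vector_private *vp = from_timer(vp, t, tl);

        vp->estats.tx_kicks++;
        napi_schedule(&vp->napi);       /* the poll routine drains vp->tx_queue */
}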
1558 struct vector_private *vp;
1585 vp = netdev_priv(dev);
1602 *vp = ((struct vector_private)
1604 .list = LIST_HEAD_INIT(vp->list),
1633 INIT_WORK(&vp->reset_tx, vector_reset_tx);
1635 timer_setup(&vp->tl, vector_timer_expire, 0);
1636 spin_lock_init(&vp->lock);
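The final refs (1558-1636) come from device setup: the private area is initialised with a compound literal (1602-1604) and the last three lines wire up the deferred TX reset worker, the coalescing timer and the lock guarding open/close and BPF updates. The same three calls, with comments added for context:

INIT_WORK(&vp->reset_tx, vector_reset_tx);      /* scheduled from the TX timeout handler (1337) */
timer_setup(&vp->tl, vector_timer_expire, 0);   /* coalescing timer armed in the xmit path (1073) */
spin_lock_init(&vp->lock);                      /* taken in open/close and in the BPF flash path */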