Lines matching refs:cfv
142 struct cfv_info *cfv = vq_tx->vdev->priv;
144 ++cfv->stats.tx_kicks;
145 tasklet_schedule(&cfv->tx_release_tasklet);
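The two statements above (lines 144-145) form the TX used-buffer callback: it only counts the kick and defers all real work to softirq context, where the tasklet handler at lines 603-604 below calls cfv_release_used_buf(). A minimal sketch of that pattern, with the callback name assumed (only the body is visible in this listing):

    /* Virtqueue callback: do no work here, just punt to the
     * tasklet that recycles used TX buffers.
     */
    static void cfv_release_cb(struct virtqueue *vq_tx)
    {
            struct cfv_info *cfv = vq_tx->vdev->priv;

            ++cfv->stats.tx_kicks;
            tasklet_schedule(&cfv->tx_release_tasklet);
    }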
148 static void free_buf_info(struct cfv_info *cfv, struct buf_info *buf_info)
152 gen_pool_free(cfv->genpool, (unsigned long) buf_info->vaddr,
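free_buf_info() hands a TX buffer back to the shared-memory pool and frees its bookkeeping record. Only two members of struct buf_info appear in this listing; a plausible minimal definition, assumed for illustration:

    /* Assumed: one record per in-flight TX packet, attached to the
     * outbuf descriptor (see virtqueue_add_outbuf() at line 575).
     */
    struct buf_info {
            size_t size;    /* bytes taken from the genpool */
            void *vaddr;    /* start of the allocation in shared memory */
    };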
162 struct cfv_info *cfv = vq_tx->vdev->priv;
165 BUG_ON(vq_tx != cfv->vq_tx);
172 spin_lock_irqsave(&cfv->tx_lock, flags);
174 spin_unlock_irqrestore(&cfv->tx_lock, flags);
180 free_buf_info(cfv, buf_info);
186 if (cfv->vq_tx->num_free <= cfv->watermark_tx)
190 if (cfv->reserved_mem == 0 && cfv->genpool)
191 cfv->reserved_mem =
192 gen_pool_alloc(cfv->genpool,
193 cfv->reserved_size);
196 if (cfv->reserved_mem) {
197 cfv->watermark_tx =
198 virtqueue_get_vring_size(cfv->vq_tx);
199 netif_tx_wake_all_queues(cfv->ndev);
203 virtqueue_disable_cb(cfv->vq_tx);
204 ++cfv->stats.tx_flow_on;
207 WARN_ON(cfv->watermark_tx >
208 virtqueue_get_vring_size(cfv->vq_tx));
209 cfv->watermark_tx +=
210 virtqueue_get_vring_size(cfv->vq_tx) / 4;
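Lines 186-210 are the TX flow-control logic run while recycling used buffers: the queues stay stopped until the ring has more than watermark_tx free slots and the emergency memory reserve can be re-established; failing that, the watermark is raised by a quarter of the ring so the next attempt waits for more completions. A condensed sketch using only fields named in the listing:

    static void tx_flow_check_sketch(struct cfv_info *cfv)
    {
            if (cfv->vq_tx->num_free <= cfv->watermark_tx)
                    return;         /* still congested, stay stopped */

            /* Re-establish the emergency reserve before opening up. */
            if (cfv->reserved_mem == 0 && cfv->genpool)
                    cfv->reserved_mem = gen_pool_alloc(cfv->genpool,
                                                       cfv->reserved_size);

            if (cfv->reserved_mem) {
                    /* Reserve is back: wake the queues and mask used-buffer
                     * notifications (recycling happens inline on xmit).
                     */
                    cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx);
                    netif_tx_wake_all_queues(cfv->ndev);
                    virtqueue_disable_cb(cfv->vq_tx);
            } else {
                    /* No reserve yet: demand another quarter of the ring. */
                    cfv->watermark_tx +=
                            virtqueue_get_vring_size(cfv->vq_tx) / 4;
            }
    }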
217 struct cfv_info *cfv,
225 if (frm_len > cfv->mru || frm_len <= cfv->rx_hr + cfv->rx_tr) {
226 netdev_err(cfv->ndev,
228 frm_len, cfv->mru, cfv->rx_hr,
229 cfv->rx_tr);
234 cfpkt_len = frm_len - (cfv->rx_hr + cfv->rx_tr);
235 pad_len = (unsigned long)(frm + cfv->rx_hr) & (IP_HDR_ALIGN - 1);
237 skb = netdev_alloc_skb(cfv->ndev, frm_len + pad_len);
243 skb_reserve(skb, cfv->rx_hr + pad_len);
245 skb_put_data(skb, frm + cfv->rx_hr, cfpkt_len);
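Lines 225-245 validate an incoming frame against the MRU and the configured head/tailroom, then copy it into a freshly allocated skb, padding so the IP header behind the rx headroom lands on an IP_HDR_ALIGN boundary. A sketch of the copy path, assuming IP_HDR_ALIGN is a power of two:

    static struct sk_buff *rx_copy_sketch(struct cfv_info *cfv,
                                          u8 *frm, u32 frm_len)
    {
            struct sk_buff *skb;
            u32 cfpkt_len, pad_len;

            /* Strip device head/tailroom to get the CAIF packet length. */
            cfpkt_len = frm_len - (cfv->rx_hr + cfv->rx_tr);
            pad_len = (unsigned long)(frm + cfv->rx_hr) & (IP_HDR_ALIGN - 1);

            skb = netdev_alloc_skb(cfv->ndev, frm_len + pad_len);
            if (!skb)
                    return NULL;

            skb_reserve(skb, cfv->rx_hr + pad_len);
            skb_put_data(skb, frm + cfv->rx_hr, cfpkt_len);
            return skb;
    }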
252 struct cfv_info *cfv = container_of(napi, struct cfv_info, napi);
257 struct vringh_kiov *riov = &cfv->ctx.riov;
267 if (cfv->ctx.head != USHRT_MAX) {
268 vringh_complete_kern(cfv->vr_rx,
269 cfv->ctx.head,
271 cfv->ctx.head = USHRT_MAX;
275 cfv->vr_rx,
278 &cfv->ctx.head,
288 skb = cfv_alloc_and_copy_skb(&err, cfv, buf,
297 skb->dev = cfv->ndev;
300 ++cfv->ndev->stats.rx_dropped;
302 ++cfv->ndev->stats.rx_packets;
303 cfv->ndev->stats.rx_bytes += skb_len;
310 ++cfv->stats.rx_napi_resched;
316 ++cfv->stats.rx_napi_complete;
320 if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) &&
322 vringh_notify_disable_kern(cfv->vr_rx);
328 ++cfv->stats.rx_nomem;
332 vringh_notify_enable_kern(cfv->vr_rx);
337 netdev_warn(cfv->ndev, "Bad ring, disable device\n");
338 cfv->ndev->stats.rx_dropped = riov->used - riov->i;
340 vringh_notify_disable_kern(cfv->vr_rx);
341 netif_carrier_off(cfv->ndev);
345 if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0)
346 vringh_notify(cfv->vr_rx);
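Lines 252-346 are the NAPI poll loop, driven by vringh rather than a virtqueue: any descriptor left half-processed from the previous round (ctx.head != USHRT_MAX) is completed first, then vringh_getdesc_kern() pulls the next descriptor, each buffer is turned into an skb, and three exits are handled: budget exhausted (reschedule), empty ring (complete NAPI and re-arm notifications with a re-check against the race), and out-of-memory (leave notifications armed and retry later). A skeleton of that control flow, with the skb handling elided:

    static int rx_poll_sketch(struct napi_struct *napi, int quota)
    {
            struct cfv_info *cfv = container_of(napi, struct cfv_info, napi);
            struct vringh_kiov *riov = &cfv->ctx.riov;
            int rxcnt = 0, err = 0;

            do {
                    /* Complete the descriptor kept from the last round. */
                    if (cfv->ctx.head != USHRT_MAX) {
                            vringh_complete_kern(cfv->vr_rx, cfv->ctx.head,
                                                 0 /* nothing written back */);
                            cfv->ctx.head = USHRT_MAX;
                    }

                    err = vringh_getdesc_kern(cfv->vr_rx, riov, NULL,
                                              &cfv->ctx.head, GFP_ATOMIC);
                    if (err <= 0)
                            break;  /* 0: ring empty, <0: ring error */

                    /* ... build an skb from riov->iov[riov->i], hand it
                     * to the stack, bump rx_packets/rx_bytes ...
                     */
                    rxcnt++;
            } while (rxcnt < quota);

            if (err == 0 && rxcnt < quota) {
                    napi_complete(napi);
                    /* Re-arm notifications, then re-check for a race. */
                    if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) &&
                        napi_schedule_prep(napi)) {
                            vringh_notify_disable_kern(cfv->vr_rx);
                            __napi_schedule(napi);
                    }
            }

            if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0)
                    vringh_notify(cfv->vr_rx);
            return rxcnt;
    }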
352 struct cfv_info *cfv = vdev->priv;
354 ++cfv->stats.rx_kicks;
355 vringh_notify_disable_kern(cfv->vr_rx);
356 napi_schedule(&cfv->napi);
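The RX kick handler above (lines 352-356) is the standard NAPI idiom: mask further ring notifications first, then schedule the poller, so the modem cannot keep signalling while the vring is being drained. Notifications are re-armed only in the empty-ring path of the poll loop (lines 320-322).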
359 static void cfv_destroy_genpool(struct cfv_info *cfv)
361 if (cfv->alloc_addr)
362 dma_free_coherent(cfv->vdev->dev.parent->parent,
363 cfv->allocsz, cfv->alloc_addr,
364 cfv->alloc_dma);
366 if (!cfv->genpool)
368 gen_pool_free(cfv->genpool, cfv->reserved_mem,
369 cfv->reserved_size);
370 gen_pool_destroy(cfv->genpool);
371 cfv->genpool = NULL;
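Note the teardown order in cfv_destroy_genpool(): the coherent DMA block is released, then the emergency reserve is handed back with gen_pool_free() before gen_pool_destroy(), which expects every allocation to have been returned by the time the pool goes away.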
374 static int cfv_create_genpool(struct cfv_info *cfv)
384 cfv->allocsz = (virtqueue_get_vring_size(cfv->vq_tx) *
385 (ETH_DATA_LEN + cfv->tx_hr + cfv->tx_tr) * 11)/10;
386 if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu)
390 if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) {
391 netdev_info(cfv->ndev, "Not enough device memory\n");
395 cfv->alloc_addr = dma_alloc_coherent(
396 cfv->vdev->dev.parent->parent,
397 cfv->allocsz, &cfv->alloc_dma,
399 if (cfv->alloc_addr)
402 cfv->allocsz = (cfv->allocsz * 3) >> 2;
405 netdev_dbg(cfv->ndev, "Allocated %zd bytes from dma-memory\n",
406 cfv->allocsz);
409 cfv->genpool = gen_pool_create(7, -1);
410 if (!cfv->genpool)
413 err = gen_pool_add_virt(cfv->genpool, (unsigned long)cfv->alloc_addr,
414 (phys_addr_t)virt_to_phys(cfv->alloc_addr),
415 cfv->allocsz, -1);
422 cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu;
423 cfv->reserved_mem = gen_pool_alloc(cfv->genpool,
424 cfv->reserved_size);
425 if (!cfv->reserved_mem) {
430 cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx);
433 cfv_destroy_genpool(cfv);
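cfv_create_genpool() sizes the shared pool at roughly 110% of a full TX ring of maximum-size frames (lines 384-385), then shrinks the request by a quarter on each dma_alloc_coherent() failure, giving up once the pool could not hold one MTU-sized packet per possible CPU. A sketch of the retry loop; the gfp flags are not visible in the listing, so GFP_KERNEL is an assumption:

    static int alloc_shm_sketch(struct cfv_info *cfv)
    {
            for (;;) {
                    if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu)
                            return -ENOMEM; /* too small to be useful */

                    cfv->alloc_addr = dma_alloc_coherent(
                                    cfv->vdev->dev.parent->parent,
                                    cfv->allocsz, &cfv->alloc_dma,
                                    GFP_KERNEL /* assumed */);
                    if (cfv->alloc_addr)
                            return 0;

                    cfv->allocsz = (cfv->allocsz * 3) >> 2; /* retry at 75% */
            }
    }

The gen_pool_create(7, -1) call at line 409 then gives the pool a 2^7 = 128-byte minimum allocation granularity on any NUMA node, and lines 422-424 carve out the per-CPU emergency reserve used by the flow-control code above.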
440 struct cfv_info *cfv = netdev_priv(netdev);
442 if (cfv_create_genpool(cfv))
446 napi_enable(&cfv->napi);
449 napi_schedule(&cfv->napi);
456 struct cfv_info *cfv = netdev_priv(netdev);
462 virtqueue_disable_cb(cfv->vq_tx);
463 vringh_notify_disable_kern(cfv->vr_rx);
464 napi_disable(&cfv->napi);
467 cfv_release_used_buf(cfv->vq_tx);
468 spin_lock_irqsave(&cfv->tx_lock, flags);
469 while ((buf_info = virtqueue_detach_unused_buf(cfv->vq_tx)))
470 free_buf_info(cfv, buf_info);
471 spin_unlock_irqrestore(&cfv->tx_lock, flags);
474 cfv_destroy_genpool(cfv);
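cfv_netdev_close() runs the mirror image of open: TX completion callbacks and RX notifications are masked and NAPI is stopped before any buffer is touched; then both the used TX buffers (cfv_release_used_buf()) and the never-consumed ones (virtqueue_detach_unused_buf(), under tx_lock) are reclaimed so the genpool can be destroyed with nothing outstanding.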
479 static struct buf_info *cfv_alloc_and_copy_to_shm(struct cfv_info *cfv,
487 if (!cfv->genpool)
490 if (unlikely(cfv->tx_hr + skb->len + cfv->tx_tr > cfv->mtu)) {
491 netdev_warn(cfv->ndev, "Invalid packet len (%d > %d)\n",
492 cfv->tx_hr + skb->len + cfv->tx_tr, cfv->mtu);
501 hdr_ofs = cfv->tx_hr + info->hdr_len;
503 buf_info->size = cfv->tx_hr + skb->len + cfv->tx_tr + pad_len;
506 buf_info->vaddr = (void *)gen_pool_alloc(cfv->genpool, buf_info->size);
511 skb_copy_bits(skb, 0, buf_info->vaddr + cfv->tx_hr + pad_len, skb->len);
513 skb->len + cfv->tx_hr + cfv->rx_hr);
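Lines 487-513 stage an outgoing skb in the shared pool. The offsets imply this layout for one TX allocation, with pad_len derived from hdr_ofs (tx headroom plus CAIF header length) and intended to keep the IP header aligned:

    /*
     *  vaddr
     *  +---------+-------+--------------------+-------+
     *  | pad_len | tx_hr | payload (skb->len) | tx_tr |
     *  +---------+-------+--------------------+-------+
     *            ^ sg entry starts at vaddr + pad_len (line 513)
     *                    ^ skb_copy_bits() target (line 511)
     */
    hdr_ofs = cfv->tx_hr + info->hdr_len;
    pad_len = hdr_ofs & (IP_HDR_ALIGN - 1);
    buf_info->size = cfv->tx_hr + skb->len + cfv->tx_tr + pad_len;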
524 struct cfv_info *cfv = netdev_priv(netdev);
532 cfv_release_used_buf(cfv->vq_tx);
533 spin_lock_irqsave(&cfv->tx_lock, flags);
540 if (unlikely(cfv->vq_tx->num_free <= num_present_cpus())) {
542 cfv->stats.tx_full_ring++;
548 buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
550 cfv->stats.tx_no_mem++;
553 if (cfv->reserved_mem && cfv->genpool) {
554 gen_pool_free(cfv->genpool, cfv->reserved_mem,
555 cfv->reserved_size);
556 cfv->reserved_mem = 0;
557 buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
563 cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx) / 4;
565 virtqueue_enable_cb(cfv->vq_tx);
571 netdev_warn(cfv->ndev, "Out of gen_pool memory\n");
575 ret = virtqueue_add_outbuf(cfv->vq_tx, &sg, 1, buf_info, GFP_ATOMIC);
578 netdev_warn(cfv->ndev, "Failed adding buffer to TX vring:%d\n",
584 cfv->ndev->stats.tx_packets++;
585 cfv->ndev->stats.tx_bytes += skb->len;
586 spin_unlock_irqrestore(&cfv->tx_lock, flags);
589 virtqueue_kick(cfv->vq_tx);
594 spin_unlock_irqrestore(&cfv->tx_lock, flags);
595 cfv->ndev->stats.tx_dropped++;
596 free_buf_info(cfv, buf_info);
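The xmit path recycles used buffers inline (line 532), stops the queues when free descriptors fall to the number of present CPUs (line 540), and on pool exhaustion burns the emergency reserve for one retry; once the reserve is gone, watermark_tx drops to a quarter of the ring and virtqueue_enable_cb() re-arms completion callbacks (lines 563-565) so cfv_release_used_buf() can later rebuild the reserve and wake the queues. A sketch of the retry, mirroring lines 548-557:

    buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
    if (unlikely(!buf_info)) {
            cfv->stats.tx_no_mem++;

            /* Pool exhausted: release the reserve and retry once. */
            if (cfv->reserved_mem && cfv->genpool) {
                    gen_pool_free(cfv->genpool, cfv->reserved_mem,
                                  cfv->reserved_size);
                    cfv->reserved_mem = 0;
                    buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
            }
    }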
603 struct cfv_info *cfv = (struct cfv_info *)drv;
604 cfv_release_used_buf(cfv->vq_tx);
624 static inline void debugfs_init(struct cfv_info *cfv)
626 cfv->debugfs = debugfs_create_dir(netdev_name(cfv->ndev), NULL);
628 debugfs_create_u32("rx-napi-complete", 0400, cfv->debugfs,
629 &cfv->stats.rx_napi_complete);
630 debugfs_create_u32("rx-napi-resched", 0400, cfv->debugfs,
631 &cfv->stats.rx_napi_resched);
632 debugfs_create_u32("rx-nomem", 0400, cfv->debugfs,
633 &cfv->stats.rx_nomem);
634 debugfs_create_u32("rx-kicks", 0400, cfv->debugfs,
635 &cfv->stats.rx_kicks);
636 debugfs_create_u32("tx-full-ring", 0400, cfv->debugfs,
637 &cfv->stats.tx_full_ring);
638 debugfs_create_u32("tx-no-mem", 0400, cfv->debugfs,
639 &cfv->stats.tx_no_mem);
640 debugfs_create_u32("tx-kicks", 0400, cfv->debugfs,
641 &cfv->stats.tx_kicks);
642 debugfs_create_u32("tx-flow-on", 0400, cfv->debugfs,
643 &cfv->stats.tx_flow_on);
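With the directory named after the net device, these read-only (mode 0400) counters appear as /sys/kernel/debug/<ifname>/rx-kicks, tx-full-ring, and so on, assuming debugfs is mounted at its usual location.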
654 struct cfv_info *cfv;
662 cfv = netdev_priv(netdev);
663 cfv->vdev = vdev;
664 cfv->ndev = netdev;
666 spin_lock_init(&cfv->tx_lock);
673 err = vdev->vringh_config->find_vrhs(vdev, 1, &cfv->vr_rx, &vrh_cbs);
678 err = virtio_find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names, NULL);
685 &cfv->tx_hr);
687 &cfv->rx_hr);
689 &cfv->tx_tr);
691 &cfv->rx_tr);
693 &cfv->mtu);
695 &cfv->mru);
697 cfv->tx_hr = CFV_DEF_HEADROOM;
698 cfv->rx_hr = CFV_DEF_HEADROOM;
699 cfv->tx_tr = CFV_DEF_TAILROOM;
700 cfv->rx_tr = CFV_DEF_TAILROOM;
701 cfv->mtu = CFV_DEF_MTU_SIZE;
702 cfv->mru = CFV_DEF_MTU_SIZE;
705 netdev->needed_headroom = cfv->tx_hr;
706 netdev->needed_tailroom = cfv->tx_tr;
709 virtqueue_disable_cb(cfv->vq_tx);
711 netdev->mtu = cfv->mtu - cfv->tx_tr;
712 vdev->priv = cfv;
715 vringh_kiov_init(&cfv->ctx.riov, NULL, 0);
716 cfv->ctx.head = USHRT_MAX;
717 netif_napi_add(netdev, &cfv->napi, cfv_rx_poll, CFV_DEFAULT_QUOTA);
719 tasklet_init(&cfv->tx_release_tasklet,
721 (unsigned long)cfv);
741 debugfs_init(cfv);
745 netdev_warn(cfv->ndev, "CAIF Virtio probe failed:%d\n", err);
747 if (cfv->vr_rx)
748 vdev->vringh_config->del_vrhs(cfv->vdev);
749 if (cfv->vdev)
750 vdev->config->del_vqs(cfv->vdev);
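Lines 685-702 read the link-layout parameters from virtio config space when the transport provides one, falling back to compiled-in defaults otherwise. A sketch of the pattern; the config struct name and member are assumptions, since the listing only shows the destination fields and the CFV_DEF_* defaults:

    if (cfv->vdev->config->get) {
            virtio_cread(cfv->vdev, struct virtio_caif_transf_config,
                         headroom, &cfv->tx_hr);
            /* ... same pattern for rx_hr, tx_tr, rx_tr, mtu, mru ... */
    } else {
            cfv->tx_hr = CFV_DEF_HEADROOM;
            cfv->rx_hr = CFV_DEF_HEADROOM;
            cfv->tx_tr = CFV_DEF_TAILROOM;
            cfv->rx_tr = CFV_DEF_TAILROOM;
            cfv->mtu = CFV_DEF_MTU_SIZE;
            cfv->mru = CFV_DEF_MTU_SIZE;
    }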
757 struct cfv_info *cfv = vdev->priv;
760 dev_close(cfv->ndev);
763 tasklet_kill(&cfv->tx_release_tasklet);
764 debugfs_remove_recursive(cfv->debugfs);
766 vringh_kiov_cleanup(&cfv->ctx.riov);
768 vdev->vringh_config->del_vrhs(cfv->vdev);
769 cfv->vr_rx = NULL;
770 vdev->config->del_vqs(cfv->vdev);
771 unregister_netdev(cfv->ndev);
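Removal tears down in dependency order: close the net device and kill the tasklet before the state they touch disappears, clean up the vringh kiov, delete the host-side rings (del_vrhs) before the virtqueues (del_vqs), and unregister the net device last.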