Lines matching defs:iovb (uses of the iovec buffer pointer iovb in the nicstar ATM driver, drivers/atm/nicstar.c; the leading number on each line is the source line in that file)

141 static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb);
201 struct sk_buff *iovb;
233 while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) {
234 dev_kfree_skb_any(iovb);
740 struct sk_buff *iovb;
741 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
742 if (iovb == NULL) {
750 NS_PRV_BUFTYPE(iovb) = BUF_NONE;
751 skb_queue_tail(&card->iovpool.queue, iovb);
819 struct sk_buff *iovb;
820 while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
821 dev_kfree_skb_any(iovb);
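
The hits at 740-751 and 819-821 bracket the lifetime of the iovec-buffer pool: at card init a batch of NS_IOVBUFSIZE skbs is allocated with GFP_KERNEL, tagged BUF_NONE and queued on card->iovpool.queue, and at teardown the queue is drained with dev_kfree_skb_any(). A minimal kernel-style sketch of that fill/drain pattern follows; the iov_pool struct, IOVBUF_SIZE and the function names are simplified stand-ins, not the driver's own (the real pool lives inside ns_dev and also tracks tunable min/max levels).

#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <linux/errno.h>

#define IOVBUF_SIZE 200                 /* placeholder for NS_IOVBUFSIZE */

/* Simplified stand-in for the iovpool kept inside ns_dev. */
struct iov_pool {
    struct sk_buff_head queue;
    int count;
};

/* Preallocate n iovec buffers in process context (cf. lines 740-751). */
static int iov_pool_fill(struct iov_pool *pool, int n)
{
    int i;

    skb_queue_head_init(&pool->queue);
    pool->count = 0;
    for (i = 0; i < n; i++) {
        struct sk_buff *iovb = alloc_skb(IOVBUF_SIZE, GFP_KERNEL);

        if (iovb == NULL)
            return -ENOMEM;             /* caller drains whatever was queued */
        /* the driver also tags it: NS_PRV_BUFTYPE(iovb) = BUF_NONE; */
        skb_queue_tail(&pool->queue, iovb);
        pool->count++;
    }
    return 0;
}

/* Drain the pool at teardown, mirroring the loop at lines 819-821. */
static void iov_pool_drain(struct iov_pool *pool)
{
    struct sk_buff *iovb;

    while ((iovb = skb_dequeue(&pool->queue)) != NULL)
        dev_kfree_skb_any(iovb);
    pool->count = 0;
}

Keeping pre-built skbs on an sk_buff_head like this lets the interrupt-time RX path grab a reassembly buffer with a cheap skb_dequeue() instead of relying on an atomic allocation.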
1441 struct sk_buff *iovb;
1451 iovb = vc->rx_iov;
1452 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
1453 NS_PRV_IOVCNT(iovb));
1454 NS_PRV_IOVCNT(iovb) = 0;
1456 recycle_iov_buf(card, iovb);
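
The 1441-1456 group is the abort path taken when a connection is closed while a PDU is only partly reassembled: the data buffers recorded in the iovec buffer are recycled through recycle_iovec_rx_bufs(), the entry count is cleared, and the iovec buffer itself goes back through recycle_iov_buf(). Below is a hedged sketch of that cleanup; it assumes each iovec entry's iov_base carries the component sk_buff, uses a simplified IOVB_CNT() stand-in for NS_PRV_IOVCNT(), and simply frees buffers that the real driver would return to the card's free-buffer queues and to the pool.

#include <linux/skbuff.h>
#include <linux/uio.h>          /* struct iovec */

/* The driver keeps the entry count in the skb control block behind the
 * NS_PRV_IOVCNT() macro; this is a simplified stand-in. */
#define IOVB_CNT(iovb)  (*(int *)(iovb)->cb)

/* Drop a partially reassembled PDU when its VC closes (cf. 1441-1456).
 * 'rx_iov' plays the role of vc->rx_iov. */
static void abort_partial_pdu(struct sk_buff **rx_iov)
{
    struct sk_buff *iovb = *rx_iov;
    struct iovec *iov;
    int i;

    if (iovb == NULL)
        return;
    iov = (struct iovec *)iovb->data;
    for (i = 0; i < IOVB_CNT(iovb); i++)    /* recycle_iovec_rx_bufs() in the driver */
        dev_kfree_skb_any((struct sk_buff *)iov[i].iov_base);
    IOVB_CNT(iovb) = 0;
    *rx_iov = NULL;
    dev_kfree_skb_any(iovb);    /* the driver recycles it to the pool instead */
}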
1972 struct sk_buff *iovb;
2065 if ((iovb = vc->rx_iov) == NULL) {
2066 iovb = skb_dequeue(&(card->iovpool.queue));
2067 if (iovb == NULL) { /* No buffers in the queue */
2068 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC);
2069 if (iovb == NULL) {
2076 NS_PRV_BUFTYPE(iovb) = BUF_NONE;
2081 NS_PRV_BUFTYPE(iovb) = BUF_NONE;
2086 vc->rx_iov = iovb;
2087 NS_PRV_IOVCNT(iovb) = 0;
2088 iovb->len = 0;
2089 iovb->data = iovb->head;
2090 skb_reset_tail_pointer(iovb);
2094 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
2097 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
2099 NS_PRV_IOVCNT(iovb) = 0;
2100 iovb->len = 0;
2101 iovb->data = iovb->head;
2102 skb_reset_tail_pointer(iovb);
2104 iov = &((struct iovec *)iovb->data)[NS_PRV_IOVCNT(iovb)++];
2107 iovb->len += iov->iov_len;
2110 if (NS_PRV_IOVCNT(iovb) == 1) {
2119 recycle_iov_buf(card, iovb);
2122 } else { /* NS_PRV_IOVCNT(iovb) >= 2 */
2130 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
2131 NS_PRV_IOVCNT(iovb));
2133 recycle_iov_buf(card, iovb);
2146 len + 8 > iovb->len || len + (47 + 8) < iovb->len) {
2148 if (len + 8 > iovb->len || len + (47 + 8) < iovb->len)
2153 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
2154 NS_PRV_IOVCNT(iovb));
2156 recycle_iov_buf(card, iovb);
2162 if (NS_PRV_IOVCNT(iovb) == 1) { /* Just a small buffer */
2175 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
2234 iovb->data,
2235 NS_PRV_IOVCNT(iovb));
2237 recycle_iov_buf(card, iovb);
2272 iov = (struct iovec *)iovb->data;
2276 NS_PRV_IOVCNT(iovb));
2295 for (j = 1; j < NS_PRV_IOVCNT(iovb); j++) {
2321 recycle_iov_buf(card, iovb);
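
The long run from 2065 to 2321 is the receive-side reassembly in the driver's RX dequeue path. On the first buffer of a PDU an iovec buffer is taken from the pool, or allocated with GFP_ATOMIC if the pool is empty (2066-2068); vc->rx_iov is pointed at it and its entry count, length, data and tail pointers are reset (2086-2090). Each completed receive buffer is then appended as one iovec entry and its length added to iovb->len (2104-2107); if NS_MAX_IOVECS would be exceeded, the buffers collected so far are recycled and the counters reset (2094-2102). At end of PDU the length taken from the AAL5 trailer is checked against iovb->len (2146-2148); on error everything is recycled, otherwise the PDU is passed up either directly from the buffers (2162, 2175) or after copying them into one skb (the loop at 2295), and the iovec buffer returns to the pool. The sketch below condenses the append step and the trailer check; the constants, the IOVB_CNT() macro and the plain frees are simplifications standing in for the driver's NS_* macros and hardware buffer recycling.

#include <linux/skbuff.h>
#include <linux/uio.h>
#include <linux/gfp.h>
#include <linux/types.h>

#define IOVBUF_SIZE     200     /* placeholder for NS_IOVBUFSIZE */
#define MAX_IOVECS      16      /* placeholder for NS_MAX_IOVECS */
#define IOVB_CNT(iovb)  (*(int *)(iovb)->cb)    /* stand-in for NS_PRV_IOVCNT() */

/* Append one received data buffer (len bytes, a whole number of cells in
 * the driver) to the PDU being reassembled.  'rx_iov' plays the role of
 * vc->rx_iov and 'pool' the role of card->iovpool.queue.  Returns the
 * iovec buffer in use, or NULL if none could be obtained. */
static struct sk_buff *rx_append(struct sk_buff **rx_iov,
                                 struct sk_buff_head *pool,
                                 struct sk_buff *data, unsigned int len)
{
    struct sk_buff *iovb = *rx_iov;
    struct iovec *iov;

    if (iovb == NULL) {                 /* first buffer of this PDU */
        iovb = skb_dequeue(pool);
        if (iovb == NULL)
            iovb = alloc_skb(IOVBUF_SIZE, GFP_ATOMIC);
        if (iovb == NULL)
            return NULL;                /* caller drops the buffer */
        *rx_iov = iovb;
        IOVB_CNT(iovb) = 0;
        iovb->len = 0;
        iovb->data = iovb->head;        /* rewind, as at 2088-2090 */
        skb_reset_tail_pointer(iovb);
    } else if (IOVB_CNT(iovb) >= MAX_IOVECS) {
        /* Too many fragments: drop what was collected and restart.
         * The driver recycles these back to the card instead (2097). */
        struct iovec *old = (struct iovec *)iovb->data;
        int i;

        for (i = 0; i < IOVB_CNT(iovb); i++)
            dev_kfree_skb_any((struct sk_buff *)old[i].iov_base);
        IOVB_CNT(iovb) = 0;
        iovb->len = 0;
        iovb->data = iovb->head;
        skb_reset_tail_pointer(iovb);
    }
    iov = &((struct iovec *)iovb->data)[IOVB_CNT(iovb)++];
    iov->iov_base = data;               /* component sk_buff */
    iov->iov_len = len;
    iovb->len += len;
    return iovb;
}

/* End-of-PDU sanity check from 2146-2148: the AAL5 trailer length plus the
 * 8-byte trailer must fit in the reassembled data, with at most 47 bytes
 * of cell padding on top. */
static bool aal5_len_ok(unsigned int len, unsigned int reassembled_len)
{
    return len + 8 <= reassembled_len && len + (47 + 8) >= reassembled_len;
}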
2342 static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb)
2345 skb_queue_tail(&card->iovpool.queue, iovb);
2348 dev_kfree_skb_any(iovb);
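
recycle_iov_buf() at 2342-2348 closes the loop: a used iovec buffer is either queued back on card->iovpool.queue or freed outright, and the lines between the two matches (not shown by this search) test the pool level to decide which. A sketch of that bounded recycle, on the simplified iov_pool from the fill/drain sketch above; the max argument is an assumption standing in for the driver's pool limit.

/* Return an iovec buffer to the pool, or free it if the pool is already
 * at its limit (struct iov_pool as in the fill/drain sketch above). */
static void iov_pool_recycle(struct iov_pool *pool, struct sk_buff *iovb,
                             int max)
{
    if (pool->count < max) {
        skb_queue_tail(&pool->queue, iovb);
        pool->count++;
    } else {
        dev_kfree_skb_any(iovb);
    }
}

Capping the pool on return keeps a burst of reassemblies from permanently growing the free list.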
2635 struct sk_buff *iovb;
2638 iovb = skb_dequeue(&card->iovpool.queue);
2641 if (iovb == NULL)
2646 dev_kfree_skb_any(iovb);
2650 struct sk_buff *iovb;
2652 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
2653 if (iovb == NULL)
2655 NS_PRV_BUFTYPE(iovb) = BUF_NONE;
2657 skb_queue_tail(&card->iovpool.queue, iovb);
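
The last group (2635-2657) adjusts the pool level at run time from the ioctl path: surplus buffers are dequeued and freed, missing ones are allocated with GFP_KERNEL, tagged BUF_NONE and queued, with the same primitives as the init-time fill. The sketch below grows or shrinks the simplified iov_pool from the earlier sketches toward a target level; the target bookkeeping is an assumption, and the locking the driver needs around the queue is omitted.

/* Grow or shrink the pool toward 'target' buffers (cf. 2635-2657). */
static int iov_pool_set_level(struct iov_pool *pool, int target)
{
    struct sk_buff *iovb;

    while (pool->count > target) {      /* shrink: free surplus buffers */
        iovb = skb_dequeue(&pool->queue);
        if (iovb == NULL)
            break;                      /* pool count out of sync */
        dev_kfree_skb_any(iovb);
        pool->count--;
    }
    while (pool->count < target) {      /* grow: allocate and queue */
        iovb = alloc_skb(IOVBUF_SIZE, GFP_KERNEL);
        if (iovb == NULL)
            return -ENOMEM;
        /* the driver also tags it: NS_PRV_BUFTYPE(iovb) = BUF_NONE; */
        skb_queue_tail(&pool->queue, iovb);
        pool->count++;
    }
    return 0;
}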