Lines matching refs: qi
(Identifier cross-reference: each numbered line below is a source line containing qi, apparently from the UML vector network driver, arch/um/drivers/vector_kern.c; the intervening lines are omitted by the tool.)
248 static int vector_advancehead(struct vector_queue *qi, int advance)
252 qi->head =
253 (qi->head + advance)
254 % qi->max_depth;
257 spin_lock(&qi->tail_lock);
258 qi->queue_depth -= advance;
264 if (qi->queue_depth == 0) {
265 qi->head = 0;
266 qi->tail = 0;
268 queue_depth = qi->queue_depth;
269 spin_unlock(&qi->tail_lock);
278 static int vector_advancetail(struct vector_queue *qi, int advance)
282 qi->tail =
283 (qi->tail + advance)
284 % qi->max_depth;
285 spin_lock(&qi->head_lock);
286 qi->queue_depth += advance;
287 queue_depth = qi->queue_depth;
288 spin_unlock(&qi->head_lock);
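Taken together, vector_advancehead() and vector_advancetail() show the queue's locking convention: each end owns its own index, and the shared queue_depth is only touched under the opposite end's lock. Since the enqueue path already holds tail_lock and the send path already holds head_lock when these helpers run, depth changes always happen with both locks held, so a reader holding either lock sees a stable value. The head side also resets both indices whenever the queue drains, so the next burst can go out as one contiguous, maximum-length vector. A minimal userspace model of that pattern (hypothetical names; pthread mutexes stand in for the spinlocks, and as in the driver each helper assumes its caller already holds the own-side lock):

    #include <pthread.h>

    struct ring {
        int head, tail, depth, max_depth;
        pthread_mutex_t head_lock;  /* held across the consume/send path */
        pthread_mutex_t tail_lock;  /* held across the enqueue path */
    };

    /* Consumer side: retire 'advance' slots starting at head. */
    static int ring_advance_head(struct ring *q, int advance)
    {
        int depth;

        q->head = (q->head + advance) % q->max_depth;
        pthread_mutex_lock(&q->tail_lock);
        q->depth -= advance;
        /* Drained: reset both indices so the next burst is one
         * contiguous, maximum-length run. */
        if (q->depth == 0) {
            q->head = 0;
            q->tail = 0;
        }
        depth = q->depth;
        pthread_mutex_unlock(&q->tail_lock);
        return depth;
    }

    /* Producer side: publish 'advance' newly filled slots at tail. */
    static int ring_advance_tail(struct ring *q, int advance)
    {
        int depth;

        q->tail = (q->tail + advance) % q->max_depth;
        pthread_mutex_lock(&q->head_lock);
        q->depth += advance;
        depth = q->depth;
        pthread_mutex_unlock(&q->head_lock);
        return depth;
    }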
333 static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb)
335 struct vector_private *vp = netdev_priv(qi->dev);
338 struct mmsghdr *mmsg_vector = qi->mmsg_vector;
341 spin_lock(&qi->tail_lock);
342 spin_lock(&qi->head_lock);
343 queue_depth = qi->queue_depth;
344 spin_unlock(&qi->head_lock);
349 if (queue_depth < qi->max_depth) {
351 *(qi->skbuff_vector + qi->tail) = skb;
352 mmsg_vector += qi->tail;
363 queue_depth = vector_advancetail(qi, 1);
366 spin_unlock(&qi->tail_lock);
369 qi->dev->stats.tx_dropped++;
373 netdev_completed_queue(qi->dev, 1, packet_len);
375 spin_unlock(&qi->tail_lock);
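vector_enqueue() holds tail_lock for the whole operation, takes head_lock only briefly to snapshot a stable queue_depth, then either fills the slot at tail (the skb pointer plus its prepared mmsghdr) and publishes it via vector_advancetail(), or drops the packet, bumping tx_dropped while still reporting the length through netdev_completed_queue() so the BQL byte accounting stays balanced. A sketch of the same flow in the model above (hypothetical names; 'item' stands in for the sk_buff and its prepared mmsghdr):

    #include <stdlib.h>

    static int ring_enqueue(struct ring *q, void **slots, void *item)
    {
        int depth;

        pthread_mutex_lock(&q->tail_lock);  /* own-side lock, held throughout */
        pthread_mutex_lock(&q->head_lock);  /* brief: snapshot a stable depth */
        depth = q->depth;
        pthread_mutex_unlock(&q->head_lock);

        if (depth < q->max_depth) {
            slots[q->tail] = item;            /* fill the slot at tail */
            depth = ring_advance_tail(q, 1);  /* publish it */
        } else {
            /* Full: drop. The driver also bumps tx_dropped and reports
             * the length as completed to keep BQL balanced. */
            free(item);
            depth = -1;
        }
        pthread_mutex_unlock(&q->tail_lock);
        return depth;
    }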
379 static int consume_vector_skbs(struct vector_queue *qi, int count)
385 for (skb_index = qi->head; skb_index < qi->head + count; skb_index++) {
386 skb = *(qi->skbuff_vector + skb_index);
391 *(qi->skbuff_vector + skb_index) = NULL;
394 qi->dev->stats.tx_bytes += bytes_compl;
395 qi->dev->stats.tx_packets += count;
396 netdev_completed_queue(qi->dev, count, bytes_compl);
397 return vector_advancehead(qi, count);
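consume_vector_skbs() retires a completed run: it walks count slots from head, accumulates the byte total for the stats and for netdev_completed_queue(), and NULLs each slot pointer so a later destroy_queue() can tell live entries from already-freed ones. Note the walk is deliberately linear, with no modulo: the caller (see lines 424-425 below) clamps every send at the wrap point, so head + count never exceeds max_depth here. The equivalent step in the model (free() stands in for dev_consume_skb_any()):

    static int ring_consume(struct ring *q, void **slots, int count)
    {
        int i;

        /* No wraparound handling needed: the flush below never
         * completes a run that crosses the wrap point. */
        for (i = q->head; i < q->head + count; i++) {
            free(slots[i]);
            slots[i] = NULL;  /* mark empty for teardown */
        }
        return ring_advance_head(q, count);
    }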
407 static int vector_send(struct vector_queue *qi)
409 struct vector_private *vp = netdev_priv(qi->dev);
411 int result = 0, send_len, queue_depth = qi->max_depth;
413 if (spin_trylock(&qi->head_lock)) {
414 if (spin_trylock(&qi->tail_lock)) {
416 queue_depth = qi->queue_depth;
417 spin_unlock(&qi->tail_lock);
421 send_from = qi->mmsg_vector;
422 send_from += qi->head;
424 if (send_len + qi->head > qi->max_depth)
425 send_len = qi->max_depth - qi->head;
451 consume_vector_skbs(qi, result);
461 netif_wake_queue(qi->dev);
471 spin_unlock(&qi->head_lock);
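vector_send() is opportunistic: it proceeds only if it can trylock head_lock (plus a brief tail_lock to snapshot queue_depth); if another context is already flushing, it simply returns qi->max_depth, which the caller treats as "queue busy". The run handed to the socket starts at head and is clamped at the wrap point so the mmsghdr slice stays contiguous; whatever actually went out is then retired through consume_vector_skbs() and the netdev queue is woken. A sketch of the flush using the real sendmmsg(2) batching call (Linux-only; continuing the model, hypothetical names):

    #define _GNU_SOURCE  /* for sendmmsg(); must precede all includes */
    #include <sys/socket.h>

    static int ring_flush(struct ring *q, int fd, struct mmsghdr *msgs,
                          void **slots)
    {
        int depth = q->max_depth;  /* returned as-is if the lock is busy */

        if (pthread_mutex_trylock(&q->head_lock) == 0) {
            if (pthread_mutex_trylock(&q->tail_lock) == 0) {
                depth = q->depth;
                pthread_mutex_unlock(&q->tail_lock);
                while (depth > 0) {
                    int len = depth, sent;

                    /* Clamp at the wrap point: the run passed to
                     * sendmmsg() must be contiguous in memory. */
                    if (len + q->head > q->max_depth)
                        len = q->max_depth - q->head;
                    sent = sendmmsg(fd, msgs + q->head, len, 0);
                    if (sent <= 0)
                        break;
                    depth = ring_consume(q, slots, sent);
                    if (sent < len)
                        break;  /* short send: try again later */
                }
            }
            pthread_mutex_unlock(&q->head_lock);
        }
        return depth;
    }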
480 static void destroy_queue(struct vector_queue *qi)
484 struct vector_private *vp = netdev_priv(qi->dev);
487 if (qi == NULL)
492 if (qi->skbuff_vector != NULL) {
493 for (i = 0; i < qi->max_depth; i++) {
494 if (*(qi->skbuff_vector + i) != NULL)
495 dev_kfree_skb_any(*(qi->skbuff_vector + i));
497 kfree(qi->skbuff_vector);
500 if (qi->mmsg_vector != NULL) {
501 mmsg_vector = qi->mmsg_vector;
502 for (i = 0; i < qi->max_depth; i++) {
512 kfree(qi->mmsg_vector);
514 kfree(qi);
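destroy_queue() tears down in the reverse order of ownership: any skbuff_vector slot still non-NULL owns its skb (enqueued but never consumed), so it is freed with dev_kfree_skb_any(); then the per-slot iovec arrays hanging off each mmsghdr, then the two vectors, then the queue structure itself. The NULL checks make it safe to call on a partially constructed queue. The matching teardown in the model (free(NULL) is a no-op, so already-consumed slots need no special case):

    static void ring_destroy(struct ring *q, void **slots, struct mmsghdr *msgs)
    {
        int i;

        if (q == NULL)
            return;
        if (slots != NULL) {
            for (i = 0; i < q->max_depth; i++)
                free(slots[i]);       /* any leftover buffers */
            free(slots);
        }
        if (msgs != NULL) {
            for (i = 0; i < q->max_depth; i++)
                free(msgs[i].msg_hdr.msg_iov);  /* per-slot iovec array */
            free(msgs);
        }
        free(q);
    }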
673 static void prep_queue_for_rx(struct vector_queue *qi)
675 struct vector_private *vp = netdev_priv(qi->dev);
676 struct mmsghdr *mmsg_vector = qi->mmsg_vector;
677 void **skbuff_vector = qi->skbuff_vector;
680 if (qi->queue_depth == 0)
682 for (i = 0; i < qi->queue_depth; i++) {
692 qi->queue_depth = 0;
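prep_queue_for_rx() suggests the receive queue does not use head/tail at all: each receive pass fills the vector from index 0, and queue_depth records how many leading entries the previous pass consumed. Re-arming therefore rebuilds exactly those first queue_depth slots (fresh skb, iovec pointing into it) and zeroes the counter. A model of the re-arm, assuming one iovec per slot (malloc() stands in for the driver's skb allocation; names and sizes are illustrative):

    static void ring_prep_rx(struct ring *q, void **slots, struct mmsghdr *msgs,
                             size_t bufsz)
    {
        int i;

        if (q->depth == 0)
            return;
        /* Refill only the leading slots consumed by the last pass;
         * assumes each msg_iov already points at a one-entry array. */
        for (i = 0; i < q->depth; i++) {
            slots[i] = malloc(bufsz);
            msgs[i].msg_hdr.msg_iov[0].iov_base = slots[i];
            msgs[i].msg_hdr.msg_iov[0].iov_len = bufsz;
            msgs[i].msg_hdr.msg_iovlen = 1;
        }
        q->depth = 0;  /* rx reuses depth as "entries pending processing" */
    }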
959 struct vector_queue *qi = vp->rx_queue;
961 struct mmsghdr *mmsg_vector = qi->mmsg_vector;
962 void **skbuff_vector = qi->skbuff_vector;
969 prep_queue_for_rx(qi);
973 if (budget > qi->max_depth)
974 budget = qi->max_depth;
977 vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);
990 qi->queue_depth = packet_count;
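The receive pass itself (lines 959-990) re-arms the queue, clamps the polling budget to max_depth, and pulls a whole batch of datagrams in a single recvmmsg() syscall; the count that comes back becomes the new queue_depth, i.e. the number of leading entries that now hold packets to process. A closing sketch with the real recvmmsg(2) call (continuing the model; note the driver's listed call passes qi->max_depth as the vector length, while this sketch uses the clamped budget):

    static int ring_rx_pass(struct ring *q, int fd, struct mmsghdr *msgs,
                            void **slots, size_t bufsz, int budget)
    {
        int n;

        ring_prep_rx(q, slots, msgs, bufsz);  /* re-arm consumed slots */
        if (budget > q->max_depth)
            budget = q->max_depth;            /* never exceed the vector */
        /* One syscall fills up to 'budget' slots; the kernel writes each
         * message's byte count into msgs[i].msg_len. */
        n = recvmmsg(fd, msgs, budget, MSG_DONTWAIT, NULL);
        if (n < 0)
            return 0;    /* nothing available (or error): report empty */
        q->depth = n;    /* entries 0..n-1 now hold packets */
        return n;
    }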