Lines matching refs:qi — cross-reference hits for the struct vector_queue *qi argument, grouped by function, with original source line numbers
249 static int vector_advancehead(struct vector_queue *qi, int advance)
253 qi->head =
254 (qi->head + advance)
255 % qi->max_depth;
258 spin_lock(&qi->tail_lock);
259 qi->queue_depth -= advance;
265 if (qi->queue_depth == 0) {
266 qi->head = 0;
267 qi->tail = 0;
269 queue_depth = qi->queue_depth;
270 spin_unlock(&qi->tail_lock);
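
The hits above show the consumer-side advance: head moves with modulo wraparound, queue_depth (shared with the producer) is updated under tail_lock, and both indices snap back to zero once the ring drains so the next burst occupies a contiguous run of slots. A minimal userspace sketch of that discipline, using a hypothetical struct vq and pthread spinlocks in place of the kernel's spinlock_t (lock initialization elided):

#include <pthread.h>

struct vq {
        int head, tail;               /* ring indices */
        int queue_depth, max_depth;   /* filled slots / ring size */
        void **skbuff_vector;         /* per-slot payload pointers */
        pthread_spinlock_t head_lock, tail_lock;
};

/* Consumer-side advance: queue_depth is shared with the producer,
 * so it is only touched under tail_lock. An empty ring resets both
 * indices, keeping future batches contiguous. */
static int vq_advancehead(struct vq *qi, int advance)
{
        int queue_depth;

        qi->head = (qi->head + advance) % qi->max_depth;

        pthread_spin_lock(&qi->tail_lock);
        qi->queue_depth -= advance;
        if (qi->queue_depth == 0) {
                qi->head = 0;
                qi->tail = 0;
        }
        queue_depth = qi->queue_depth;
        pthread_spin_unlock(&qi->tail_lock);
        return queue_depth;
}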
279 static int vector_advancetail(struct vector_queue *qi, int advance)
283 qi->tail =
284 (qi->tail + advance)
285 % qi->max_depth;
286 spin_lock(&qi->head_lock);
287 qi->queue_depth += advance;
288 queue_depth = qi->queue_depth;
289 spin_unlock(&qi->head_lock);
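
vector_advancetail is the producer-side mirror image: tail wraps the same way, but queue_depth is bumped under head_lock. Continuing the sketch above:

/* Producer-side advance: same shared counter, opposite lock. */
static int vq_advancetail(struct vq *qi, int advance)
{
        int queue_depth;

        qi->tail = (qi->tail + advance) % qi->max_depth;

        pthread_spin_lock(&qi->head_lock);
        qi->queue_depth += advance;
        queue_depth = qi->queue_depth;
        pthread_spin_unlock(&qi->head_lock);
        return queue_depth;
}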
334 static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb)
336 struct vector_private *vp = netdev_priv(qi->dev);
339 struct mmsghdr *mmsg_vector = qi->mmsg_vector;
342 spin_lock(&qi->tail_lock);
343 spin_lock(&qi->head_lock);
344 queue_depth = qi->queue_depth;
345 spin_unlock(&qi->head_lock);
350 if (queue_depth < qi->max_depth) {
352 *(qi->skbuff_vector + qi->tail) = skb;
353 mmsg_vector += qi->tail;
364 queue_depth = vector_advancetail(qi, 1);
367 spin_unlock(&qi->tail_lock);
370 qi->dev->stats.tx_dropped++;
374 netdev_completed_queue(qi->dev, 1, packet_len);
376 spin_unlock(&qi->tail_lock);
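
vector_enqueue holds tail_lock across the whole operation and dips into head_lock only long enough to read a consistent queue_depth; on a full ring the packet is dropped, tx_dropped is bumped, and the byte count is still reported to netdev_completed_queue() (line 374) so BQL accounting stays balanced. A condensed sketch reusing struct vq, with a bare void *item standing in for the sk_buff and the mmsghdr setup elided:

static int vq_enqueue(struct vq *qi, void *item)
{
        int queue_depth;

        pthread_spin_lock(&qi->tail_lock);
        pthread_spin_lock(&qi->head_lock);
        queue_depth = qi->queue_depth;        /* consistent snapshot */
        pthread_spin_unlock(&qi->head_lock);

        if (queue_depth < qi->max_depth) {
                qi->skbuff_vector[qi->tail] = item;   /* fill slot at tail */
                queue_depth = vq_advancetail(qi, 1);  /* publish it */
        } else {
                /* Full ring: the driver drops the skb here, counts it
                 * in dev->stats.tx_dropped, and still completes the
                 * bytes so byte queue limits do not stall. */
        }
        pthread_spin_unlock(&qi->tail_lock);
        return queue_depth;
}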
380 static int consume_vector_skbs(struct vector_queue *qi, int count)
386 for (skb_index = qi->head; skb_index < qi->head + count; skb_index++) {
387 skb = *(qi->skbuff_vector + skb_index);
392 *(qi->skbuff_vector + skb_index) = NULL;
395 qi->dev->stats.tx_bytes += bytes_compl;
396 qi->dev->stats.tx_packets += count;
397 netdev_completed_queue(qi->dev, count, bytes_compl);
398 return vector_advancehead(qi, count);
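
consume_vector_skbs retires a contiguous run of sent slots: it walks forward from head without a modulo, which is safe because vector_send never lets a batch wrap (see the clamp at lines 425-426), frees each skb, and reports totals to the stack before advancing head. A sketch with free() in place of dev_kfree_skb_any() and the netdev stats/BQL calls elided:

#include <stdlib.h>

static int vq_consume(struct vq *qi, int count)
{
        int i;

        /* No wraparound handling needed: the caller capped 'count'
         * so head + count <= max_depth. */
        for (i = qi->head; i < qi->head + count; i++) {
                free(qi->skbuff_vector[i]);
                qi->skbuff_vector[i] = NULL;  /* mark slot reusable */
        }
        return vq_advancehead(qi, count);
}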
408 static int vector_send(struct vector_queue *qi)
410 struct vector_private *vp = netdev_priv(qi->dev);
412 int result = 0, send_len, queue_depth = qi->max_depth;
414 if (spin_trylock(&qi->head_lock)) {
415 if (spin_trylock(&qi->tail_lock)) {
417 queue_depth = qi->queue_depth;
418 spin_unlock(&qi->tail_lock);
422 send_from = qi->mmsg_vector;
423 send_from += qi->head;
425 if (send_len + qi->head > qi->max_depth)
426 send_len = qi->max_depth - qi->head;
452 consume_vector_skbs(qi, result);
462 netif_trans_update(qi->dev);
463 netif_wake_queue(qi->dev);
473 spin_unlock(&qi->head_lock);
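
vector_send only ever trylocks, so a pass that loses the race backs off instead of spinning; note the fallback at line 412, which assumes a full ring (queue_depth = qi->max_depth) when tail_lock is contended. The batch handed to the socket starts at head and is clamped so it never runs past the end of mmsg_vector. A sketch condensed to a single pass (the driver loops while depth remains), where send_batch() is a hypothetical stand-in for the uml_vector_sendmmsg() transport call and the netif_trans_update()/netif_wake_queue() calls are elided:

int send_batch(void **slots, int n);  /* hypothetical transport hook */

static void vq_send(struct vq *qi)
{
        int queue_depth, send_len, result;

        if (!pthread_spin_trylock(&qi->head_lock))
                return;               /* another sender is active */
        if (pthread_spin_trylock(&qi->tail_lock)) {
                queue_depth = qi->queue_depth;
                pthread_spin_unlock(&qi->tail_lock);

                send_len = queue_depth;
                if (send_len + qi->head > qi->max_depth)
                        send_len = qi->max_depth - qi->head;  /* no wrap */

                result = send_batch(&qi->skbuff_vector[qi->head], send_len);
                if (result > 0)
                        vq_consume(qi, result);  /* retire what went out */
        }
        pthread_spin_unlock(&qi->head_lock);
}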
484 static void destroy_queue(struct vector_queue *qi)
488 struct vector_private *vp = netdev_priv(qi->dev);
491 if (qi == NULL)
496 if (qi->skbuff_vector != NULL) {
497 for (i = 0; i < qi->max_depth; i++) {
498 if (*(qi->skbuff_vector + i) != NULL)
499 dev_kfree_skb_any(*(qi->skbuff_vector + i));
501 kfree(qi->skbuff_vector);
504 if (qi->mmsg_vector != NULL) {
505 mmsg_vector = qi->mmsg_vector;
506 for (i = 0; i < qi->max_depth; i++) {
516 kfree(qi->mmsg_vector);
518 kfree(qi);
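
destroy_queue frees whatever is still parked in the ring, then the backing arrays, then the queue itself. Note that the listed source reads qi->dev at line 488 before the NULL test at line 491; a defensive teardown checks first, as the sketch below does. The per-slot iovec cleanup under mmsg_vector (lines 504-516) is elided:

static void vq_destroy(struct vq *qi)
{
        int i;

        if (qi == NULL)
                return;
        if (qi->skbuff_vector != NULL) {
                for (i = 0; i < qi->max_depth; i++)
                        free(qi->skbuff_vector[i]);  /* free(NULL) is a no-op */
                free(qi->skbuff_vector);
        }
        free(qi);
}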
677 static void prep_queue_for_rx(struct vector_queue *qi)
679 struct vector_private *vp = netdev_priv(qi->dev);
680 struct mmsghdr *mmsg_vector = qi->mmsg_vector;
681 void **skbuff_vector = qi->skbuff_vector;
684 if (qi->queue_depth == 0)
686 for (i = 0; i < qi->queue_depth; i++) {
696 qi->queue_depth = 0;
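
On the receive side queue_depth is repurposed: it counts the slots consumed by the last receive pass, so prep_queue_for_rx re-arms only that many entries (always the leading ones, since the bulk receive fills from index 0) and then zeroes the counter. A sketch with a hypothetical alloc_rx_buf() in place of the driver's skb allocation and iovec setup:

void *alloc_rx_buf(void);  /* hypothetical: fresh buffer + header setup */

static void vq_prep_rx(struct vq *qi)
{
        int i;

        if (qi->queue_depth == 0)
                return;               /* ring already fully armed */
        for (i = 0; i < qi->queue_depth; i++)
                qi->skbuff_vector[i] = alloc_rx_buf();
        qi->queue_depth = 0;
}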
963 struct vector_queue *qi = vp->rx_queue;
965 struct mmsghdr *mmsg_vector = qi->mmsg_vector;
966 void **skbuff_vector = qi->skbuff_vector;
973 prep_queue_for_rx(qi);
978 vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);
991 qi->queue_depth = packet_count;
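
The final hits (lines 963-991, in the bulk-receive path on vp->rx_queue) show the RX ring used as a flat batch rather than a true ring: re-arm the slots spent last time, pull up to max_depth datagrams in one system call, and record how many arrived so the next re-arm knows what to replace. A userspace sketch using recvmmsg(2) directly in place of the driver's uml_vector_recvmmsg() wrapper:

#define _GNU_SOURCE
#include <sys/socket.h>

static void vq_rx_pass(struct vq *qi, int rx_fd, struct mmsghdr *mmsg_vector)
{
        int packet_count;

        vq_prep_rx(qi);  /* replace buffers consumed by the last pass */
        packet_count = recvmmsg(rx_fd, mmsg_vector, qi->max_depth, 0, NULL);
        if (packet_count > 0)
                qi->queue_depth = packet_count;
        /* ...each filled mmsg_vector[i] / skbuff_vector[i] pair is
         * then handed up the stack... */
}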