Lines Matching defs:rxq

877 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
878 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
879 * to replenish the iwl->rxq->rx_free.
881 * iwl->rxq is replenished and the READ IDX is updated (updating the
884 * detached from the iwl->rxq. The driver 'processed' idx is updated.
885 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
886 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
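The comment fragments above (source lines 877-886) describe the 3945 receive-buffer lifecycle: buffers move from rx_used to rx_free as pages are attached, from rx_free into the hardware ring at restock time, and back to rx_used once processed, with replenish work scheduled whenever free_count drops to or below RX_LOW_WATERMARK. Below is a minimal user-space sketch of that watermark decision only; the watermark value, the struct, and the helper name are illustrative stand-ins, not the driver's definitions.

    #include <stdbool.h>
    #include <stdio.h>

    #define RX_LOW_WATERMARK 8          /* assumed value, for illustration */

    struct rxq_model {
        int free_count;                 /* buffers ready for the hardware ring */
        int used_count;                 /* buffers waiting for a fresh page    */
    };

    /* Mirrors "free_count drops to or below RX_LOW_WATERMARK" above. */
    static bool need_replenish(const struct rxq_model *q)
    {
        return q->free_count <= RX_LOW_WATERMARK;
    }

    int main(void)
    {
        struct rxq_model q = { .free_count = 6, .used_count = 250 };

        if (need_replenish(&q))
            printf("schedule replenish work (free_count=%d)\n", q.free_count);
        return 0;
    }
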
933 struct il_rx_queue *rxq = &il->rxq;
938 spin_lock_irqsave(&rxq->lock, flags);
939 while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
941 element = rxq->rx_free.next;
946 rxq->bd[rxq->write] =
948 rxq->queue[rxq->write] = rxb;
949 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
950 rxq->free_count--;
952 spin_unlock_irqrestore(&rxq->lock, flags);
955 if (rxq->free_count <= RX_LOW_WATERMARK)
960 if (rxq->write_actual != (rxq->write & ~0x7) ||
961 abs(rxq->write - rxq->read) > 7) {
962 spin_lock_irqsave(&rxq->lock, flags);
963 rxq->need_update = 1;
964 spin_unlock_irqrestore(&rxq->lock, flags);
965 il_rx_queue_update_write_ptr(il, rxq);
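The fragments at source lines 933-965 are the restock path: each iteration pops the head of rx_free, stores its DMA address in rxq->bd[rxq->write], remembers the buffer in rxq->queue[], advances the write index with a power-of-two mask (line 949), and decrements free_count; the device write pointer is only updated when the index crosses an 8-descriptor boundary or drifts more than 7 slots from the read index (lines 960-961). A compilable user-space sketch of that index arithmetic follows; RX_QUEUE_SIZE and RX_QUEUE_MASK mirror the driver's names, everything else is made up for illustration.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define RX_QUEUE_SIZE 256
    #define RX_QUEUE_MASK (RX_QUEUE_SIZE - 1)

    struct ring {
        unsigned int read;          /* last index returned by the device */
        unsigned int write;         /* next slot the driver will fill    */
        unsigned int write_actual;  /* last value written to the device  */
    };

    /* Advance and wrap exactly like rxq->write at source line 949. */
    static void ring_push(struct ring *r)
    {
        r->write = (r->write + 1) & RX_QUEUE_MASK;
    }

    /* Mirror the condition at source lines 960-961. */
    static bool ring_needs_doorbell(const struct ring *r)
    {
        return r->write_actual != (r->write & ~0x7u) ||
               abs((int)r->write - (int)r->read) > 7;
    }

    int main(void)
    {
        struct ring r = { .read = 0, .write = 0, .write_actual = 0 };

        for (int i = 0; i < 9; i++)
            ring_push(&r);
        /* write crossed an 8-descriptor boundary, so tell the device */
        printf("doorbell needed: %d\n", ring_needs_doorbell(&r));
        return 0;
    }

The & ~0x7 keeps the value handed to the device on an 8-descriptor boundary, presumably a hardware alignment requirement; it also batches the register writes so the device is not poked for every single buffer.
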
980 struct il_rx_queue *rxq = &il->rxq;
989 spin_lock_irqsave(&rxq->lock, flags);
990 if (list_empty(&rxq->rx_used)) {
991 spin_unlock_irqrestore(&rxq->lock, flags);
994 spin_unlock_irqrestore(&rxq->lock, flags);
996 if (rxq->free_count > RX_LOW_WATERMARK)
1007 if (rxq->free_count <= RX_LOW_WATERMARK &&
1011 priority, rxq->free_count);
1029 spin_lock_irqsave(&rxq->lock, flags);
1031 if (list_empty(&rxq->rx_used)) {
1032 spin_unlock_irqrestore(&rxq->lock, flags);
1040 element = rxq->rx_used.next;
1046 list_add_tail(&rxb->list, &rxq->rx_free);
1047 rxq->free_count++;
1050 spin_unlock_irqrestore(&rxq->lock, flags);
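Source lines 980-1050 are the allocation/replenish path: the queue lock is held only while manipulating the lists and is dropped around the page allocation itself, so rx_used has to be re-checked after the lock is re-acquired; the allocation flags depend on how free_count compares with RX_LOW_WATERMARK (line 996), and a failure while at or below the watermark is reported (lines 1007-1011). Below is a sketch of the check/unlock/allocate/lock/re-check pattern, with a pthread mutex and malloc standing in for the spinlock and page allocator; all names here are illustrative.

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; void *page; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *rx_used;        /* buffers waiting for a page     */
    static struct node *rx_free;        /* buffers ready for the hardware */
    static int free_count;

    static void replenish_one(void)
    {
        pthread_mutex_lock(&lock);
        if (!rx_used) {                 /* nothing to refill */
            pthread_mutex_unlock(&lock);
            return;
        }
        pthread_mutex_unlock(&lock);

        /* Allocate outside the lock, like the driver allocating a page. */
        void *page = malloc(4096);
        if (!page)
            return;

        pthread_mutex_lock(&lock);
        if (!rx_used) {                 /* list may have drained meanwhile */
            pthread_mutex_unlock(&lock);
            free(page);
            return;
        }
        struct node *n = rx_used;       /* pop from rx_used ...            */
        rx_used = n->next;
        n->page = page;
        n->next = rx_free;              /* ... and push onto rx_free       */
        rx_free = n;
        free_count++;
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        static struct node seed;        /* one empty buffer to start with */

        rx_used = &seed;
        replenish_one();
        return free_count == 1 ? 0 : 1;
    }
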
1055 il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
1059 spin_lock_irqsave(&rxq->lock, flags);
1060 INIT_LIST_HEAD(&rxq->rx_free);
1061 INIT_LIST_HEAD(&rxq->rx_used);
1066 if (rxq->pool[i].page != NULL) {
1067 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
1070 __il_free_pages(il, rxq->pool[i].page);
1071 rxq->pool[i].page = NULL;
1073 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
1078 rxq->read = rxq->write = 0;
1079 rxq->write_actual = 0;
1080 rxq->free_count = 0;
1081 spin_unlock_irqrestore(&rxq->lock, flags);
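il3945_rx_queue_reset() (source lines 1055-1081) re-initializes both lists, unmaps and frees any page still attached to a pool entry, parks every entry back on rx_used, and zeroes the ring indices and free_count. A user-space sketch of the same bookkeeping; malloc/free stand in for the page map/unmap calls and POOL_SIZE is an assumed name and value for the pool length.

    #include <stdlib.h>

    #define POOL_SIZE 320               /* assumed pool length, for illustration */

    struct pool_entry { struct pool_entry *next; void *page; };

    struct rxq_model {
        struct pool_entry pool[POOL_SIZE];
        struct pool_entry *rx_free, *rx_used;
        unsigned int read, write, write_actual, free_count;
    };

    static void rxq_reset(struct rxq_model *q)
    {
        q->rx_free = NULL;
        q->rx_used = NULL;

        /* Release any page still attached and park every entry on rx_used,
         * so a fresh page must be attached before the entry is usable again. */
        for (size_t i = 0; i < POOL_SIZE; i++) {
            free(q->pool[i].page);      /* stands in for unmap + free_pages */
            q->pool[i].page = NULL;
            q->pool[i].next = q->rx_used;
            q->rx_used = &q->pool[i];
        }

        q->read = 0;
        q->write = 0;
        q->write_actual = 0;
        q->free_count = 0;
    }

    int main(void)
    {
        static struct rxq_model q;      /* zero-initialized */

        rxq_reset(&q);
        return q.rx_used ? 0 : 1;
    }
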
1111 il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
1115 if (rxq->pool[i].page != NULL) {
1116 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
1119 __il_free_pages(il, rxq->pool[i].page);
1120 rxq->pool[i].page = NULL;
1124 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1125 rxq->bd_dma);
1127 rxq->rb_stts, rxq->rb_stts_dma);
1128 rxq->bd = NULL;
1129 rxq->rb_stts = NULL;
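il3945_rx_queue_free() (source lines 1111-1129) unmaps and frees any pages left in the pool, then releases the two DMA-coherent areas: the buffer-descriptor array rxq->bd and the read status area rxq->rb_stts. The 4 * RX_QUEUE_SIZE size at line 1124 is consistent with one 32-bit DMA address per ring slot; the toy program below only illustrates that size calculation and is not driver code.

    #include <stdint.h>
    #include <stdio.h>

    #define RX_QUEUE_SIZE 256

    int main(void)
    {
        /* one 32-bit bus address per ring slot */
        size_t bd_bytes = sizeof(uint32_t) * RX_QUEUE_SIZE;

        printf("bd array: %zu bytes\n", bd_bytes);   /* 4 * 256 = 1024 */
        return 0;
    }
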
1182 struct il_rx_queue *rxq = &il->rxq;
1192 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
1193 i = rxq->read;
1196 total_empty = r - rxq->write_actual;
1209 rxb = rxq->queue[i];
1216 rxq->queue[i] = NULL;
1262 spin_lock_irqsave(&rxq->lock, flags);
1272 list_add_tail(&rxb->list, &rxq->rx_used);
1274 list_add_tail(&rxb->list, &rxq->rx_free);
1275 rxq->free_count++;
1278 list_add_tail(&rxb->list, &rxq->rx_used);
1280 spin_unlock_irqrestore(&rxq->lock, flags);
1288 rxq->read = i;
1296 rxq->read = i;
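The fragments around source lines 1182-1296 are the receive-handling loop: the device's closed_rb_num (a 12-bit index, hence the & 0x0FFF at line 1192) says how far the hardware has advanced, and the loop walks rxq->queue[] from the read index toward it, detaching each buffer and recycling it onto rx_free (when the page can go straight back to the hardware, lines 1274-1275) or rx_used (lines 1272 and 1278). The sketch below only models the wrap-around fill computation at line 1196 that decides how many slots need restocking afterwards; the helper name and example values are made up.

    #include <stdio.h>

    #define RX_QUEUE_SIZE 256

    /* How many ring slots need restocking once the driver catches up. */
    static int slots_to_restock(unsigned int closed_rb_num,
                                unsigned int write_actual)
    {
        int r = closed_rb_num & 0x0FFF;    /* 12-bit index from the device */
        int total_empty = r - (int)write_actual;

        if (total_empty < 0)               /* index wrapped around the ring */
            total_empty += RX_QUEUE_SIZE;
        return total_empty;
    }

    int main(void)
    {
        /* device closed buffer 5 after the driver last advanced to 250:
         * 5 - 250 wraps to 11 empty slots on a 256-entry ring */
        printf("%d\n", slots_to_restock(5, 250));
        return 0;
    }
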
1466 il_rx_queue_update_write_ptr(il, &il->rxq);
3827 if (il->rxq.bd)
3828 il3945_rx_queue_free(il, &il->rxq);