Lines Matching defs:rxq
3383 struct ipw_rx_queue *rxq)
3388 spin_lock_irqsave(&rxq->lock, flags);
3390 INIT_LIST_HEAD(&rxq->rx_free);
3391 INIT_LIST_HEAD(&rxq->rx_used);
3397 if (rxq->pool[i].skb != NULL) {
3399 rxq->pool[i].dma_addr,
3401 dev_kfree_skb_irq(rxq->pool[i].skb);
3402 rxq->pool[i].skb = NULL;
3404 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3409 rxq->read = rxq->write = 0;
3410 rxq->free_count = 0;
3411 spin_unlock_irqrestore(&rxq->lock, flags);
3475 if (!priv->rxq)
3476 priv->rxq = ipw_rx_queue_alloc(priv);
3478 ipw_rx_queue_reset(priv, priv->rxq);
3479 if (!priv->rxq) {
3598 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3609 if (priv->rxq) {
3610 ipw_rx_queue_free(priv, priv->rxq);
3611 priv->rxq = NULL;
5049 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5050 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5051 * to replenish the ipw->rxq->rx_free.
5053 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5056 * detached from the ipw->rxq. The driver 'processed' index is updated.
5057 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5058 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
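The comment fragments above (5049-5058) describe the buffer-recycling scheme: every receive buffer belongs to a fixed pool, exhausted entries wait on rxq->rx_used, freshly allocated and DMA-mapped ones wait on rxq->rx_free, and a restock pass hands rx_free entries to the firmware ring, scheduling replenish work whenever free_count drops to RX_LOW_WATERMARK or below. The sketch below is a minimal, self-contained user-space model of that restock decision, not the driver's code; the rx_model type, the printf stand-ins for queue_work() and the register write, and the constant values are illustrative assumptions.

#include <stdio.h>

#define RX_QUEUE_SIZE     32   /* ring slots shared with the firmware (assumed value) */
#define RX_LOW_WATERMARK   8   /* replenish threshold (assumed value) */

struct rx_model {
        int read;        /* last slot the firmware handed back to the driver */
        int write;       /* next slot the driver will give to the firmware   */
        int free_count;  /* buffers currently sitting on the rx_free list    */
};

/* Free ring slots, keeping a small gap so a full ring is never mistaken
 * for an empty one (same idea as ipw_rx_queue_space in the listing). */
static int rx_queue_space(const struct rx_model *q)
{
        int s = q->read - q->write;
        if (s <= 0)
                s += RX_QUEUE_SIZE;
        s -= 2;
        return s < 0 ? 0 : s;
}

/* Restock: move buffers from rx_free into the ring, then decide whether
 * replenish work must be scheduled and whether the hardware WRITE index
 * needs to be updated. */
static void restock(struct rx_model *q)
{
        int old_write = q->write;

        while (rx_queue_space(q) > 0 && q->free_count > 0) {
                /* driver: pop rxq->rx_free, program the RFD, rxq->queue[write] = rxb */
                q->write = (q->write + 1) % RX_QUEUE_SIZE;
                q->free_count--;
        }

        if (q->free_count <= RX_LOW_WATERMARK)
                printf("schedule replenish work\n");            /* stand-in for queue_work()  */

        if (old_write != q->write)
                printf("IPW_RX_WRITE_INDEX <- %d\n", q->write); /* stand-in for ipw_write32() */
}

int main(void)
{
        struct rx_model q = { .read = 0, .write = 0, .free_count = 12 };
        restock(&q);
        return 0;
}

In the driver the same work runs under rxq->lock with interrupts disabled (lines 5100 and 5113), and only the producer side moves here; the read index is advanced by the processing path shown near the end of the listing.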
5094 struct ipw_rx_queue *rxq = priv->rxq;
5100 spin_lock_irqsave(&rxq->lock, flags);
5101 write = rxq->write;
5102 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5103 element = rxq->rx_free.next;
5107 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5109 rxq->queue[rxq->write] = rxb;
5110 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5111 rxq->free_count--;
5113 spin_unlock_irqrestore(&rxq->lock, flags);
5117 if (rxq->free_count <= RX_LOW_WATERMARK)
5121 if (write != rxq->write)
5122 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5134 struct ipw_rx_queue *rxq = priv->rxq;
5139 spin_lock_irqsave(&rxq->lock, flags);
5140 while (!list_empty(&rxq->rx_used)) {
5141 element = rxq->rx_used.next;
5158 list_add_tail(&rxb->list, &rxq->rx_free);
5159 rxq->free_count++;
5161 spin_unlock_irqrestore(&rxq->lock, flags);
5180 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5184 if (!rxq)
5188 if (rxq->pool[i].skb != NULL) {
5190 rxq->pool[i].dma_addr,
5192 dev_kfree_skb(rxq->pool[i].skb);
5196 kfree(rxq);
5201 struct ipw_rx_queue *rxq;
5204 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5205 if (unlikely(!rxq)) {
5209 spin_lock_init(&rxq->lock);
5210 INIT_LIST_HEAD(&rxq->rx_free);
5211 INIT_LIST_HEAD(&rxq->rx_used);
5215 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5219 rxq->read = rxq->write = 0;
5220 rxq->free_count = 0;
5222 return rxq;
8210 i = priv->rxq->read;
8212 if (ipw_rx_queue_space(priv->rxq) > (RX_QUEUE_SIZE / 2))
8216 rxb = priv->rxq->queue[i];
8221 priv->rxq->queue[i] = NULL;
8378 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8385 priv->rxq->read = i;
8391 priv->rxq->read = i;
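The fragments at 8210-8391 show the consuming side of the same ring: the receive path starts at the driver's read index, detaches each filled buffer from rxq->queue[], hands the frame up, returns the spent entry to rx_used, and finally records its progress in rxq->read. Line 8212 also checks whether more than half of the ring is free, which appears to let this pass replenish the ring immediately instead of waiting for the scheduled work (hence the two priv->rxq->read = i updates at 8385 and 8391). Below is a small self-contained sketch of the basic walk; process_rx, hw_read and the used_count stand-in for the rx_used list are illustrative assumptions, not the driver's API.

#include <stdio.h>

#define RX_QUEUE_SIZE 32

struct rx_ring {
        int  read;                     /* first slot not yet processed      */
        void *queue[RX_QUEUE_SIZE];    /* buffers currently owned by the hw */
        int  used_count;               /* stand-in for the rx_used list     */
};

/* Walk from the driver's read index up to (but not including) the
 * firmware's read pointer, detaching and recycling each buffer. */
static void process_rx(struct rx_ring *ring, int hw_read)
{
        int i = ring->read;

        while (i != hw_read) {
                printf("processing slot %d, buffer %p\n", i, ring->queue[i]);
                ring->queue[i] = NULL;   /* detach: the slot no longer owns a buffer          */
                ring->used_count++;      /* driver: list_add_tail(&rxb->list, &rxq->rx_used)  */
                i = (i + 1) % RX_QUEUE_SIZE;
        }

        ring->read = i;   /* remember progress; restock/replenish run separately */
}

int main(void)
{
        struct rx_ring ring = { .read = 0 };
        for (int i = 0; i < 5; i++)
                ring.queue[i] = &ring;   /* dummy buffers so the loop has work to do */
        process_rx(&ring, 5);
        return 0;
}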
11737 if (priv->rxq) {
11738 ipw_rx_queue_free(priv, priv->rxq);
11739 priv->rxq = NULL;