Lines Matching defs:queue

12 Abstract: rt2x00 queue-specific routines.
25 struct data_queue *queue = entry->queue;
26 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
37 frame_size = queue->data_size + queue->desc_size + queue->winfo_size;
95 struct device *dev = entry->queue->rt2x00dev->dev;
112 struct device *dev = entry->queue->rt2x00dev->dev;
491 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
496 * a queue corruption!
501 "Corrupt queue %d, accessing entry which is not ours\n"
503 entry->queue->qid, DRV_PROJECT);
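The lines around 491-503 come from the helper that hands TX data to the driver; the error string guards against queue corruption. A sketch of how that ownership check plausibly reads in context; the get_entry_state() callback name is an assumption based on the driver ops table, not something shown in this listing:

	/*
	 * The entry should already be owned by the software here. If the
	 * driver's get_entry_state() callback (assumed name) reports that
	 * the hardware still owns it, the queue indices are inconsistent.
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		rt2x00_err(rt2x00dev,
			   "Corrupt queue %d, accessing entry which is not ours\n"
			   "Please file bug report to %s\n",
			   entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}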
532 struct data_queue *queue = entry->queue;
534 queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);
540 rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);
543 static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
547 * Check if we need to kick the queue; there are however a few rules
553 * in the queue are less than a certain threshold.
555 if (rt2x00queue_threshold(queue) ||
557 queue->rt2x00dev->ops->lib->kick_queue(queue);
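The fragments at 543-557 show that the kick decision reduces to one condition: kick the hardware when the queue is running low on free entries, or when the frame is not part of a burst (a burst frame is always followed by a related frame, so the kick can wait for the last one). A minimal sketch of the complete helper, assuming the burst state is carried in a txdesc flag such as ENTRY_TXD_BURST, which is not shown in this listing:

static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
				      struct txentry_desc *txdesc)
{
	/*
	 * Kick when few free entries remain, or when this frame does not
	 * belong to a burst and therefore must be transmitted now.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		queue->rt2x00dev->ops->lib->kick_queue(queue);
}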
562 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
602 int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
617 rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);
642 if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
656 if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
658 else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
664 spin_lock(&queue->tx_lock);
666 if (unlikely(rt2x00queue_full(queue))) {
667 rt2x00_dbg(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
668 queue->qid);
673 entry = rt2x00queue_get_entry(queue, Q_INDEX);
677 rt2x00_err(queue->rt2x00dev,
678 "Arrived at non-free entry in the non-full queue %d\n"
680 queue->qid, DRV_PROJECT);
688 * It could be possible that the queue was corrupted and this
708 rt2x00queue_kick_tx_queue(queue, &txdesc);
712 * Pausing the queue has to be serialized with rt2x00lib_txdone(), so we
713 * do this under queue->tx_lock. Bottom halves were already disabled
716 if (rt2x00queue_threshold(queue))
717 rt2x00queue_pause_queue(queue);
719 spin_unlock(&queue->tx_lock);
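Lines 602-719 are the TX submission path. A condensed sketch of how those fragments fit together, omitting the IV copy (line 642) and the descriptor/ownership handling between lines 673 and 708; the sta/local parameters follow the mainline prototype, and rt2x00queue_insert_l2pad()/rt2x00queue_align_frame() are assumed helper names:

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       struct ieee80211_sta *sta, bool local)
{
	struct queue_entry *entry;
	struct txentry_desc txdesc;
	int ret = 0;

	/* Build the TX descriptor from the frame, rate and station data. */
	rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);

	/* Pad or align the payload according to the hardware's DMA rules. */
	if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
	else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
		rt2x00queue_align_frame(skb);

	spin_lock(&queue->tx_lock);

	if (unlikely(rt2x00queue_full(queue))) {
		rt2x00_dbg(queue->rt2x00dev,
			   "Dropping frame due to full tx queue %d\n",
			   queue->qid);
		ret = -ENOBUFS;
		goto out;
	}

	entry = rt2x00queue_get_entry(queue, Q_INDEX);
	/* ... claim the entry, write the TX data and descriptor ... */

	rt2x00queue_kick_tx_queue(queue, &txdesc);

	/*
	 * Pause while still holding tx_lock so pausing stays serialized
	 * with rt2x00lib_txdone(); bottom halves are already disabled here.
	 */
	if (rt2x00queue_threshold(queue))
		rt2x00queue_pause_queue(queue);

out:
	spin_unlock(&queue->tx_lock);
	return ret;
}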
738 * since the beacon queue will get stopped anyway).
787 bool rt2x00queue_for_each_entry(struct data_queue *queue,
800 rt2x00_err(queue->rt2x00dev,
812 spin_lock_irqsave(&queue->index_lock, irqflags);
813 index_start = queue->index[start];
814 index_end = queue->index[end];
815 spin_unlock_irqrestore(&queue->index_lock, irqflags);
823 if (fn(&queue->entries[i], data))
827 for (i = index_start; i < queue->limit; i++) {
828 if (fn(&queue->entries[i], data))
833 if (fn(&queue->entries[i], data))
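The loop fragments at 812-833 show the snapshot-then-iterate pattern: the start and end ring indices are copied under index_lock, and the walk handles wrap-around by splitting into two loops. A sketch of the loop body, assuming the function returns true as soon as the callback fn does:

	spin_lock_irqsave(&queue->index_lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->index_lock, irqflags);

	if (index_start < index_end) {
		/* Contiguous range within the ring. */
		for (i = index_start; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	} else {
		/* Range wraps: walk to the end of the ring, then restart. */
		for (i = index_start; i < queue->limit; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
		for (i = 0; i < index_end; i++) {
			if (fn(&queue->entries[i], data))
				return true;
		}
	}

	return false;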
842 struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
849 rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
854 spin_lock_irqsave(&queue->index_lock, irqflags);
856 entry = &queue->entries[queue->index[index]];
858 spin_unlock_irqrestore(&queue->index_lock, irqflags);
866 struct data_queue *queue = entry->queue;
870 rt2x00_err(queue->rt2x00dev,
875 spin_lock_irqsave(&queue->index_lock, irqflags);
877 queue->index[index]++;
878 if (queue->index[index] >= queue->limit)
879 queue->index[index] = 0;
884 queue->length++;
886 queue->length--;
887 queue->count++;
890 spin_unlock_irqrestore(&queue->index_lock, irqflags);
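Lines 866-890 are the ring-index update. Reconstructed as a whole; the Q_INDEX/Q_INDEX_DONE names for the producer and consumer indices are assumptions based on line 673 and the driver's queue header:

void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
	struct data_queue *queue = entry->queue;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		rt2x00_err(queue->rt2x00dev,
			   "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->index_lock, irqflags);

	/* Advance the ring pointer and wrap at the queue limit. */
	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	/*
	 * Q_INDEX tracks frames handed to the hardware (queue grows),
	 * Q_INDEX_DONE tracks frames the hardware has finished (queue
	 * shrinks, lifetime counter grows).
	 */
	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->index_lock, irqflags);
}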
893 static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
895 switch (queue->qid) {
901 * For TX queues, we have to disable the queue
904 ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
910 void rt2x00queue_pause_queue(struct data_queue *queue)
912 if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
913 !test_bit(QUEUE_STARTED, &queue->flags) ||
914 test_and_set_bit(QUEUE_PAUSED, &queue->flags))
917 rt2x00queue_pause_queue_nocheck(queue);
921 void rt2x00queue_unpause_queue(struct data_queue *queue)
923 if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
924 !test_bit(QUEUE_STARTED, &queue->flags) ||
925 !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
928 switch (queue->qid) {
934 * For TX queues, we have to enable the queue
937 ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
941 * For RX we need to kick the queue now in order to
944 queue->rt2x00dev->ops->lib->kick_queue(queue);
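Lines 893-944 pause and unpause queues; the switch on qid decides whether to stop or wake the corresponding mac80211 queue (TX) or to kick the hardware (RX). A sketch of the unpause side, with the QID_* case labels filled in from the driver's qid enum (the case labels themselves are not shown in this listing):

void rt2x00queue_unpause_queue(struct data_queue *queue)
{
	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    !test_bit(QUEUE_STARTED, &queue->flags) ||
	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
		return;

	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		/* Let mac80211 resume handing us frames for this AC. */
		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
		break;
	case QID_RX:
		/* Kick the RX queue so the hardware starts filling the
		 * (now serviced) ring with received frames again. */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
		break;
	default:
		break;
	}
}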
952 void rt2x00queue_start_queue(struct data_queue *queue)
954 mutex_lock(&queue->status_lock);
956 if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
957 test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
958 mutex_unlock(&queue->status_lock);
962 set_bit(QUEUE_PAUSED, &queue->flags);
964 queue->rt2x00dev->ops->lib->start_queue(queue);
966 rt2x00queue_unpause_queue(queue);
968 mutex_unlock(&queue->status_lock);
972 void rt2x00queue_stop_queue(struct data_queue *queue)
974 mutex_lock(&queue->status_lock);
976 if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
977 mutex_unlock(&queue->status_lock);
981 rt2x00queue_pause_queue_nocheck(queue);
983 queue->rt2x00dev->ops->lib->stop_queue(queue);
985 mutex_unlock(&queue->status_lock);
989 void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
992 (queue->qid == QID_AC_VO) ||
993 (queue->qid == QID_AC_VI) ||
994 (queue->qid == QID_AC_BE) ||
995 (queue->qid == QID_AC_BK);
997 if (rt2x00queue_empty(queue))
1003 * to the queue to make sure the hardware will
1007 queue->rt2x00dev->ops->lib->kick_queue(queue);
1012 * alternative which just waits for the queue to become empty.
1014 if (likely(queue->rt2x00dev->ops->lib->flush_queue))
1015 queue->rt2x00dev->ops->lib->flush_queue(queue, drop);
1018 * The queue flush has failed...
1020 if (unlikely(!rt2x00queue_empty(queue)))
1021 rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
1022 queue->qid);
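Lines 989-1022 flush a queue. A condensed sketch of that flow, under the assumption that a driver may or may not provide its own flush_queue callback:

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	if (rt2x00queue_empty(queue))
		return;

	/*
	 * When pending frames must not be dropped, kick the TX queue so
	 * the hardware will actually transmit what is still queued.
	 */
	if (!drop && tx_queue)
		queue->rt2x00dev->ops->lib->kick_queue(queue);

	/*
	 * Prefer the driver's own flush implementation; the alternative
	 * is simply waiting for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/* The queue flush has failed... */
	if (unlikely(!rt2x00queue_empty(queue)))
		rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
			    queue->qid);
}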
1028 struct data_queue *queue;
1032 * for each queue after it has been properly initialized.
1034 tx_queue_for_each(rt2x00dev, queue)
1035 rt2x00queue_start_queue(queue);
1043 struct data_queue *queue;
1053 tx_queue_for_each(rt2x00dev, queue)
1054 rt2x00queue_stop_queue(queue);
1062 struct data_queue *queue;
1064 tx_queue_for_each(rt2x00dev, queue)
1065 rt2x00queue_flush_queue(queue, drop);
1071 static void rt2x00queue_reset(struct data_queue *queue)
1076 spin_lock_irqsave(&queue->index_lock, irqflags);
1078 queue->count = 0;
1079 queue->length = 0;
1082 queue->index[i] = 0;
1084 spin_unlock_irqrestore(&queue->index_lock, irqflags);
1089 struct data_queue *queue;
1092 queue_for_each(rt2x00dev, queue) {
1093 rt2x00queue_reset(queue);
1095 for (i = 0; i < queue->limit; i++)
1096 rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
1100 static int rt2x00queue_alloc_entries(struct data_queue *queue)
1106 rt2x00queue_reset(queue);
1109 * Allocate all queue entries.
1111 entry_size = sizeof(*entries) + queue->priv_size;
1112 entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
1120 for (i = 0; i < queue->limit; i++) {
1122 entries[i].queue = queue;
1126 QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
1127 sizeof(*entries), queue->priv_size);
1132 queue->entries = entries;
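Lines 1100-1132 allocate the entries for one queue. The trick is that a single kcalloc() holds both the queue_entry array and the per-driver private blocks, with a small offset macro addressing the private area. A sketch of that layout and the init loop; the flags/skb/entry_idx field names are taken from the queue_entry definition, not from this listing:

	/*
	 * One allocation holds 'limit' queue_entry structures followed by
	 * 'limit' driver-private blocks of priv_size bytes each:
	 *
	 *   [entry 0][entry 1]...[entry N-1][priv 0][priv 1]...[priv N-1]
	 *
	 * QUEUE_ENTRY_PRIV_OFFSET() computes the address of priv block i.
	 */
#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	(((char *)(__base)) + ((__limit) * (__esize)) + \
	 ((__index) * (__psize)))

	entry_size = sizeof(*entries) + queue->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
			QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
						sizeof(*entries),
						queue->priv_size);
	}

	queue->entries = entries;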
1137 static void rt2x00queue_free_skbs(struct data_queue *queue)
1141 if (!queue->entries)
1144 for (i = 0; i < queue->limit; i++) {
1145 rt2x00queue_free_skb(&queue->entries[i]);
1149 static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
1154 for (i = 0; i < queue->limit; i++) {
1155 skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
1158 queue->entries[i].skb = skb;
1166 struct data_queue *queue;
1173 tx_queue_for_each(rt2x00dev, queue) {
1174 status = rt2x00queue_alloc_entries(queue);
1205 struct data_queue *queue;
1209 queue_for_each(rt2x00dev, queue) {
1210 kfree(queue->entries);
1211 queue->entries = NULL;
1216 struct data_queue *queue, enum data_queue_qid qid)
1218 mutex_init(&queue->status_lock);
1219 spin_lock_init(&queue->tx_lock);
1220 spin_lock_init(&queue->index_lock);
1222 queue->rt2x00dev = rt2x00dev;
1223 queue->qid = qid;
1224 queue->txop = 0;
1225 queue->aifs = 2;
1226 queue->cw_min = 5;
1227 queue->cw_max = 10;
1229 rt2x00dev->ops->queue_init(queue);
1231 queue->threshold = DIV_ROUND_UP(queue->limit, 10);
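rt2x00queue_init() (lines 1216-1231) seeds generic defaults (AIFS 2, CWmin/CWmax exponents 5 and 10) and then lets the per-chipset queue_init callback fill in the sizing fields before the kick/pause threshold is derived as roughly 10% of the queue depth. A hypothetical example of such a callback, with made-up sizes; real values live in each chip driver:

static void example_queue_init(struct data_queue *queue)
{
	switch (queue->qid) {
	case QID_RX:
		queue->limit = 64;	/* hypothetical ring depth */
		queue->data_size = 2048;
		queue->desc_size = 32;
		queue->winfo_size = 0;
		break;
	default:		/* TX, beacon and ATIM queues */
		queue->limit = 64;
		queue->data_size = 2048;
		queue->desc_size = 32;
		queue->winfo_size = 0;
		break;
	}
	queue->priv_size = 64;	/* hypothetical per-entry driver data */

	/*
	 * rt2x00queue_init() then computes:
	 *	queue->threshold = DIV_ROUND_UP(queue->limit, 10);
	 * which is 7 for the 64-entry queues above.
	 */
}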
1236 struct data_queue *queue;
1250 queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
1251 if (!queue)
1257 rt2x00dev->rx = queue;
1258 rt2x00dev->tx = &queue[1];
1259 rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
1260 rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
1263 * Initialize queue parameters.
1274 tx_queue_for_each(rt2x00dev, queue)
1275 rt2x00queue_init(rt2x00dev, queue, qid++);
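rt2x00queue_allocate() (lines 1236-1275) carves every queue out of one contiguous kcalloc() array and then assigns qids in order. A sketch of that layout, assuming req_atim reflects the REQUIRE_ATIM_QUEUE capability and that the remaining init calls mirror the TX loop shown at line 1274 (QID_RX, QID_BEACON and QID_ATIM are taken from the driver's qid enum):

	/*
	 * One contiguous array of data_queue structures:
	 *   [0]           RX
	 *   [1 .. T]      TX (T = ops->tx_queues)
	 *   [1 + T]       beacon
	 *   [2 + T]       ATIM (only when req_atim is set)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;

	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_VO;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);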