Lines matching defs:cache (mac80211):
2123 void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache)
2127 for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
2128 skb_queue_head_init(&cache->entries[i].skb_list);
2131 void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache)
2135 for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
2136 __skb_queue_purge(&cache->entries[i].skb_list);
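
The two init/destroy helpers listed above give each fragment cache a fixed array of reassembly entries: every entry's skb queue is initialised when the cache is created and purged when it is destroyed, so fragments still queued at teardown are freed rather than leaked. What follows is a minimal userspace sketch of that lifecycle, not the kernel code: the frag_cache and frag_entry types, the frag_cache_* names, and the FRAGMENT_MAX value are illustrative stand-ins for the real structures built around struct sk_buff_head.

#include <stdlib.h>
#include <string.h>

#define FRAGMENT_MAX 4              /* stand-in for IEEE80211_FRAGMENT_MAX */

/* Rough stand-in for one reassembly entry; the real entry holds a
 * struct sk_buff_head plus sequence, PN and key state. */
struct frag_entry {
    void *frags[16];
    unsigned int n_frags;
};

struct frag_cache {
    struct frag_entry entries[FRAGMENT_MAX];
    unsigned int next;              /* next slot to hand out */
};

/* Analogue of ieee80211_init_frag_cache(): start with every entry empty
 * (the kernel calls skb_queue_head_init() on each skb_list instead). */
static void frag_cache_init(struct frag_cache *cache)
{
    memset(cache, 0, sizeof(*cache));
}

/* Analogue of ieee80211_destroy_frag_cache(): free anything still queued
 * (the kernel calls __skb_queue_purge() on each skb_list instead). */
static void frag_cache_destroy(struct frag_cache *cache)
{
    for (unsigned int i = 0; i < FRAGMENT_MAX; i++) {
        struct frag_entry *e = &cache->entries[i];

        for (unsigned int j = 0; j < e->n_frags; j++)
            free(e->frags[j]);
        e->n_frags = 0;
    }
}

int main(void)
{
    struct frag_cache cache;

    frag_cache_init(&cache);
    cache.entries[0].frags[cache.entries[0].n_frags++] = malloc(64);
    frag_cache_destroy(&cache);     /* frees the leftover fragment */
    return 0;
}
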
2140 ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache,
2146 entry = &cache->entries[cache->next++];
2147 if (cache->next >= IEEE80211_FRAGMENT_MAX)
2148 cache->next = 0;
2165 ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache,
2172 idx = cache->next;
2181 entry = &cache->entries[idx];
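
Lines 2146-2148 and 2172-2181 above show the slot management: a new first fragment claims the slot at cache->next, which advances round-robin and wraps at IEEE80211_FRAGMENT_MAX, so the oldest in-progress reassembly is evicted once all slots are in use, while the lookup scans backwards from cache->next so the most recently started entries are checked first. Below is a self-contained sketch of just that index arithmetic; the slot_seq array and the claim_slot/find_slot names are invented for illustration, and the real lookup also matches further state such as the rx queue and the expected next fragment number.

#include <stdio.h>

#define FRAGMENT_MAX 4              /* stand-in for IEEE80211_FRAGMENT_MAX */

/* Per-slot state reduced to the sequence number being reassembled,
 * with -1 meaning "slot unused". */
static int slot_seq[FRAGMENT_MAX] = { -1, -1, -1, -1 };
static unsigned int next_slot;

/* Models the claim at lines 2146-2148: take the slot at next_slot,
 * advance and wrap; whatever was in that slot is simply evicted. */
static unsigned int claim_slot(int seq)
{
    unsigned int idx = next_slot++;

    if (next_slot >= FRAGMENT_MAX)
        next_slot = 0;
    slot_seq[idx] = seq;
    return idx;
}

/* Models the lookup at lines 2172-2181: walk backwards from next_slot
 * so the newest reassemblies are tried first; return -1 if not found. */
static int find_slot(int seq)
{
    int idx = next_slot;

    for (int i = 0; i < FRAGMENT_MAX; i++) {
        idx--;
        if (idx < 0)
            idx = FRAGMENT_MAX - 1;
        if (slot_seq[idx] == seq)
            return idx;
    }
    return -1;
}

int main(void)
{
    for (int seq = 100; seq < 106; seq++)
        claim_slot(seq);            /* 100 and 101 get evicted by 104 and 105 */

    printf("seq 105 -> slot %d\n", find_slot(105));  /* still cached */
    printf("seq 100 -> slot %d\n", find_slot(100));  /* -1, evicted */
    return 0;
}
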
2222 struct ieee80211_fragment_cache *cache = &rx->sdata->frags;
2241 cache = &rx->sta->frags;
2264 entry = ieee80211_reassemble_add(cache, frag, seq,
2296 * fragment cache. Add this fragment to the end of the pending entry.
2298 entry = ieee80211_reassemble_find(cache, frag, seq,
2317 /* Prevent mixed key and fragment cache attacks */
2337 /* Drop this as a mixed key or fragment cache attack, even
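
The defragment handler these matches come from (lines 2222 through 2337) starts with the interface-wide cache at rx->sdata->frags and switches to the per-station cache at rx->sta->frags when there is a station entry for the sender, and the comments at 2317 and 2337 mark the checks that drop a reassembly whose fragments did not all arrive under the same key and protection status. The sketch below models only that acceptance rule and is not the kernel's code: the frag_meta type and the frags_consistent() helper are hypothetical, whereas the kernel records a per-entry key identifier and protection flag on the cache entry and compares each later fragment against them.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical per-fragment record; the field names are illustrative. */
struct frag_meta {
    bool is_protected;              /* fragment arrived encrypted */
    unsigned int key_id;            /* which key decrypted it */
};

/* Accept a reassembly only if every fragment matches the first one's
 * protection status and key; mixing plaintext and encrypted fragments,
 * or fragments decrypted under different keys, is rejected as a mixed
 * key / fragment cache attack. */
static bool frags_consistent(const struct frag_meta *frags, size_t n)
{
    for (size_t i = 1; i < n; i++) {
        if (frags[i].is_protected != frags[0].is_protected)
            return false;
        if (frags[i].is_protected &&
            frags[i].key_id != frags[0].key_id)
            return false;
    }
    return true;
}

int main(void)
{
    struct frag_meta same_key[2] = { { true, 1 }, { true, 1 } };
    struct frag_meta mixed[2]    = { { true, 1 }, { false, 0 } };

    /* exits 0 when the consistent pair passes and the mixed pair fails */
    return (frags_consistent(same_key, 2) &&
            !frags_consistent(mixed, 2)) ? 0 : 1;
}

For CCMP-protected traffic the real handler additionally requires the fragments' packet numbers to increment by exactly one, a check this sketch leaves out.
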
2858 /* flush fast xmit cache if the address path changed */