Lines matching refs:queue (excerpts from the cw1200 wireless driver's TX queue implementation; non-matching lines are elided as "...")

static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;

	/* Only the first locker actually stops the mac80211 queue. */
	if (queue->tx_locked_cnt++ == 0) {
		pr_debug("[TX] Queue %d is locked.\n",
			 queue->queue_id);
		ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
	}
}

static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;

	BUG_ON(!queue->tx_locked_cnt);
	/* Only the last unlocker wakes the mac80211 queue again. */
	if (--queue->tx_locked_cnt == 0) {
		pr_debug("[TX] Queue %d is unlocked.\n",
			 queue->queue_id);
		ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
	}
}

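The pair above forms a counted gate: only the first lock actually calls ieee80211_stop_queue() and only the matching last unlock calls ieee80211_wake_queue(), so nested lock/unlock sections compose. A minimal user-space sketch of the same counting pattern (the gate struct and printouts are illustrative stand-ins, not driver code):

#include <assert.h>
#include <stdio.h>

struct gate {
	int locked_cnt;			/* outstanding lock requests */
};

static void gate_lock(struct gate *g)
{
	if (g->locked_cnt++ == 0)
		printf("queue stopped\n");	/* ieee80211_stop_queue() stand-in */
}

static void gate_unlock(struct gate *g)
{
	assert(g->locked_cnt > 0);	/* mirrors the BUG_ON() above */
	if (--g->locked_cnt == 0)
		printf("queue woken\n");	/* ieee80211_wake_queue() stand-in */
}

int main(void)
{
	struct gate g = { 0 };

	gate_lock(&g);			/* stops the queue */
	gate_lock(&g);			/* nested: counter only */
	gate_unlock(&g);		/* still held */
	gate_unlock(&g);		/* wakes the queue */
	return 0;
}
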
static void __cw1200_queue_gc(struct cw1200_queue *queue,
			      struct list_head *head,
			      bool unlock)
{
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item = NULL, *tmp;
	...
	list_for_each_entry_safe(item, tmp, &queue->queue, head) {
		/* Items age in queueing order; stop at the first unexpired one. */
		if (jiffies - item->queue_timestamp < queue->ttl)
			break;
		--queue->num_queued;
		--queue->link_map_cache[item->txpriv.link_id];
		...
		list_move_tail(&item->head, &queue->free_pool);
	}
	...
	if (queue->overfull) {
		if (queue->num_queued <= (queue->capacity >> 1)) {
			queue->overfull = false;
			if (unlock)
				__cw1200_queue_unlock(queue);
		} else if (item) {
			/* Re-arm the gc timer for the next expiry. */
			unsigned long tmo = item->queue_timestamp + queue->ttl;
			mod_timer(&queue->gc, tmo);
		}
	}
}

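The expiry test works because jiffies arithmetic is unsigned: jiffies - item->queue_timestamp is the elapsed tick count even if the counter wrapped in between. A small stand-alone illustration with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long ttl = 100;
	/* Queued just before the tick counter wrapped around... */
	unsigned long stamp = (unsigned long)-30;
	/* ...and checked shortly after the wrap. */
	unsigned long now = 40;

	/* Unsigned subtraction still yields the true age: 70 ticks. */
	printf("elapsed=%lu expired=%d\n",
	       now - stamp, now - stamp >= ttl);
	return 0;
}
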
static void cw1200_queue_gc(struct timer_list *t)
{
	LIST_HEAD(list);
	struct cw1200_queue *queue =
		from_timer(queue, t, gc);

	spin_lock_bh(&queue->lock);
	__cw1200_queue_gc(queue, &list, true);
	spin_unlock_bh(&queue->lock);
	/* Destroy the reaped skbs outside of the queue lock. */
	cw1200_queue_post_gc(queue->stats, &list);
}

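from_timer() resolves the struct timer_list pointer back to the cw1200_queue that embeds it; it is essentially container_of(). A user-space sketch of that pointer arithmetic (struct names simplified, not the driver's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer { int pending; };

struct queue {
	int queue_id;
	struct timer gc;	/* embedded, like the driver's queue->gc */
};

static void timer_fired(struct timer *t)
{
	/* The equivalent of from_timer(queue, t, gc). */
	struct queue *q = container_of(t, struct queue, gc);

	printf("gc for queue %d\n", q->queue_id);
}

int main(void)
{
	struct queue q = { .queue_id = 2 };

	timer_fired(&q.gc);
	return 0;
}
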
int cw1200_queue_init(struct cw1200_queue *queue,
		      struct cw1200_queue_stats *stats,
		      u8 queue_id,
		      size_t capacity,
		      unsigned long ttl)
{
	size_t i;

	memset(queue, 0, sizeof(*queue));
	queue->stats = stats;
	queue->capacity = capacity;
	queue->queue_id = queue_id;
	queue->ttl = ttl;
	INIT_LIST_HEAD(&queue->queue);
	INIT_LIST_HEAD(&queue->pending);
	INIT_LIST_HEAD(&queue->free_pool);
	spin_lock_init(&queue->lock);
	timer_setup(&queue->gc, cw1200_queue_gc, 0);

	queue->pool = kcalloc(capacity, sizeof(struct cw1200_queue_item),
			      GFP_KERNEL);
	if (!queue->pool)
		return -ENOMEM;

	queue->link_map_cache = kcalloc(stats->map_capacity, sizeof(int),
					GFP_KERNEL);
	if (!queue->link_map_cache) {
		kfree(queue->pool);
		queue->pool = NULL;
		return -ENOMEM;
	}

	/* All items start out on the free pool. */
	for (i = 0; i < capacity; ++i)
		list_add_tail(&queue->pool[i].head, &queue->free_pool);

	return 0;
}

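Everything is pre-allocated here: the pool array holds capacity items and each one is threaded onto free_pool, so the put path never allocates. A hedged user-space sketch of the same pattern, using a plain singly-linked free list instead of the kernel's list_head:

#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item *next;	/* stands in for the list_head member */
	int payload;
};

struct pool {
	struct item *items;	/* backing array, like queue->pool */
	struct item *free_list;	/* like queue->free_pool */
};

static int pool_init(struct pool *p, size_t capacity)
{
	size_t i;

	p->items = calloc(capacity, sizeof(*p->items));
	if (!p->items)
		return -1;	/* the driver returns -ENOMEM here */

	/* Thread every item onto the free list, as the init loop does. */
	p->free_list = NULL;
	for (i = 0; i < capacity; ++i) {
		p->items[i].next = p->free_list;
		p->free_list = &p->items[i];
	}
	return 0;
}

int main(void)
{
	struct pool p;

	if (pool_init(&p, 8) == 0)
		printf("first free item: %p\n", (void *)p.free_list);
	free(p.items);
	return 0;
}
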
int cw1200_queue_clear(struct cw1200_queue *queue)
{
	int i;
	struct cw1200_queue_item *item, *tmp;
	struct cw1200_queue_stats *stats = queue->stats;
	...
	spin_lock_bh(&queue->lock);
	/* Bump the generation so stale packet ids are rejected later. */
	queue->generation++;
	list_splice_tail_init(&queue->queue, &queue->pending);
	list_for_each_entry_safe(item, tmp, &queue->pending, head) {
		...
		list_move_tail(&item->head, &queue->free_pool);
	}
	queue->num_queued = 0;
	queue->num_pending = 0;
	...
	for (i = 0; i < stats->map_capacity; ++i) {
		stats->num_queued -= queue->link_map_cache[i];
		stats->link_map_cache[i] -= queue->link_map_cache[i];
		queue->link_map_cache[i] = 0;
	}
	...
	if (queue->overfull) {
		queue->overfull = false;
		__cw1200_queue_unlock(queue);
	}
	spin_unlock_bh(&queue->lock);
	...
	return 0;
}

void cw1200_queue_deinit(struct cw1200_queue *queue)
{
	cw1200_queue_clear(queue);
	del_timer_sync(&queue->gc);
	INIT_LIST_HEAD(&queue->free_pool);
	kfree(queue->pool);
	kfree(queue->link_map_cache);
	queue->pool = NULL;
	queue->link_map_cache = NULL;
	queue->capacity = 0;
}

size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
				   u32 link_id_map)
{
	size_t ret = 0;
	int i, bit;
	size_t map_capacity = queue->stats->map_capacity;

	spin_lock_bh(&queue->lock);
	if (link_id_map == (u32)-1)
		/* All links: everything queued but not yet in flight. */
		ret = queue->num_queued - queue->num_pending;
	else
		for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1)
			if (link_id_map & bit)
				ret += queue->link_map_cache[i];
	spin_unlock_bh(&queue->lock);
	return ret;
}

int cw1200_queue_put(struct cw1200_queue *queue,
		     struct sk_buff *skb,
		     struct cw1200_txpriv *txpriv)
{
	int ret = 0;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;

	if (txpriv->link_id >= queue->stats->map_capacity)
		return -EINVAL;

	spin_lock_bh(&queue->lock);
	if (!WARN_ON(list_empty(&queue->free_pool))) {
		item = list_first_entry(
			&queue->free_pool, struct cw1200_queue_item, head);
		...
		list_move_tail(&item->head, &queue->queue);
		...
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		...
		++queue->num_queued;
		++queue->link_map_cache[txpriv->link_id];
		...
		/* TX may run in parallel; lock the queue while there are
		 * still spare slots for the other CPUs.
		 */
		if (queue->overfull == false &&
		    queue->num_queued >=
		    (queue->capacity - (num_present_cpus() - 1))) {
			queue->overfull = true;
			__cw1200_queue_lock(queue);
			mod_timer(&queue->gc, jiffies);
		}
	} else {
		ret = -ENOENT;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

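cw1200_queue_mk_packet_id() evidently packs the queue generation, queue id, item generation, and the item's pool index (item - queue->pool) into one 32-bit id, so a later completion can be routed back to the exact slot. A sketch of such packing, assuming one byte per field (the byte order here is an assumption, not confirmed by this excerpt):

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: one byte per field, item index in the low byte. */
static uint32_t mk_packet_id(uint8_t queue_gen, uint8_t queue_id,
			     uint8_t item_gen, uint8_t item_id)
{
	return ((uint32_t)queue_gen << 24) |
	       ((uint32_t)queue_id  << 16) |
	       ((uint32_t)item_gen  <<  8) |
	        (uint32_t)item_id;
}

int main(void)
{
	uint32_t id = mk_packet_id(1, 2, 0, 42);

	printf("packet_id=0x%08x item=%u\n",
	       (unsigned)id, (unsigned)(id & 0xff));
	return 0;
}
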
int cw1200_queue_get(struct cw1200_queue *queue,
		     u32 link_id_map,
		     struct wsm_tx **tx,
		     struct ieee80211_tx_info **tx_info,
		     const struct cw1200_txpriv **txpriv)
{
	int ret = -ENOENT;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	...
	spin_lock_bh(&queue->lock);
	/* Find the first queued item destined for a link in link_id_map. */
	list_for_each_entry(item, &queue->queue, head) {
		...
	}
	...
		/* Hand the frame to the caller and park it on the pending
		 * list until the firmware confirms or requeues it.
		 */
		list_move_tail(&item->head, &queue->pending);
		++queue->num_pending;
		--queue->link_map_cache[item->txpriv.link_id];
	...
	spin_unlock_bh(&queue->lock);
	...
	return ret;
}

int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;	/* the queue was cleared in the meantime */
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];
		...
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

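On the way back in, the id is unpacked and each field is checked: a mismatched queue generation means the queue was cleared since the frame left (-ENOENT), and an out-of-range item index is a hard error (-EINVAL). A sketch of the inverse of the packing above, under the same assumed layout:

#include <stdint.h>
#include <stdio.h>

/* Inverse of the packing sketch: same assumed byte layout. */
static void parse_packet_id(uint32_t id, uint8_t *queue_gen, uint8_t *queue_id,
			    uint8_t *item_gen, uint8_t *item_id)
{
	*queue_gen = (id >> 24) & 0xff;
	*queue_id  = (id >> 16) & 0xff;
	*item_gen  = (id >>  8) & 0xff;
	*item_id   = id & 0xff;
}

int main(void)
{
	uint8_t qg, qi, ig, ii;

	parse_packet_id(0x0102002a, &qg, &qi, &ig, &ii);
	printf("queue_gen=%d queue_id=%d item_gen=%d item=%d\n",
	       qg, qi, ig, ii);
	return 0;
}
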
int cw1200_queue_requeue_all(struct cw1200_queue *queue)
{
	struct cw1200_queue_item *item, *tmp;
	struct cw1200_queue_stats *stats = queue->stats;

	spin_lock_bh(&queue->lock);
	/* Walk pending in reverse so items return in their original order. */
	list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];
		...
		item->generation++;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);
	return 0;
}

int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	...
	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else {
		...
		--queue->num_pending;
		--queue->num_queued;
		++queue->num_sent;
		...
		/* list_move (not list_move_tail) to reuse a warm cache line. */
		list_move(&item->head, &queue->free_pool);

		if (queue->overfull &&
		    (queue->num_queued <= (queue->capacity >> 1))) {
			queue->overfull = false;
			__cw1200_queue_unlock(queue);
		}
	}
	spin_unlock_bh(&queue->lock);
	...
	return ret;
}

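Together with cw1200_queue_put(), this implements hysteresis: the queue locks when it climbs within num_present_cpus() - 1 slots of capacity, but only unlocks after draining to half capacity, which avoids rapid stop/wake flapping. A stand-alone trace of the two thresholds (capacity and CPU count are made-up values):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	size_t capacity = 16, ncpus = 4, n;
	bool overfull = false;

	/* Filling: lock early, leaving one slot per extra CPU. */
	for (n = 0; n <= capacity; ++n) {
		if (!overfull && n >= capacity - (ncpus - 1))
			overfull = true;
		printf("fill  queued=%2zu overfull=%d\n", n, overfull);
	}
	/* Draining: only unlock once down to half capacity. */
	n = capacity;
	while (n-- > 0) {
		if (overfull && n <= capacity / 2)
			overfull = false;
		printf("drain queued=%2zu overfull=%d\n", n, overfull);
	}
	return 0;
}
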
int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
			 struct sk_buff **skb,
			 const struct cw1200_txpriv **txpriv)
{
	...
	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else {
		*skb = item->skb;	/* hand back the frame and its txpriv */
		*txpriv = &item->txpriv;
	}
	spin_unlock_bh(&queue->lock);
	...
}

void cw1200_queue_lock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_lock(queue);
	spin_unlock_bh(&queue->lock);
}

void cw1200_queue_unlock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_unlock(queue);
	spin_unlock_bh(&queue->lock);
}

bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
				     unsigned long *timestamp,
				     u32 pending_frame_id)
{
	struct cw1200_queue_item *item;
	bool ret;

	spin_lock_bh(&queue->lock);
	ret = !list_empty(&queue->pending);
	if (ret)
		/* Lower *timestamp to the oldest xmit time of any pending
		 * frame other than pending_frame_id.
		 */
		list_for_each_entry(item, &queue->pending, head) {
			...
		}
	spin_unlock_bh(&queue->lock);
	return ret;
}

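The caller is expected to seed *timestamp (for example with the current jiffies) and gets back the oldest transmit time among pending frames, which lets it detect frames the firmware has held too long. A sketch of the min-scan, using a plain comparison where the kernel code would use the wraparound-safe time_before():

#include <stdio.h>

int main(void)
{
	/* Hypothetical xmit timestamps of three pending frames. */
	unsigned long stamps[] = { 300, 120, 450 };
	/* The caller seeds the running minimum, e.g. with "now". */
	unsigned long oldest = 500;
	size_t i;

	for (i = 0; i < sizeof(stamps) / sizeof(stamps[0]); ++i) {
		/* The driver would use time_before() here so the
		 * comparison survives jiffies wraparound.
		 */
		if (stamps[i] < oldest)
			oldest = stamps[i];
	}
	printf("oldest pending xmit at %lu\n", oldest);
	return 0;
}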