Lines matching defs:offload
(The matches below are consistent with the Linux kernel's CAN rx-offload layer, apparently drivers/net/can/dev/rx-offload.c; each match is prefixed with its line number in that file.)

9 #include <linux/can/rx-offload.h>
24 can_rx_offload_le(struct can_rx_offload *offload,
27 if (offload->inc)
34 can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
36 if (offload->inc)
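From the two matched conditionals on offload->inc, these helpers abstract the mailbox scan direction so one loop body (see the mailbox loop matched at lines 181-183) works whether mailboxes are walked upwards or downwards. A minimal sketch, assuming the bodies simply pick between <=/>= and ++/--; this and all later sketches assume the headers from the top of the file (<linux/can/dev.h>, <linux/can/rx-offload.h>) plus <linux/skbuff.h> and <linux/err.h>:

static inline bool
can_rx_offload_le(struct can_rx_offload *offload,
		  unsigned int a, unsigned int b)
{
	/* inc selects the scan direction: ascending or descending */
	if (offload->inc)
		return a <= b;
	else
		return a >= b;
}

static inline unsigned int
can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
	/* step the loop variable in the configured direction */
	if (offload->inc)
		return (*val)++;
	else
		return (*val)--;
}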
44 struct can_rx_offload *offload = container_of(napi,
47 struct net_device *dev = offload->dev;
53 (skb = skb_dequeue(&offload->skb_queue))) {
69 if (!skb_queue_empty(&offload->skb_queue))
70 napi_reschedule(&offload->napi);
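The napi_poll matches show the standard NAPI pattern: recover the context with container_of(), drain up to quota skbs from offload->skb_queue, then complete NAPI and reschedule if more frames were queued in the meantime. A sketch of that control flow, with per-frame stats accounting elided:

static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
	struct can_rx_offload *offload = container_of(napi,
						      struct can_rx_offload,
						      napi);
	struct net_device *dev = offload->dev;
	struct sk_buff *skb;
	int work_done = 0;

	while ((work_done < quota) &&
	       (skb = skb_dequeue(&offload->skb_queue))) {
		work_done++;
		/* per-frame rx stats accounting omitted in this sketch */
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);

		/* another interrupt may have queued frames meanwhile */
		if (!skb_queue_empty(&offload->skb_queue))
			napi_reschedule(&offload->napi);
	}

	return work_done;
}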
121 * @offload: pointer to rx_offload context
141 can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
149 if (unlikely(skb_queue_len(&offload->skb_queue) >
150 offload->skb_queue_len_max))
153 skb = offload->mailbox_read(offload, n, &timestamp, drop);
162 offload->dev->stats.rx_dropped++;
163 offload->dev->stats.rx_fifo_errors++;
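can_rx_offload_offload_one() reads a single mailbox via the driver-supplied offload->mailbox_read() callback. The matches show the two key policies: if the software queue already exceeds skb_queue_len_max, the callback is told to drop the frame (the hardware mailbox must still be released), and a read error bumps rx_dropped/rx_fifo_errors. A sketch, assuming a can_rx_offload_get_cb() helper (not in the matches) that exposes a timestamp slot in skb->cb:

static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
	struct sk_buff *skb;
	bool drop = false;
	u32 timestamp;

	/* If the queue is full, ask the driver to drop the frame
	 * but still free the hardware mailbox.
	 */
	if (unlikely(skb_queue_len(&offload->skb_queue) >
		     offload->skb_queue_len_max))
		drop = true;

	skb = offload->mailbox_read(offload, n, &timestamp, drop);
	if (unlikely(!skb))	/* mailbox was empty */
		return NULL;

	if (IS_ERR(skb)) {	/* problem reading the mailbox */
		offload->dev->stats.rx_dropped++;
		offload->dev->stats.rx_fifo_errors++;
		return skb;
	}

	/* stash the hardware timestamp for later sorting (assumed helper) */
	can_rx_offload_get_cb(skb)->timestamp = timestamp;

	return skb;
}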
175 int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
181 for (i = offload->mb_first;
182 can_rx_offload_le(offload, i, offload->mb_last);
183 can_rx_offload_inc(offload, &i)) {
189 skb = can_rx_offload_offload_one(offload, i);
193 __skb_queue_add_sort(&offload->skb_irq_queue, skb,
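The timestamp variant walks every mailbox between mb_first and mb_last using the direction helpers above and inserts each harvested skb into skb_irq_queue sorted by hardware timestamp, so frames are delivered in reception order even when mailboxes fill out of order. A sketch, assuming a 64-bit pending mask of mailboxes to service and a timestamp comparator, here called can_rx_offload_compare (the comparator name is inferred, not shown in the matches):

int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
					 u64 pending)
{
	unsigned int i;
	int received = 0;

	for (i = offload->mb_first;
	     can_rx_offload_le(offload, i, offload->mb_last);
	     can_rx_offload_inc(offload, &i)) {
		struct sk_buff *skb;

		if (!(pending & BIT_ULL(i)))	/* mailbox not pending */
			continue;

		skb = can_rx_offload_offload_one(offload, i);
		if (IS_ERR_OR_NULL(skb))
			continue;

		/* keep the IRQ staging queue sorted by timestamp */
		__skb_queue_add_sort(&offload->skb_irq_queue, skb,
				     can_rx_offload_compare);
		received++;
	}

	return received;
}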
202 int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
208 skb = can_rx_offload_offload_one(offload, 0);
214 __skb_queue_tail(&offload->skb_irq_queue, skb);
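The FIFO variant is simpler: the hardware already delivers frames in order through mailbox 0, so the loop just drains it and appends to the tail. A sketch:

int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
	struct sk_buff *skb;
	int received = 0;

	while (1) {
		skb = can_rx_offload_offload_one(offload, 0);
		if (!skb)		/* FIFO drained */
			break;
		if (IS_ERR(skb))	/* error already accounted */
			continue;

		__skb_queue_tail(&offload->skb_irq_queue, skb);
		received++;
	}

	return received;
}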
222 int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
227 if (skb_queue_len(&offload->skb_queue) >
228 offload->skb_queue_len_max) {
236 __skb_queue_add_sort(&offload->skb_irq_queue, skb,
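can_rx_offload_queue_timestamp() lets a driver hand over an skb it built itself (an error frame, for example) together with a hardware timestamp. Over the queue limit the skb is freed and -ENOBUFS returned; otherwise it is sort-inserted exactly like the mailbox path. A sketch, reusing the assumed cb helper and comparator:

int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
				   struct sk_buff *skb, u32 timestamp)
{
	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		dev_kfree_skb_any(skb);
		return -ENOBUFS;
	}

	can_rx_offload_get_cb(skb)->timestamp = timestamp;

	__skb_queue_add_sort(&offload->skb_irq_queue, skb,
			     can_rx_offload_compare);

	return 0;
}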
244 can_rx_offload_get_echo_skb_queue_timestamp(struct can_rx_offload *offload,
248 struct net_device *dev = offload->dev;
258 err = can_rx_offload_queue_timestamp(offload, skb, timestamp);
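The echo-skb helper fetches the TX echo skb of a completed transmission and feeds it through the timestamped queueing path, so the self-received frame sorts correctly among RX frames; on overflow the error counters are bumped. A sketch, assuming __can_get_echo_skb() returns the payload length and that the function takes a frame_len_ptr out-parameter (both assumptions; neither appears in the matches):

unsigned int
can_rx_offload_get_echo_skb_queue_timestamp(struct can_rx_offload *offload,
					    unsigned int idx, u32 timestamp,
					    unsigned int *frame_len_ptr)
{
	struct net_device *dev = offload->dev;
	struct sk_buff *skb;
	unsigned int len;
	int err;

	skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
	if (!skb)
		return 0;

	err = can_rx_offload_queue_timestamp(offload, skb, timestamp);
	if (err) {
		dev->stats.rx_errors++;
		dev->stats.tx_fifo_errors++;
	}

	return len;
}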
268 int can_rx_offload_queue_tail(struct can_rx_offload *offload,
271 if (skb_queue_len(&offload->skb_queue) >
272 offload->skb_queue_len_max) {
277 __skb_queue_tail(&offload->skb_irq_queue, skb);
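can_rx_offload_queue_tail() is the unsorted counterpart: the same overflow policy, but the skb simply goes to the tail of skb_irq_queue. A sketch:

int can_rx_offload_queue_tail(struct can_rx_offload *offload,
			      struct sk_buff *skb)
{
	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		dev_kfree_skb_any(skb);
		return -ENOBUFS;
	}

	__skb_queue_tail(&offload->skb_irq_queue, skb);

	return 0;
}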
284 can_rx_offload_get_echo_skb_queue_tail(struct can_rx_offload *offload,
288 struct net_device *dev = offload->dev;
298 err = can_rx_offload_queue_tail(offload, skb);
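Its echo-skb wrapper mirrors the timestamp variant but calls can_rx_offload_queue_tail(), for hardware without usable RX timestamps. A sketch under the same assumptions as above:

unsigned int
can_rx_offload_get_echo_skb_queue_tail(struct can_rx_offload *offload,
				       unsigned int idx,
				       unsigned int *frame_len_ptr)
{
	struct net_device *dev = offload->dev;
	struct sk_buff *skb;
	unsigned int len;
	int err;

	skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
	if (!skb)
		return 0;

	err = can_rx_offload_queue_tail(offload, skb);
	if (err) {
		dev->stats.rx_errors++;
		dev->stats.tx_fifo_errors++;
	}

	return len;
}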
308 void can_rx_offload_irq_finish(struct can_rx_offload *offload)
313 if (skb_queue_empty_lockless(&offload->skb_irq_queue))
316 spin_lock_irqsave(&offload->skb_queue.lock, flags);
317 skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
318 spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
320 queue_len = skb_queue_len(&offload->skb_queue);
321 if (queue_len > offload->skb_queue_len_max / 8)
322 netdev_dbg(offload->dev, "%s: queue_len=%d\n",
325 napi_schedule(&offload->napi);
329 void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload)
334 if (skb_queue_empty_lockless(&offload->skb_irq_queue))
337 spin_lock_irqsave(&offload->skb_queue.lock, flags);
338 skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
339 spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
341 queue_len = skb_queue_len(&offload->skb_queue);
342 if (queue_len > offload->skb_queue_len_max / 8)
343 netdev_dbg(offload->dev, "%s: queue_len=%d\n",
347 napi_schedule(&offload->napi);
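Both *_irq_finish() functions run at the end of the interrupt handler: under skb_queue's own lock they splice the lock-free per-IRQ staging queue onto the NAPI-visible skb_queue, emit a debug message once the backlog exceeds an eighth of the limit, and kick NAPI. The matched bodies are identical; the threaded variant presumably differs only in how napi_schedule() is invoked from thread context (wrapping it in local_bh_disable()/local_bh_enable() is my assumption, not shown in the matches). A sketch of the shared flow:

void can_rx_offload_irq_finish(struct can_rx_offload *offload)
{
	unsigned long flags;
	int queue_len;

	/* nothing was staged by this interrupt */
	if (skb_queue_empty_lockless(&offload->skb_irq_queue))
		return;

	/* move the IRQ-private queue onto the NAPI queue in one go */
	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	skb_queue_splice_tail_init(&offload->skb_irq_queue,
				   &offload->skb_queue);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	queue_len = skb_queue_len(&offload->skb_queue);
	if (queue_len > offload->skb_queue_len_max / 8)
		netdev_dbg(offload->dev, "%s: queue_len=%d\n",
			   __func__, queue_len);

	napi_schedule(&offload->napi);
}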
353 struct can_rx_offload *offload,
356 offload->dev = dev;
359 offload->skb_queue_len_max = 2 << fls(weight);
360 offload->skb_queue_len_max *= 4;
361 skb_queue_head_init(&offload->skb_queue);
362 __skb_queue_head_init(&offload->skb_irq_queue);
364 netif_napi_add_weight(dev, &offload->napi, can_rx_offload_napi_poll,
368 __func__, offload->skb_queue_len_max);
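Initialization wires everything together: the queue limit is derived from the NAPI weight, rounded up to a power of two via fls() and scaled by four; both queues are initialized (only skb_queue gets a lock, skb_irq_queue stays IRQ-private); and the poll function is registered. A sketch; the trailing dev_dbg() is implied by the matched __func__/skb_queue_len_max format arguments but its exact form is assumed:

static int can_rx_offload_init_queue(struct net_device *dev,
				     struct can_rx_offload *offload,
				     unsigned int weight)
{
	offload->dev = dev;

	/* limit the queue to roughly 4 * weight, rounded up to a
	 * power of two
	 */
	offload->skb_queue_len_max = 2 << fls(weight);
	offload->skb_queue_len_max *= 4;
	skb_queue_head_init(&offload->skb_queue);	/* locked */
	__skb_queue_head_init(&offload->skb_irq_queue);	/* IRQ-private */

	netif_napi_add_weight(dev, &offload->napi, can_rx_offload_napi_poll,
			      weight);

	dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
		__func__, offload->skb_queue_len_max);

	return 0;
}

For a weight of 64, for instance, fls(64) = 7, so the limit becomes (2 << 7) * 4 = 1024 entries.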
374 struct can_rx_offload *offload)
378 if (offload->mb_first > BITS_PER_LONG_LONG ||
379 offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
382 if (offload->mb_first < offload->mb_last) {
383 offload->inc = true;
384 weight = offload->mb_last - offload->mb_first;
386 offload->inc = false;
387 weight = offload->mb_first - offload->mb_last;
390 return can_rx_offload_init_queue(dev, offload, weight);
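This constructor (by kernel convention can_rx_offload_add_timestamp(); the name itself is not in the matches) validates the mailbox window against the 64-bit pending mask (hence BITS_PER_LONG_LONG), derives the scan direction for the inc/le helpers from the ordering of mb_first and mb_last, and uses the window size as the NAPI weight. A sketch:

int can_rx_offload_add_timestamp(struct net_device *dev,
				 struct can_rx_offload *offload)
{
	unsigned int weight;

	if (offload->mb_first > BITS_PER_LONG_LONG ||
	    offload->mb_last > BITS_PER_LONG_LONG ||
	    !offload->mailbox_read)
		return -EINVAL;

	if (offload->mb_first < offload->mb_last) {
		offload->inc = true;	/* scan mailboxes upwards */
		weight = offload->mb_last - offload->mb_first;
	} else {
		offload->inc = false;	/* scan mailboxes downwards */
		weight = offload->mb_first - offload->mb_last;
	}

	return can_rx_offload_init_queue(dev, offload, weight);
}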
395 struct can_rx_offload *offload, unsigned int weight)
397 if (!offload->mailbox_read)
400 return can_rx_offload_init_queue(dev, offload, weight);
405 struct can_rx_offload *offload,
408 if (offload->mailbox_read)
411 return can_rx_offload_init_queue(dev, offload, weight);
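The remaining two constructors are thin guards around the same init: FIFO mode requires a mailbox_read callback, while manual mode (the driver queues skbs itself via the queue_* helpers) requires that none be set. A sketch of both, assuming the conventional kernel names can_rx_offload_add_fifo() and can_rx_offload_add_manual(), which the matches do not show:

int can_rx_offload_add_fifo(struct net_device *dev,
			    struct can_rx_offload *offload,
			    unsigned int weight)
{
	if (!offload->mailbox_read)	/* FIFO mode needs the callback */
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}

int can_rx_offload_add_manual(struct net_device *dev,
			      struct can_rx_offload *offload,
			      unsigned int weight)
{
	if (offload->mailbox_read)	/* manual mode must not set it */
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}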
415 void can_rx_offload_enable(struct can_rx_offload *offload)
417 napi_enable(&offload->napi);
421 void can_rx_offload_del(struct can_rx_offload *offload)
423 netif_napi_del(&offload->napi);
424 skb_queue_purge(&offload->skb_queue);
425 __skb_queue_purge(&offload->skb_irq_queue);
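Finally, the lifecycle pair: enable just turns NAPI on, and teardown unregisters NAPI and purges both queues, using the locked purge for skb_queue and the lock-free one for the IRQ-private staging queue. A sketch:

void can_rx_offload_enable(struct can_rx_offload *offload)
{
	napi_enable(&offload->napi);
}

void can_rx_offload_del(struct can_rx_offload *offload)
{
	netif_napi_del(&offload->napi);
	skb_queue_purge(&offload->skb_queue);		/* takes the queue lock */
	__skb_queue_purge(&offload->skb_irq_queue);	/* lockless variant */
}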