Lines Matching defs:offload

9 #include <linux/can/rx-offload.h>
24 can_rx_offload_le(struct can_rx_offload *offload,
27 if (offload->inc)
34 can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
36 if (offload->inc)
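
The matches here are from the kernel's CAN RX offload helper, drivers/net/can/rx-offload.c; the leading number on each line is the line number in that file (the can_led_event() match at line 70 and what is evidently can_rx_offload_add_manual() at lines 355-361 date it around v5.10). The two inline helpers at lines 24-37 hide the scan direction of the mailbox loop: with offload->inc set, the walk counts up from mb_first to mb_last, otherwise down. They are static to the file, so the following is an illustration of the logic rather than a public API; the mailbox numbers are invented:

    struct can_rx_offload off = {
            .mb_first = 8,          /* hardware fills mailbox 8 first ... */
            .mb_last  = 1,          /* ... and mailbox 1 last */
            .inc      = false,      /* mb_first > mb_last: count down */
    };
    unsigned int i;

    /* Visits mailboxes 8, 7, ..., 1, then stops: with inc == false,
     * can_rx_offload_le() tests i >= mb_last and can_rx_offload_inc()
     * post-decrements i.
     */
    for (i = off.mb_first;
         can_rx_offload_le(&off, i, off.mb_last);
         can_rx_offload_inc(&off, &i))
            ;       /* process mailbox i here */
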
44 struct can_rx_offload *offload = container_of(napi,
47 struct net_device *dev = offload->dev;
53 (skb = skb_dequeue(&offload->skb_queue))) {
66 if (!skb_queue_empty(&offload->skb_queue))
67 napi_reschedule(&offload->napi);
70 can_led_event(offload->dev, CAN_LED_EVENT_RX);
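
Lines 44-70 are from can_rx_offload_napi_poll(): it drains offload->skb_queue into the stack up to the NAPI quota, completes NAPI, and re-arms itself if an interrupt queued more frames in the meantime; the closing can_led_event() feeds the RX LED trigger. The shape of the loop around the matched lines, reconstructed with the stats bookkeeping elided:

    while ((work_done < quota) &&
           (skb = skb_dequeue(&offload->skb_queue))) {
            work_done++;
            netif_receive_skb(skb);
    }

    if (work_done < quota) {
            napi_complete_done(napi, work_done);

            /* an IRQ may have queued frames after the dequeue loop */
            if (!skb_queue_empty(&offload->skb_queue))
                    napi_reschedule(&offload->napi);
    }
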
120 * @offload: pointer to rx_offload context
140 can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
148 if (unlikely(skb_queue_len(&offload->skb_queue) >
149 offload->skb_queue_len_max))
152 skb = offload->mailbox_read(offload, n, &timestamp, drop);
161 offload->dev->stats.rx_dropped++;
162 offload->dev->stats.rx_fifo_errors++;
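
Lines 140-162 are the core of can_rx_offload_offload_one(): once skb_queue_len exceeds skb_queue_len_max it sets drop, asks the driver's mailbox_read() callback for the frame anyway (so the mailbox still gets freed), and accounts the loss in rx_dropped and rx_fifo_errors when the callback returns an ERR_PTR; a NULL return means the mailbox was empty. A sketch of such a callback follows; every foo_* name, the register layout, and the priv struct are invented for illustration:

    #include <linux/can/dev.h>
    #include <linux/can/rx-offload.h>
    #include <linux/io.h>

    #define FOO_IFLAG       0x00                    /* invented registers */
    #define FOO_TIME(mb)    (0x10 + (mb) * 0x10)
    #define FOO_ID(mb)      (0x14 + (mb) * 0x10)

    struct foo_priv {
            struct can_priv can;            /* CAN core state, must be first */
            struct can_rx_offload offload;
            void __iomem *base;             /* hypothetical register window */
    };

    static struct sk_buff *foo_mailbox_read(struct can_rx_offload *offload,
                                            unsigned int mb, u32 *timestamp,
                                            bool drop)
    {
            struct foo_priv *priv = container_of(offload, struct foo_priv,
                                                 offload);
            struct can_frame *cf;
            struct sk_buff *skb;

            /* Mailbox empty: nothing to do. */
            if (!(readl(priv->base + FOO_IFLAG) & BIT(mb)))
                    return NULL;

            /* Queue full: release the mailbox, report the lost frame. */
            if (drop) {
                    writel(BIT(mb), priv->base + FOO_IFLAG);
                    return ERR_PTR(-ENOBUFS);
            }

            skb = alloc_can_skb(offload->dev, &cf);
            if (unlikely(!skb))
                    return ERR_PTR(-ENOMEM);

            *timestamp = readl(priv->base + FOO_TIME(mb));
            cf->can_id = readl(priv->base + FOO_ID(mb));
            /* ... fill DLC and data bytes from the mailbox registers ... */

            writel(BIT(mb), priv->base + FOO_IFLAG);        /* ack */

            return skb;
    }
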
174 int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
182 for (i = offload->mb_first;
183 can_rx_offload_le(offload, i, offload->mb_last);
184 can_rx_offload_inc(offload, &i)) {
190 skb = can_rx_offload_offload_one(offload, i);
201 spin_lock_irqsave(&offload->skb_queue.lock, flags);
202 skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
203 spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
205 queue_len = skb_queue_len(&offload->skb_queue);
206 if (queue_len > offload->skb_queue_len_max / 8)
207 netdev_dbg(offload->dev, "%s: queue_len=%d\n",
210 can_rx_offload_schedule(offload);
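
can_rx_offload_irq_offload_timestamp() (lines 174-210) walks the configured mailbox range, collects frames into a local queue, splices that into offload->skb_queue under the queue lock, and then schedules NAPI. It also takes a second argument, a u64 bitmask of pending mailboxes, on a continuation line this search did not match; mailboxes whose bit is clear are skipped. Called from a hard-IRQ handler of the hypothetical foo driver above:

    static irqreturn_t foo_irq(int irq, void *dev_id)
    {
            struct net_device *dev = dev_id;
            struct foo_priv *priv = netdev_priv(dev);
            u64 pending;

            /* one bit per RX mailbox; the core skips the clear ones */
            pending = readl(priv->base + FOO_IFLAG);
            if (!pending)
                    return IRQ_NONE;

            can_rx_offload_irq_offload_timestamp(&priv->offload, pending);

            return IRQ_HANDLED;
    }
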
217 int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
223 skb = can_rx_offload_offload_one(offload, 0);
229 skb_queue_tail(&offload->skb_queue, skb);
234 can_rx_offload_schedule(offload);
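
The FIFO variant (lines 217-234) has no range to walk: each call reads exactly one frame from slot 0 via can_rx_offload_offload_one() and queues it. Drivers for read-to-advance FIFOs therefore call it in a loop until the hardware's "frame available" flag clears, along these lines (FOO_FIFO_AVAILABLE is invented, as before):

    /* e.g. inside foo_irq(): drain the RX FIFO one frame per call */
    while (readl(priv->base + FOO_IFLAG) & FOO_FIFO_AVAILABLE)
            can_rx_offload_irq_offload_fifo(&priv->offload);
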
240 int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
246 if (skb_queue_len(&offload->skb_queue) >
247 offload->skb_queue_len_max) {
255 spin_lock_irqsave(&offload->skb_queue.lock, flags);
256 __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
257 spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
259 can_rx_offload_schedule(offload);
265 unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
268 struct net_device *dev = offload->dev;
278 err = can_rx_offload_queue_sorted(offload, skb, timestamp);
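
can_rx_offload_get_echo_skb() (lines 265-278) fetches the loopback skb saved at transmit time, stamps it with the hardware TX timestamp, and runs it through can_rx_offload_queue_sorted() so self-received frames appear in the right order relative to RX traffic; it returns the payload length for the TX byte counter. This appears to be the three-argument form (offload, idx, timestamp); later kernels add a frame_len_ptr argument. A TX-done handler might use it like this (sketch, FOO_TX_TIME invented):

    u32 ts = readl(priv->base + FOO_TX_TIME);

    dev->stats.tx_bytes += can_rx_offload_get_echo_skb(&priv->offload,
                                                       0 /* echo idx */, ts);
    dev->stats.tx_packets++;
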
288 int can_rx_offload_queue_tail(struct can_rx_offload *offload,
291 if (skb_queue_len(&offload->skb_queue) >
292 offload->skb_queue_len_max) {
297 skb_queue_tail(&offload->skb_queue, skb);
298 can_rx_offload_schedule(offload);
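
Both queueing entry points free the skb and return -ENOBUFS once skb_queue_len exceeds skb_queue_len_max: can_rx_offload_queue_sorted() (lines 240-259) inserts by timestamp under the queue lock via __skb_queue_add_sort(), while can_rx_offload_queue_tail() (lines 288-298) simply appends, for hardware without usable timestamps. A driver-generated error frame could be queued like this (sketch; the timestamp source is invented):

    struct can_frame *cf;
    struct sk_buff *skb;
    u32 ts = readl(priv->base + FOO_TIME_NOW);

    skb = alloc_can_err_skb(dev, &cf);
    if (skb) {
            cf->can_id |= CAN_ERR_CRTL;
            cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

            if (can_rx_offload_queue_sorted(&priv->offload, skb, ts))
                    dev->stats.rx_fifo_errors++;    /* core freed the skb */
    }

Hardware without timestamps would call can_rx_offload_queue_tail(&priv->offload, skb) instead.
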
305 struct can_rx_offload *offload,
308 offload->dev = dev;
311 offload->skb_queue_len_max = 2 << fls(weight);
312 offload->skb_queue_len_max *= 4;
313 skb_queue_head_init(&offload->skb_queue);
315 netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
318 __func__, offload->skb_queue_len_max);
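
Lines 311-312 derive the queue limit from the NAPI weight: 2 << fls(weight) lands between two and four times the weight, and the * 4 quadruples that, so skb_queue_len_max ends up at 8 to 16 times the weight of headroom before can_rx_offload_offload_one() starts dropping. Worked through for a 64-mailbox controller (weight 63):

    weight = 63   ->  fls(63) = 6     (highest set bit of 0b111111)
    2 << 6        =  128
    128 * 4       =  512              ->  skb_queue_len_max = 512
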
324 struct can_rx_offload *offload)
328 if (offload->mb_first > BITS_PER_LONG_LONG ||
329 offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
332 if (offload->mb_first < offload->mb_last) {
333 offload->inc = true;
334 weight = offload->mb_last - offload->mb_first;
336 offload->inc = false;
337 weight = offload->mb_first - offload->mb_last;
340 return can_rx_offload_init_queue(dev, offload, weight);
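
can_rx_offload_add_timestamp() (lines 324-340) requires a mailbox_read callback, checks both mailbox indices against BITS_PER_LONG_LONG so they fit the u64 pending mask, and derives the scan direction and the NAPI weight from their order: mb_first < mb_last scans upward, anything else downward. Probe-time setup might look like this (foo names invented, as above):

    priv->offload.mailbox_read = foo_mailbox_read;
    priv->offload.mb_first = 8;     /* newest frame lands here ... */
    priv->offload.mb_last = 1;      /* ... oldest here: scan downward */

    err = can_rx_offload_add_timestamp(dev, &priv->offload);
    if (err)
            goto out_free_candev;   /* hypothetical error label */
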
345 struct can_rx_offload *offload, unsigned int weight)
347 if (!offload->mailbox_read)
350 return can_rx_offload_init_queue(dev, offload, weight);
355 struct can_rx_offload *offload,
358 if (offload->mailbox_read)
361 return can_rx_offload_init_queue(dev, offload, weight);
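
Lines 344-361 evidently belong to the two other registration variants: can_rx_offload_add_fifo(), which takes an explicit NAPI weight and needs mailbox_read set, and can_rx_offload_add_manual(), its inverse for drivers that only ever queue skbs themselves and must leave mailbox_read NULL (line 358 rejects a set callback). For example (sketch):

    /* FIFO hardware: one mailbox, caller chooses the weight */
    priv->offload.mailbox_read = foo_mailbox_read;
    err = can_rx_offload_add_fifo(dev, &priv->offload, 32);

    /* or, manual mode: no callback, frames arrive only through
     * can_rx_offload_queue_tail()/can_rx_offload_queue_sorted()
     */
    err = can_rx_offload_add_manual(dev, &priv->offload, 32);
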
365 void can_rx_offload_enable(struct can_rx_offload *offload)
367 napi_enable(&offload->napi);
371 void can_rx_offload_del(struct can_rx_offload *offload)
373 netif_napi_del(&offload->napi);
374 skb_queue_purge(&offload->skb_queue);
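
can_rx_offload_enable() (lines 365-367) wraps napi_enable(); the matching can_rx_offload_disable() is an inline in linux/can/rx-offload.h, which is why it does not show up in this listing. can_rx_offload_del() (lines 371-374) removes the NAPI context and purges whatever is still queued. Sketch of the resulting lifecycle:

    /* probe */
    err = can_rx_offload_add_timestamp(dev, &priv->offload);

    /* ndo_open: enable NAPI before unmasking the RX interrupt */
    can_rx_offload_enable(&priv->offload);

    /* ndo_stop: mask the interrupt, then stop NAPI */
    can_rx_offload_disable(&priv->offload);

    /* remove */
    can_rx_offload_del(&priv->offload);
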