Lines Matching defs:caifd
94 struct caif_device_entry *caifd;
96 caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
97 if (!caifd)
99 caifd->pcpu_refcnt = alloc_percpu(int);
100 if (!caifd->pcpu_refcnt) {
101 kfree(caifd);
104 caifd->netdev = dev;
106 return caifd;
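
The matched lines 94-106 above appear to come from the device-entry allocator (caif_device_alloc() in net/caif/caif_dev.c, assuming the usual kernel layout). A minimal sketch of how these fragments fit together follows; the NULL return paths and the dev_hold() call between the matched lines are assumptions filled in from the customary shape of this helper.

/* Sketch: allocate a caif_device_entry for a net_device.
 * The error returns and dev_hold() are assumed; the rest mirrors the
 * matched lines above. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
        struct caif_device_entry *caifd;

        caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
        if (!caifd)
                return NULL;                    /* assumed error path */

        /* Per-CPU reference counter backing caifd_hold()/caifd_put() */
        caifd->pcpu_refcnt = alloc_percpu(int);
        if (!caifd->pcpu_refcnt) {
                kfree(caifd);
                return NULL;                    /* assumed error path */
        }

        caifd->netdev = dev;
        dev_hold(dev);                          /* assumed: pin the net_device */
        return caifd;
}
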
113 struct caif_device_entry *caifd;
115 list_for_each_entry_rcu(caifd, &caifdevs->list, list,
117 if (caifd->netdev == dev)
118 return caifd;
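
Lines 113-118 are the RCU lookup of the entry belonging to a given net_device. A sketch of the surrounding function follows; the caifdevs declaration and the lockdep condition passed to list_for_each_entry_rcu() are assumptions based on the usual form of this lookup.

/* Sketch: find the caif_device_entry for a net_device. */
static struct caif_device_entry *caif_get(struct net_device *dev)
{
        struct caif_device_entry_list *caifdevs =
                caif_device_list(dev_net(dev)); /* assumed helper from the same file */
        struct caif_device_entry *caifd;

        list_for_each_entry_rcu(caifd, &caifdevs->list, list,
                                lockdep_rtnl_is_held()) {  /* assumed cond argument */
                if (caifd->netdev == dev)
                        return caifd;
        }
        return NULL;
}

Callers are expected to hold rcu_read_lock() (or the RTNL) across the lookup and to take a reference with caifd_hold() before dropping it, which is exactly the pattern visible in the later fragments.
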
125 struct caif_device_entry *caifd;
132 caifd = caif_get(skb->dev);
134 WARN_ON(caifd == NULL);
135 if (!caifd) {
140 caifd_hold(caifd);
143 spin_lock_bh(&caifd->flow_lock);
144 send_xoff = caifd->xoff;
145 caifd->xoff = false;
146 dtor = caifd->xoff_skb_dtor;
148 if (WARN_ON(caifd->xoff_skb != skb))
151 caifd->xoff_skb = NULL;
152 caifd->xoff_skb_dtor = NULL;
154 spin_unlock_bh(&caifd->flow_lock);
160 caifd->layer.up->
161 ctrlcmd(caifd->layer.up,
163 caifd->layer.id);
164 caifd_put(caifd);
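
Lines 125-164 come from the skb-destructor callback that re-enables flow after a flow-off condition. The sketch below shows how these fragments likely connect; the early exits, the dtor invocation and the FLOW_ON constant between the matched lines are assumptions.

/* Sketch: destructor callback installed on the skb that triggered xoff.
 * Clears the xoff state under flow_lock, runs the hijacked original
 * destructor, then signals FLOW_ON up the CAIF stack. */
static void caif_flow_cb(struct sk_buff *skb)
{
        struct caif_device_entry *caifd;
        void (*dtor)(struct sk_buff *skb) = NULL;
        bool send_xoff;

        rcu_read_lock();
        caifd = caif_get(skb->dev);

        WARN_ON(caifd == NULL);
        if (!caifd) {
                rcu_read_unlock();              /* assumed early exit */
                return;
        }

        caifd_hold(caifd);                      /* keep the entry alive outside RCU */
        rcu_read_unlock();

        spin_lock_bh(&caifd->flow_lock);
        send_xoff = caifd->xoff;
        caifd->xoff = false;
        dtor = caifd->xoff_skb_dtor;

        if (WARN_ON(caifd->xoff_skb != skb))
                skb = NULL;                     /* assumed: never touch a foreign skb */

        caifd->xoff_skb = NULL;
        caifd->xoff_skb_dtor = NULL;

        spin_unlock_bh(&caifd->flow_lock);

        if (dtor && skb)
                dtor(skb);                      /* assumed: run the original destructor */

        if (send_xoff)
                caifd->layer.up->
                        ctrlcmd(caifd->layer.up,
                                _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
                                caifd->layer.id);
        caifd_put(caifd);
}
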
170 struct caif_device_entry *caifd =
178 skb->dev = caifd->netdev;
183 if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
186 if (unlikely(caifd->xoff))
189 if (likely(!netif_queue_stopped(caifd->netdev))) {
201 high = (caifd->netdev->tx_queue_len * q_high) / 100;
207 spin_lock_bh(&caifd->flow_lock);
208 if (caifd->xoff) {
209 spin_unlock_bh(&caifd->flow_lock);
221 netif_queue_stopped(caifd->netdev),
223 caifd->xoff = true;
224 caifd->xoff_skb = skb;
225 caifd->xoff_skb_dtor = skb->destructor;
227 spin_unlock_bh(&caifd->flow_lock);
229 caifd->layer.up->ctrlcmd(caifd->layer.up,
231 caifd->layer.id);
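
Lines 170-231 belong to the transmit path; the interesting part is the flow-off handling, which hijacks skb->destructor so that caif_flow_cb() runs once the driver frees the frame. A condensed sketch follows. The q_high threshold, the qdisc backlog check, the noxoff/dev_queue_xmit() tail and the rcu_read_lock_bh() bracketing are assumptions (the queue-length heuristic differs between kernel revisions), and the pr_debug() visible at line 221 is elided.

/* Condensed sketch of the transmit path's flow-off handling. */
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
        int err, high = 0;
        struct caif_device_entry *caifd =
                container_of(layer, struct caif_device_entry, layer);
        struct sk_buff *skb;

        rcu_read_lock_bh();

        skb = cfpkt_tonative(pkt);
        skb->dev = caifd->netdev;
        skb_reset_network_header(skb);
        skb->protocol = htons(ETH_P_CAIF);

        /* Queueless devices, or devices already in xoff, need no handling */
        if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
                goto noxoff;
        if (unlikely(caifd->xoff))
                goto noxoff;

        if (likely(!netif_queue_stopped(caifd->netdev))) {
                struct netdev_queue *txq;
                struct Qdisc *sch;

                /* High watermark as a percentage of the TX queue length;
                 * q_high is an assumed module-level threshold (percent). */
                txq = netdev_get_tx_queue(skb->dev, 0);
                sch = rcu_dereference_bh(txq->qdisc);
                high = (caifd->netdev->tx_queue_len * q_high) / 100;
                if (likely(sch->q.qlen < high))
                        goto noxoff;
        }

        spin_lock_bh(&caifd->flow_lock);
        if (caifd->xoff) {
                spin_unlock_bh(&caifd->flow_lock);
                goto noxoff;
        }

        /* Hijack the skb destructor: caif_flow_cb() will restore it and
         * send FLOW_ON when the frame leaves the device queue. */
        caifd->xoff = true;
        caifd->xoff_skb = skb;
        caifd->xoff_skb_dtor = skb->destructor;
        skb->destructor = caif_flow_cb;
        spin_unlock_bh(&caifd->flow_lock);

        caifd->layer.up->ctrlcmd(caifd->layer.up,
                                 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
                                 caifd->layer.id);
noxoff:
        rcu_read_unlock_bh();

        err = dev_queue_xmit(skb);
        if (err > 0)
                err = -EIO;
        return err;
}
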
250 struct caif_device_entry *caifd;
256 caifd = caif_get(dev);
258 if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
259 !netif_oper_up(caifd->netdev)) {
266 caifd_hold(caifd);
269 err = caifd->layer.up->receive(caifd->layer.up, pkt);
276 caifd_put(caifd);
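
Lines 250-276 are the packet_type receive handler; it uses the same hold-then-drop-RCU sequence as the flow callback. Sketch below; the handler signature, the drop path and the -EILSEQ cleanup between matched lines are assumptions based on the usual structure.

/* Sketch: packet_type handler feeding frames into the CAIF stack. */
static int receive(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *pkttype, struct net_device *orig_dev)
{
        struct cfpkt *pkt;
        struct caif_device_entry *caifd;
        int err;

        pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

        rcu_read_lock();
        caifd = caif_get(dev);

        if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
            !netif_oper_up(caifd->netdev)) {
                rcu_read_unlock();
                kfree_skb(skb);                 /* assumed drop path */
                return NET_RX_DROP;
        }

        /* Hold the entry while the packet climbs the CAIF stack */
        caifd_hold(caifd);
        rcu_read_unlock();

        err = caifd->layer.up->receive(caifd->layer.up, pkt);

        /* assumed: on -EILSEQ the packet was not consumed, free it here */
        if (err == -EILSEQ)
                cfpkt_destroy(pkt);

        caifd_put(caifd);

        return err != 0 ? NET_RX_DROP : NET_RX_SUCCESS;
}
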
290 struct caif_device_entry *caifd;
294 caifd = caif_get(dev);
295 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
300 caifd_hold(caifd);
303 caifd->layer.up->ctrlcmd(caifd->layer.up,
307 caifd->layer.id);
308 caifd_put(caifd);
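
Lines 290-308 are the flow-control hook handed to the driver; it translates an on/off flag into CAIF control commands sent up the stack. Sketch below; the 'on' parameter and the ternary selecting the indication are assumptions consistent with the matched lines.

/* Sketch: driver-facing flow control hook. */
static void dev_flowctrl(struct net_device *dev, int on)
{
        struct caif_device_entry *caifd;

        rcu_read_lock();

        caifd = caif_get(dev);
        if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
                rcu_read_unlock();
                return;
        }

        caifd_hold(caifd);
        rcu_read_unlock();

        caifd->layer.up->ctrlcmd(caifd->layer.up,
                                 on ?
                                 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
                                 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
                                 caifd->layer.id);
        caifd_put(caifd);
}
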
318 struct caif_device_entry *caifd;
325 caifd = caif_device_alloc(dev);
326 if (!caifd)
328 *layer = &caifd->layer;
329 spin_lock_init(&caifd->flow_lock);
343 list_add_rcu(&caifd->list, &caifdevs->list);
345 strscpy(caifd->layer.name, dev->name,
346 sizeof(caifd->layer.name));
347 caifd->layer.transmit = transmit;
350 &caifd->layer,
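
Lines 318-350 belong to the enrolment path that allocates an entry, publishes it on the per-net list and registers the layer with the CAIF configuration object. A condensed sketch follows; the function signature, the PHY preference constant and the exact cfcnfg_add_phy_layer() argument list are assumptions that vary between kernel versions.

/* Condensed sketch of device enrolment. */
int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
                    struct cflayer *link_support, int head_room,
                    struct cflayer **layer,
                    int (**rcv_func)(struct sk_buff *, struct net_device *,
                                     struct packet_type *,
                                     struct net_device *))
{
        struct caif_device_entry_list *caifdevs =
                caif_device_list(dev_net(dev));
        struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
        struct caif_device_entry *caifd;
        int res;

        caifd = caif_device_alloc(dev);
        if (!caifd)
                return -ENOMEM;
        *layer = &caifd->layer;
        spin_lock_init(&caifd->flow_lock);

        mutex_lock(&caifdevs->lock);            /* assumed: list mutation under the list mutex */
        list_add_rcu(&caifd->list, &caifdevs->list);

        strscpy(caifd->layer.name, dev->name,
                sizeof(caifd->layer.name));
        caifd->layer.transmit = transmit;
        res = cfcnfg_add_phy_layer(cfg, dev,
                                   &caifd->layer,
                                   CFPHYPREF_HIGH_BW,   /* assumed: derived from caifdev->link_select */
                                   link_support,
                                   caifdev->use_fcs,
                                   head_room);
        mutex_unlock(&caifdevs->lock);

        if (rcv_func)
                *rcv_func = receive;            /* assumed: hand back the receive handler */
        return res;
}
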
367 struct caif_device_entry *caifd = NULL;
378 caifd = caif_get(dev);
379 if (caifd == NULL && dev->type != ARPHRD_CAIF)
384 if (caifd != NULL)
409 caifd = caif_get(dev);
410 if (caifd == NULL) {
415 caifd->xoff = false;
416 cfcnfg_set_phy_state(cfg, &caifd->layer, true);
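
Lines 367-416 are from the netdevice notifier: line 379 gates on non-CAIF devices, the NETDEV_REGISTER arm enrols new devices, and the NETDEV_UP arm (409-416) clears xoff and marks the PHY up. The sketch below covers the prologue and these two arms; the cfserl link-support creation for use_frag devices and the error handling are simplified assumptions.

/* Sketch: prologue plus the NETDEV_REGISTER / NETDEV_UP arms of the
 * netdevice notifier. */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
                              void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct caif_device_entry *caifd = NULL;
        struct caif_dev_common *caifdev;
        struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
        struct caif_device_entry_list *caifdevs =
                caif_device_list(dev_net(dev)); /* used by the arms sketched below */
        struct cflayer *layer, *link_support = NULL;
        int head_room = 0;

        caifd = caif_get(dev);
        if (caifd == NULL && dev->type != ARPHRD_CAIF)
                return 0;                       /* neither enrolled nor a CAIF device */

        switch (what) {
        case NETDEV_REGISTER:
                if (caifd != NULL)
                        break;                  /* already enrolled */

                caifdev = netdev_priv(dev);
                /* assumed: creation of a cfserl link-support layer for
                 * use_frag devices is elided in this sketch */
                if (caif_enroll_dev(dev, caifdev, link_support, head_room,
                                    &layer, NULL) != 0)
                        break;                  /* assumed simplified error path */
                caifdev->flowctrl = dev_flowctrl;
                break;

        case NETDEV_UP:
                rcu_read_lock();

                caifd = caif_get(dev);
                if (caifd == NULL) {
                        rcu_read_unlock();
                        break;
                }

                caifd->xoff = false;
                cfcnfg_set_phy_state(cfg, &caifd->layer, true);
                rcu_read_unlock();
                break;

        /* NETDEV_DOWN and NETDEV_UNREGISTER arms are sketched after
         * their match groups below */
        }
        return 0;
}
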
424 caifd = caif_get(dev);
425 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
430 cfcnfg_set_phy_state(cfg, &caifd->layer, false);
431 caifd_hold(caifd);
434 caifd->layer.up->ctrlcmd(caifd->layer.up,
436 caifd->layer.id);
438 spin_lock_bh(&caifd->flow_lock);
447 if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
448 caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;
450 caifd->xoff = false;
451 caifd->xoff_skb_dtor = NULL;
452 caifd->xoff_skb = NULL;
454 spin_unlock_bh(&caifd->flow_lock);
455 caifd_put(caifd);
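
Lines 424-455 are the NETDEV_DOWN arm of the same notifier. Continuing the sketch above (this arm would sit in the same switch), it marks the PHY down, sends the DOWN indication upwards and, under flow_lock, undoes a pending destructor hijack left behind by transmit(); the -EINVAL early return is an assumption.

        /* Sketch: NETDEV_DOWN arm, continuing the notifier sketch above. */
        case NETDEV_DOWN:
                rcu_read_lock();

                caifd = caif_get(dev);
                if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
                        rcu_read_unlock();
                        return -EINVAL;         /* assumed error result */
                }

                cfcnfg_set_phy_state(cfg, &caifd->layer, false);
                caifd_hold(caifd);
                rcu_read_unlock();

                caifd->layer.up->ctrlcmd(caifd->layer.up,
                                         _CAIF_CTRLCMD_PHYIF_DOWN_IND,
                                         caifd->layer.id);

                spin_lock_bh(&caifd->flow_lock);

                /* Restore the original skb destructor if transmit() had
                 * hijacked one and the frame never left the queue. */
                if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
                        caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

                caifd->xoff = false;
                caifd->xoff_skb_dtor = NULL;
                caifd->xoff_skb = NULL;

                spin_unlock_bh(&caifd->flow_lock);
                caifd_put(caifd);
                break;
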
461 caifd = caif_get(dev);
462 if (caifd == NULL) {
466 list_del_rcu(&caifd->list);
471 * caifd is taken, simply ignore NETDEV_UNREGISTER and wait for
480 if (caifd_refcnt_read(caifd) != 0 ||
481 cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {
485 list_add_rcu(&caifd->list, &caifdevs->list);
491 dev_put(caifd->netdev);
492 free_percpu(caifd->pcpu_refcnt);
493 kfree(caifd);
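
Lines 461-493 are the NETDEV_UNREGISTER arm. The entry is unlinked under the caifdevs mutex but only freed once its per-CPU refcount has dropped to zero and cfcnfg_del_phy_layer() succeeds; otherwise it is re-added and the next UNREGISTER notification retries. Continuing the notifier sketch; the synchronize_rcu() placement and pr_info() text are taken from the visible fragments and surrounding comment.

        /* Sketch: NETDEV_UNREGISTER arm, continuing the notifier sketch. */
        case NETDEV_UNREGISTER:
                mutex_lock(&caifdevs->lock);

                caifd = caif_get(dev);
                if (caifd == NULL) {
                        mutex_unlock(&caifdevs->lock);
                        break;
                }
                list_del_rcu(&caifd->list);

                if (caifd_refcnt_read(caifd) != 0 ||
                    cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {
                        pr_info("Wait for device inuse\n");
                        /* Still referenced: put the entry back and wait
                         * for the next NETDEV_UNREGISTER notification. */
                        list_add_rcu(&caifd->list, &caifdevs->list);
                        mutex_unlock(&caifdevs->lock);
                        break;
                }

                synchronize_rcu();              /* assumed: let RCU readers drain first */
                dev_put(caifd->netdev);
                free_percpu(caifd->pcpu_refcnt);
                kfree(caifd);

                mutex_unlock(&caifdevs->lock);
                break;
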
522 struct caif_device_entry *caifd, *tmp;
530 list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
532 list_del_rcu(&caifd->list);
533 cfcnfg_set_phy_state(cfg, &caifd->layer, false);
536 (caifd_refcnt_read(caifd) != 0 ||
537 cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {
544 dev_put(caifd->netdev);
545 free_percpu(caifd->pcpu_refcnt);
546 kfree(caifd);
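
Lines 522-546 come from the per-net exit handler, which tears down every remaining entry with the same refcount/del_phy_layer check as the UNREGISTER arm, but retried in a bounded loop. Sketch below; the retry bound and back-off, the RTNL/mutex bracketing and the trailing cfcnfg_remove() are assumptions based on the usual structure of this function.

/* Sketch: per-net exit path tearing down any remaining entries. */
static void caif_exit_net(struct net *net)
{
        struct caif_device_entry *caifd, *tmp;
        struct caif_device_entry_list *caifdevs = caif_device_list(net);
        struct cfcnfg *cfg = get_cfcnfg(net);

        rtnl_lock();
        mutex_lock(&caifdevs->lock);

        list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
                int i = 0;

                list_del_rcu(&caifd->list);
                cfcnfg_set_phy_state(cfg, &caifd->layer, false);

                /* assumed: bounded retry while the entry is still in use */
                while (i < 10 &&
                       (caifd_refcnt_read(caifd) != 0 ||
                        cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {
                        pr_info("Wait for device inuse\n");
                        msleep(250);            /* assumed back-off between retries */
                        i++;
                }
                synchronize_rcu();
                dev_put(caifd->netdev);
                free_percpu(caifd->pcpu_refcnt);
                kfree(caifd);
        }
        cfcnfg_remove(cfg);                     /* assumed final teardown */

        mutex_unlock(&caifdevs->lock);
        rtnl_unlock();
}
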