Lines matching refs: ro (struct raw_sock *ro)
123 struct raw_sock *ro = raw_sk(sk);
129 if (!ro->recv_own_msgs && oskb->sk == sk)
133 if (!ro->fd_frames && oskb->len != CAN_MTU)
137 if (this_cpu_ptr(ro->uniq)->skb == oskb &&
138 this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
139 if (ro->join_filters) {
140 this_cpu_inc(ro->uniq->join_rx_count);
142 if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
148 this_cpu_ptr(ro->uniq)->skb = oskb;
149 this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
150 this_cpu_ptr(ro->uniq)->join_rx_count = 1;
152 if (ro->join_filters && ro->count > 1)
247 struct raw_sock *ro = raw_sk(sk);
249 raw_disable_filters(net, dev, sk, ro->filter, ro->count);
250 raw_disable_errfilter(net, dev, sk, ro->err_mask);
256 struct raw_sock *ro = raw_sk(sk);
259 err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
261 err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
263 raw_disable_filters(net, dev, sk, ro->filter,
264 ro->count);
270 static void raw_notify(struct raw_sock *ro, unsigned long msg,
273 struct sock *sk = &ro->sk;
278 if (ro->ifindex != dev->ifindex)
285 if (ro->bound)
288 if (ro->count > 1)
289 kfree(ro->filter);
291 ro->ifindex = 0;
292 ro->bound = 0;
293 ro->count = 0;
334 struct raw_sock *ro = raw_sk(sk);
336 ro->bound = 0;
337 ro->ifindex = 0;
340 ro->dfilter.can_id = 0;
341 ro->dfilter.can_mask = MASK_ALL;
342 ro->filter = &ro->dfilter;
343 ro->count = 1;
346 ro->loopback = 1;
347 ro->recv_own_msgs = 0;
348 ro->fd_frames = 0;
349 ro->join_filters = 0;
352 ro->uniq = alloc_percpu(struct uniqframe);
353 if (unlikely(!ro->uniq))
358 list_add_tail(&ro->notifier, &raw_notifier_list);
367 struct raw_sock *ro;
372 ro = raw_sk(sk);
375 while (raw_busy_notifier == ro) {
380 list_del(&ro->notifier);
386 if (ro->bound) {
387 if (ro->ifindex) {
390 dev = dev_get_by_index(sock_net(sk), ro->ifindex);
400 if (ro->count > 1)
401 kfree(ro->filter);
403 ro->ifindex = 0;
404 ro->bound = 0;
405 ro->count = 0;
406 free_percpu(ro->uniq);
421 struct raw_sock *ro = raw_sk(sk);
433 if (ro->bound && addr->can_ifindex == ro->ifindex)
465 if (ro->bound) {
467 if (ro->ifindex) {
471 ro->ifindex);
481 ro->ifindex = ifindex;
482 ro->bound = 1;
502 struct raw_sock *ro = raw_sk(sk);
509 addr->can_ifindex = ro->ifindex;
518 struct raw_sock *ro = raw_sk(sk);
552 if (ro->bound && ro->ifindex) {
553 dev = dev_get_by_index(sock_net(sk), ro->ifindex);
562 if (ro->bound) {
577 raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
578 ro->count);
582 if (ro->count > 1)
583 kfree(ro->filter);
588 ro->dfilter = sfilter;
589 filter = &ro->dfilter;
591 ro->filter = filter;
592 ro->count = count;
615 if (ro->bound && ro->ifindex) {
616 dev = dev_get_by_index(sock_net(sk), ro->ifindex);
624 if (ro->bound) {
634 ro->err_mask);
638 ro->err_mask = err_mask;
650 if (optlen != sizeof(ro->loopback))
653 if (copy_from_sockptr(&ro->loopback, optval, optlen))
659 if (optlen != sizeof(ro->recv_own_msgs))
662 if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen))
668 if (optlen != sizeof(ro->fd_frames))
671 if (copy_from_sockptr(&ro->fd_frames, optval, optlen))
677 if (optlen != sizeof(ro->join_filters))
680 if (copy_from_sockptr(&ro->join_filters, optval, optlen))
695 struct raw_sock *ro = raw_sk(sk);
710 if (ro->count > 0) {
711 int fsize = ro->count * sizeof(struct can_filter);
715 if (copy_to_user(optval, ro->filter, len))
729 val = &ro->err_mask;
735 val = &ro->loopback;
741 val = &ro->recv_own_msgs;
747 val = &ro->fd_frames;
753 val = &ro->join_filters;
770 struct raw_sock *ro = raw_sk(sk);
787 ifindex = ro->ifindex;
795 if (ro->fd_frames && dev->mtu == CANFD_MTU) {
822 err = can_send(skb, ro->loopback);
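
The fields referenced above (ro->filter, ro->err_mask, ro->loopback, ro->recv_own_msgs, ro->fd_frames, ro->join_filters) are all driven from userspace through SOL_CAN_RAW socket options, and ro->ifindex / ro->bound are set by bind(). As a minimal sketch of that userspace side (not part of the listing above; the interface name "can0" and the particular filter values are assumptions), the options might be exercised like this:

/* Hedged illustration only: opens a CAN_RAW socket and sets the options
 * whose kernel-side handling is shown in the listing above.
 * Assumes <linux/can.h> and <linux/can/raw.h> are available and that an
 * interface named "can0" exists; error handling is kept minimal.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int main(void)
{
	struct sockaddr_can addr;
	struct can_filter rfilter[2];
	int enable = 1;
	int s;

	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	if (s < 0) {
		perror("socket");
		return EXIT_FAILURE;
	}

	/* Two filters: on the kernel side this allocates ro->filter and
	 * sets ro->count = 2 (a single filter would use ro->dfilter).
	 * The IDs/masks here are arbitrary example values.
	 */
	rfilter[0].can_id   = 0x123;
	rfilter[0].can_mask = CAN_SFF_MASK;
	rfilter[1].can_id   = 0x200;
	rfilter[1].can_mask = 0x700;
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));

	/* These map onto ro->recv_own_msgs, ro->fd_frames and ro->join_filters */
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS, &enable, sizeof(enable));
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES, &enable, sizeof(enable));
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_JOIN_FILTERS, &enable, sizeof(enable));

	/* bind() is what sets ro->ifindex and ro->bound; "can0" is an assumption */
	memset(&addr, 0, sizeof(addr));
	addr.can_family = AF_CAN;
	addr.can_ifindex = if_nametoindex("can0");
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		close(s);
		return EXIT_FAILURE;
	}

	close(s);
	return EXIT_SUCCESS;
}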