Lines matching refs:fl — references to the struct ip6_flowlabel pointer fl in the Linux kernel's net/ipv6/ip6_flowlabel.c (IPv6 flow label management), listed with their source line numbers.
60 #define for_each_fl_rcu(hash, fl) \
61 for (fl = rcu_dereference_bh(fl_ht[(hash)]); \
62 fl != NULL; \
63 fl = rcu_dereference_bh(fl->next))
64 #define for_each_fl_continue_rcu(fl) \
65 for (fl = rcu_dereference_bh(fl->next); \
66 fl != NULL; \
67 fl = rcu_dereference_bh(fl->next))
76 struct ip6_flowlabel *fl;
78 for_each_fl_rcu(FL_HASH(label), fl) {
79 if (fl->label == label && net_eq(fl->fl_net, net))
80 return fl;
87 struct ip6_flowlabel *fl;
90 fl = __fl_lookup(net, label);
91 if (fl && !atomic_inc_not_zero(&fl->users))
92 fl = NULL;
94 return fl;
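The atomic_inc_not_zero(&fl->users) test at line 91 is what makes the lockless lookup safe: the caller gets a reference only if the entry has not already dropped to zero users and been queued for freeing. A minimal userspace analogue of that check in C11 stdatomic (illustrative names, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only while the count is still non-zero; once it has hit
 * zero the object is on its way to being freed and must not be revived. */
static bool ref_get_not_zero(atomic_int *users)
{
        int old = atomic_load(users);

        while (old != 0) {
                /* CAS succeeds only if the count is still 'old' (non-zero);
                 * on failure 'old' is reloaded with the current value. */
                if (atomic_compare_exchange_weak(users, &old, old + 1))
                        return true;            /* reference taken */
        }
        return false;                           /* too late; treat as not found */
}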
97 static bool fl_shared_exclusive(struct ip6_flowlabel *fl)
99 return fl->share == IPV6_FL_S_EXCL ||
100 fl->share == IPV6_FL_S_PROCESS ||
101 fl->share == IPV6_FL_S_USER;
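fl_shared_exclusive() (lines 97..101) treats labels owned by a single socket, process or user as not freely shareable; only IPV6_FL_S_ANY (and IPV6_FL_S_NONE) fall outside it. A userspace mirror of the predicate, assuming the UAPI share-mode constants from <linux/in6.h> (the helper name is illustrative):

#include <linux/in6.h>
#include <stdbool.h>

static bool flr_share_is_exclusive(__u8 share)
{
        /* EXCL, PROCESS and USER labels are tied to one owner. */
        return share == IPV6_FL_S_EXCL ||
               share == IPV6_FL_S_PROCESS ||
               share == IPV6_FL_S_USER;
}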
106 struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);
108 if (fl->share == IPV6_FL_S_PROCESS)
109 put_pid(fl->owner.pid);
110 kfree(fl->opt);
111 kfree(fl);
115 static void fl_free(struct ip6_flowlabel *fl)
117 if (!fl)
120 if (fl_shared_exclusive(fl) || fl->opt)
123 call_rcu(&fl->rcu, fl_free_rcu);
126 static void fl_release(struct ip6_flowlabel *fl)
130 fl->lastuse = jiffies;
131 if (atomic_dec_and_test(&fl->users)) {
132 unsigned long ttd = fl->lastuse + fl->linger;
133 if (time_after(ttd, fl->expires))
134 fl->expires = ttd;
135 ttd = fl->expires;
136 if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
137 struct ipv6_txoptions *opt = fl->opt;
138 fl->opt = NULL;
157 struct ip6_flowlabel *fl;
161 while ((fl = rcu_dereference_protected(*flp,
163 if (atomic_read(&fl->users) == 0) {
164 unsigned long ttd = fl->lastuse + fl->linger;
165 if (time_after(ttd, fl->expires))
166 fl->expires = ttd;
167 ttd = fl->expires;
169 *flp = fl->next;
170 fl_free(fl);
177 flp = &fl->next;
194 struct ip6_flowlabel *fl;
198 while ((fl = rcu_dereference_protected(*flp,
200 if (net_eq(fl->fl_net, net) &&
201 atomic_read(&fl->users) == 0) {
202 *flp = fl->next;
203 fl_free(fl);
207 flp = &fl->next;
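Both the garbage collector (lines 157..177) and the per-netns purge (lines 194..207) walk each hash chain through a pointer-to-pointer cursor, so an entry with no users can be unlinked without keeping a separate 'prev' pointer. A self-contained userspace sketch of that unlink pattern (plain singly linked list, no RCU or locking; types and names are illustrative):

#include <stdbool.h>
#include <stdlib.h>

struct node {
        struct node *next;
        bool expired;
};

static void gc_list(struct node **headp)
{
        struct node **np = headp;
        struct node *n;

        while ((n = *np) != NULL) {
                if (n->expired) {
                        *np = n->next;  /* unlink: predecessor now skips n */
                        free(n);
                        continue;       /* *np already names the next node */
                }
                np = &n->next;          /* keep n, advance the cursor */
        }
}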
214 struct ip6_flowlabel *fl, __be32 label)
218 fl->label = label & IPV6_FLOWLABEL_MASK;
223 fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
224 if (fl->label) {
225 lfl = __fl_lookup(net, fl->label);
239 lfl = __fl_lookup(net, fl->label);
247 fl->lastuse = jiffies;
248 fl->next = fl_ht[FL_HASH(fl->label)];
249 rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
268 struct ip6_flowlabel *fl = sfl->fl;
270 if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
271 fl->lastuse = jiffies;
273 return fl;
295 fl_release(sfl->fl);
313 struct ip6_flowlabel *fl,
316 struct ipv6_txoptions *fl_opt = fl->opt;
350 static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
360 fl->lastuse = jiffies;
361 if (time_before(fl->linger, linger))
362 fl->linger = linger;
363 if (time_before(expires, fl->linger))
364 expires = fl->linger;
365 if (time_before(fl->expires, fl->lastuse + expires))
366 fl->expires = fl->lastuse + expires;
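fl6_renew() (lines 350..366) only ever extends a label's lifetime: linger and expires are pushed forward with the kernel's wrap-safe jiffies comparisons, never shortened. The trick behind those time_before()/time_after() macros, sketched for a plain unsigned counter (illustrative name):

#include <stdbool.h>

/* True when 'a' is later than 'b', even if the counter has wrapped in
 * between: the signed difference stays negative across the wrap. */
static bool counter_after(unsigned int a, unsigned int b)
{
        return (int)(b - a) < 0;
}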
376 struct ip6_flowlabel *fl = NULL;
387 fl = kzalloc(sizeof(*fl), GFP_KERNEL);
388 if (!fl)
397 fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
398 if (!fl->opt)
401 memset(fl->opt, 0, sizeof(*fl->opt));
402 fl->opt->tot_len = sizeof(*fl->opt) + olen;
404 if (copy_from_sockptr_offset(fl->opt + 1, optval,
409 msg.msg_control = (void *)(fl->opt+1);
412 ipc6.opt = fl->opt;
417 if (fl->opt->opt_flen)
419 if (fl->opt->opt_nflen == 0) {
420 kfree(fl->opt);
421 fl->opt = NULL;
425 fl->fl_net = net;
426 fl->expires = jiffies;
427 err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
430 fl->share = freq->flr_share;
437 fl->dst = freq->flr_dst;
438 atomic_set(&fl->users, 1);
439 switch (fl->share) {
444 fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
447 fl->owner.uid = current_euid();
453 if (fl_shared_exclusive(fl) || fl->opt) {
457 return fl;
460 if (fl) {
461 kfree(fl->opt);
462 kfree(fl);
493 struct ip6_flowlabel *fl)
496 sfl->fl = fl;
521 if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
523 freq->flr_label = sfl->fl->label;
524 freq->flr_dst = sfl->fl->dst;
525 freq->flr_share = sfl->fl->share;
526 freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
527 freq->flr_linger = sfl->fl->linger / HZ;
562 if (sfl->fl->label == freq->flr_label)
572 fl_release(sfl->fl);
586 if (sfl->fl->label == freq->flr_label) {
587 err = fl6_renew(sfl->fl, freq->flr_linger,
597 struct ip6_flowlabel *fl = fl_lookup(net, freq->flr_label);
599 if (fl) {
600 err = fl6_renew(fl, freq->flr_linger,
602 fl_release(fl);
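The IPV6_FL_A_RENEW branch above hands the request to fl6_renew(), either for a label already attached to the socket (line 587) or, for privileged callers, for any label found via fl_lookup() (lines 597..602). A hedged userspace sketch of issuing such a renewal; the helper name and timeout values are illustrative:

#include <arpa/inet.h>          /* htonl */
#include <netinet/in.h>         /* IPPROTO_IPV6 */
#include <linux/in6.h>          /* struct in6_flowlabel_req, IPV6_FL_A_RENEW */
#include <string.h>
#include <sys/socket.h>

#ifndef IPV6_FLOWLABEL_MGR      /* from <linux/in6.h>; some libc headers hide it */
#define IPV6_FLOWLABEL_MGR 32
#endif

/* Renew the lease on a flow label this socket already holds. */
static int flowlabel_renew(int fd, unsigned int label)
{
        struct in6_flowlabel_req freq;

        memset(&freq, 0, sizeof(freq));
        freq.flr_action  = IPV6_FL_A_RENEW;
        freq.flr_label   = htonl(label & 0xFFFFF);      /* 20-bit label, network order */
        freq.flr_linger  = 60;                          /* seconds kept after last user */
        freq.flr_expires = 600;                         /* seconds until the lease expires */

        return setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR,
                          &freq, sizeof(freq));
}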
613 struct ip6_flowlabel *fl, *fl1 = NULL;
636 fl = fl_create(net, sk, freq, optval, optlen, &err);
637 if (!fl)
646 if (sfl->fl->label == freq->flr_label) {
651 fl1 = sfl->fl;
668 fl1->share != fl->share ||
670 (fl1->owner.pid != fl->owner.pid)) ||
672 !uid_eq(fl1->owner.uid, fl->owner.uid)))
678 if (fl->linger > fl1->linger)
679 fl1->linger = fl->linger;
680 if ((long)(fl->expires - fl1->expires) > 0)
681 fl1->expires = fl->expires;
683 fl_free(fl);
703 fl1 = fl_intern(net, fl, freq->flr_label);
710 if (copy_to_sockptr_offset(optval, offset, &fl->label,
711 sizeof(fl->label))) {
716 fl_link(np, sfl1, fl);
719 fl_free(fl);
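The IPV6_FL_A_GET handling above (fl_create() at line 636, reuse of an existing label through fl1, fl_intern() at line 703 and fl_link() at line 716) is driven from userspace through setsockopt(IPV6_FLOWLABEL_MGR). A hedged end-to-end sketch: request an exclusive label for one destination, let the kernel pick the value (which is written back into the request buffer by the copy_to_sockptr_offset() at line 710), then stamp outgoing packets with it. Addresses, port and timeouts are illustrative:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <linux/in6.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IPV6_FLOWLABEL_MGR      /* values from <linux/in6.h> */
#define IPV6_FLOWLABEL_MGR 32
#endif
#ifndef IPV6_FLOWINFO_SEND
#define IPV6_FLOWINFO_SEND 33
#endif

int main(void)
{
        struct in6_flowlabel_req freq;
        struct sockaddr_in6 dst;
        int on = 1;
        int fd;

        fd = socket(AF_INET6, SOCK_DGRAM, 0);
        if (fd < 0) { perror("socket"); return 1; }

        memset(&dst, 0, sizeof(dst));
        dst.sin6_family = AF_INET6;
        dst.sin6_port = htons(12345);
        inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);

        memset(&freq, 0, sizeof(freq));
        freq.flr_action  = IPV6_FL_A_GET;
        freq.flr_flags   = IPV6_FL_F_CREATE;    /* create if it does not exist yet */
        freq.flr_share   = IPV6_FL_S_EXCL;      /* not shareable with other owners */
        freq.flr_label   = 0;                   /* 0: let the kernel pick a label */
        freq.flr_dst     = dst.sin6_addr;
        freq.flr_linger  = 30;
        freq.flr_expires = 300;

        if (setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR,
                       &freq, sizeof(freq)) < 0) {
                perror("IPV6_FLOWLABEL_MGR");
                return 1;
        }
        printf("assigned label %#x\n", ntohl(freq.flr_label) & 0xFFFFF);

        /* Carry the label on the wire: enable IPV6_FLOWINFO_SEND and put the
         * label into sin6_flowinfo for sendto(). */
        setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWINFO_SEND, &on, sizeof(on));
        dst.sin6_flowinfo = freq.flr_label;
        sendto(fd, "hi", 2, 0, (struct sockaddr *)&dst, sizeof(dst));

        close(fd);
        return 0;
}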
757 struct ip6_flowlabel *fl = NULL;
762 for_each_fl_rcu(state->bucket, fl) {
763 if (net_eq(fl->fl_net, net))
767 fl = NULL;
769 return fl;
772 static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
777 for_each_fl_continue_rcu(fl) {
778 if (net_eq(fl->fl_net, net))
784 for_each_fl_rcu(state->bucket, fl) {
785 if (net_eq(fl->fl_net, net))
790 fl = NULL;
793 return fl;
798 struct ip6_flowlabel *fl = ip6fl_get_first(seq);
799 if (fl)
800 while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
802 return pos ? NULL : fl;
818 struct ip6_flowlabel *fl;
821 fl = ip6fl_get_first(seq);
823 fl = ip6fl_get_next(seq, v);
825 return fl;
840 struct ip6_flowlabel *fl = v;
843 (unsigned int)ntohl(fl->label),
844 fl->share,
845 ((fl->share == IPV6_FL_S_PROCESS) ?
846 pid_nr_ns(fl->owner.pid, state->pid_ns) :
847 ((fl->share == IPV6_FL_S_USER) ?
848 from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
850 atomic_read(&fl->users),
851 fl->linger/HZ,
852 (long)(fl->expires - jiffies)/HZ,
853 &fl->dst,
854 fl->opt ? fl->opt->opt_nflen : 0);
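ip6fl_seq_show() (lines 840..854) backs /proc/net/ip6_flowlabel: each row lists the label, share mode, owner (a pid or uid, depending on the share mode), the current user count, linger and remaining expiry in seconds, the destination address and the option length. A trivial dump program for inspecting it while the example above runs (nothing assumed beyond the proc path this file registers):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/net/ip6_flowlabel", "r");
        char line[256];

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}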