Lines matching references to 'head' in net/sched/cls_matchall.c (the tc matchall classifier); each entry gives the source line number followed by the matched line.

30 struct cls_mall_head *head = rcu_dereference_bh(tp->root);
32 if (unlikely(!head))
35 if (tc_skip_sw(head->flags))
38 *res = head->res;
39 __this_cpu_inc(head->pf->rhit);
40 return tcf_exts_exec(skb, &head->exts, res);
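
Lines 30-40 sit in the RCU-protected fast path, mall_classify(). A sketch of the surrounding function, reconstructed around these matches (exact bodies vary by kernel version):

static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

	if (unlikely(!head))		/* no filter installed on this tp yet */
		return -1;

	if (tc_skip_sw(head->flags))	/* hw-only filter: never match in software */
		return -1;

	*res = head->res;
	__this_cpu_inc(head->pf->rhit);	/* bump the per-cpu hit counter */
	return tcf_exts_exec(skb, &head->exts, res);
}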
48 static void __mall_destroy(struct cls_mall_head *head)
50 tcf_exts_destroy(&head->exts);
51 tcf_exts_put_net(&head->exts);
52 free_percpu(head->pf);
53 kfree(head);
58 struct cls_mall_head *head = container_of(to_rcu_work(work),
62 __mall_destroy(head);
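
Lines 48-62 form the two-stage teardown: __mall_destroy() does the actual freeing, and mall_destroy_work() is the deferred-work wrapper that runs it under RTNL once an RCU grace period has passed. Reconstructed sketch:

static void __mall_destroy(struct cls_mall_head *head)
{
	tcf_exts_destroy(&head->exts);	/* release attached actions */
	tcf_exts_put_net(&head->exts);	/* drop the netns reference */
	free_percpu(head->pf);		/* free per-cpu hit counters */
	kfree(head);
}

static void mall_destroy_work(struct work_struct *work)
{
	struct cls_mall_head *head = container_of(to_rcu_work(work),
						  struct cls_mall_head,
						  rwork);
	rtnl_lock();
	__mall_destroy(head);
	rtnl_unlock();
}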
67 struct cls_mall_head *head,
74 tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
79 &head->flags, &head->in_hw_count, true);
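
Lines 67-79 belong to mall_destroy_hw_filter(), which asks the registered driver callbacks to drop the offloaded rule. Roughly:

static void mall_destroy_hw_filter(struct tcf_proto *tp,
				   struct cls_mall_head *head,
				   unsigned long cookie,
				   struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = cookie;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall, false,
			    &head->flags, &head->in_hw_count, true);
}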
83 struct cls_mall_head *head,
89 bool skip_sw = tc_skip_sw(head->flags);
92 cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
96 tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
100 err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
103 mall_destroy_hw_filter(tp, head, cookie, NULL);
113 skip_sw, &head->flags, &head->in_hw_count, true);
118 mall_destroy_hw_filter(tp, head, cookie, NULL);
122 if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
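
Lines 83-122 are mall_replace_hw_filter(), the offload-install path. A condensed sketch; extack error messages are elided:

static int mall_replace_hw_filter(struct tcf_proto *tp,
				  struct cls_mall_head *head,
				  unsigned long cookie,
				  struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(head->flags);
	int err;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_REPLACE;
	cls_mall.cookie = cookie;

	err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
	if (err) {
		kfree(cls_mall.rule);
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		/* only fatal when the filter is skip_sw: hw is mandatory */
		return skip_sw ? err : 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall,
			      skip_sw, &head->flags, &head->in_hw_count, true);
	kfree(cls_mall.rule);

	if (err) {
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		return err;
	}

	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;	/* skip_sw requested but no hw took the rule */

	return 0;
}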
131 struct cls_mall_head *head = rtnl_dereference(tp->root);
133 if (!head)
136 tcf_unbind_filter(tp, &head->res);
138 if (!tc_skip_hw(head->flags))
139 mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);
141 if (tcf_exts_get_net(&head->exts))
142 tcf_queue_work(&head->rwork, mall_destroy_work);
144 __mall_destroy(head);
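
Lines 131-144 are mall_destroy(), the tp teardown entry point; note the choice between deferred and immediate freeing. Sketch:

static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (!head)
		return;

	tcf_unbind_filter(tp, &head->res);

	if (!tc_skip_hw(head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);

	/* defer the free past an RCU grace period while the netns is alive */
	if (tcf_exts_get_net(&head->exts))
		tcf_queue_work(&head->rwork, mall_destroy_work);
	else
		__mall_destroy(head);
}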
149 struct cls_mall_head *head = rtnl_dereference(tp->root);
151 if (head && head->handle == handle)
152 return head;
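
Lines 149-152 are mall_get(); since matchall holds at most one filter per tp, lookup is a single handle compare. Sketch:

static void *mall_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (head && head->handle == handle)
		return head;

	return NULL;
}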
164 struct cls_mall_head *head,
171 err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr, true,
177 head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
178 tcf_bind_filter(tp, &head->res, base);
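
Lines 164-178 belong to mall_set_parms(), which validates the actions and binds the classid. Sketch (the argument list drifts slightly across versions):

static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr,
			  struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr, true,
				extack);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
	}
	return 0;
}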
189 struct cls_mall_head *head = rtnl_dereference(tp->root);
198 if (head)
245 *arg = head;
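
Lines 189-245 are in mall_change(). Only the head-related skeleton is sketched here; attribute parsing and the setup of the new head are elided as comments:

static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca, void **arg,
		       bool ovr, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct cls_mall_head *new;

	if (head)
		return -EEXIST;	/* matchall keeps exactly one filter per tp */

	/* ... parse TCA_MATCHALL_* attributes, allocate and init 'new',
	 * then mall_set_parms() and, unless skip_hw, mall_replace_hw_filter()
	 */

	*arg = head;	/* head is NULL here; see the -EEXIST check above */
	rcu_assign_pointer(tp->root, new);
	return 0;
}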
262 struct cls_mall_head *head = rtnl_dereference(tp->root);
264 head->deleting = true;
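
Lines 262-264 are mall_delete(). Deletion only marks the head; the memory is reclaimed later via mall_destroy(). Sketch:

static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	head->deleting = true;	/* hides the head from mall_walk() */
	*last = true;
	return 0;
}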
272 struct cls_mall_head *head = rtnl_dereference(tp->root);
277 if (!head || head->deleting)
279 if (arg->fn(tp, head, arg) < 0)
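
Lines 272-279 are mall_walk(), which skips a head already marked deleting. Sketch:

static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (!head || head->deleting)
		return;
	if (arg->fn(tp, head, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}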
288 struct cls_mall_head *head = rtnl_dereference(tp->root);
293 if (tc_skip_hw(head->flags))
296 cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
300 tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
303 cls_mall.cookie = (unsigned long)head;
305 err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
308 if (add && tc_skip_sw(head->flags)) {
316 &cls_mall, cb_priv, &head->flags,
317 &head->in_hw_count);
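
Lines 288-317 are mall_reoffload(), replayed when a block callback is registered or unregistered. Condensed sketch:

static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	int err;

	if (tc_skip_hw(head->flags))
		return 0;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = add ? TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = (unsigned long)head;

	err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
	if (err) {
		kfree(cls_mall.rule);
		/* fatal only when adding a skip_sw filter: hw is mandatory */
		return (add && tc_skip_sw(head->flags)) ? err : 0;
	}

	err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL,
				    &cls_mall, cb_priv, &head->flags,
				    &head->in_hw_count);
	kfree(cls_mall.rule);

	return err;
}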
328 struct cls_mall_head *head,
334 tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
340 tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes,
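
Lines 328-340 are mall_stats_hw_filter(), which pulls byte/packet counters back from the hardware into the software stats. Sketch:

static void mall_stats_hw_filter(struct tcf_proto *tp,
				 struct cls_mall_head *head,
				 unsigned long cookie)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
	cls_mall.command = TC_CLSMATCHALL_STATS;
	cls_mall.cookie = cookie;

	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);

	tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes,
			      cls_mall.stats.pkts, cls_mall.stats.lastused);
}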
351 struct cls_mall_head *head = fh;
355 if (!head)
358 if (!tc_skip_hw(head->flags))
359 mall_stats_hw_filter(tp, head, (unsigned long)head);
361 t->tcm_handle = head->handle;
367 if (head->res.classid &&
368 nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
371 if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
375 struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);
385 if (tcf_exts_dump(skb, &head->exts))
390 if (tcf_exts_dump_stats(skb, &head->exts) < 0)
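
Lines 351-390 are mall_dump(): the netlink dump of handle, classid, flags, the summed per-cpu hit counter, and the extensions. Reconstructed sketch:

static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_matchall_pcnt gpf = {};
	struct cls_mall_head *head = fh;
	struct nlattr *nest;
	int cpu;

	if (!head)
		return skb->len;

	if (!tc_skip_hw(head->flags))
		mall_stats_hw_filter(tp, head, (unsigned long)head);

	t->tcm_handle = head->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
		goto nla_put_failure;

	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
		goto nla_put_failure;

	for_each_possible_cpu(cpu) {
		struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);

		gpf.rhit += pf->rhit;	/* sum the per-cpu hit counters */
	}

	if (nla_put_64bit(skb, TCA_MATCHALL_PCNT,
			  sizeof(struct tc_matchall_pcnt),
			  &gpf, TCA_MATCHALL_PAD))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}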
403 struct cls_mall_head *head = fh;
405 if (head && head->res.classid == classid) {
407 __tcf_bind_filter(q, &head->res, base);
409 __tcf_unbind_filter(q, &head->res);
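
Lines 403-409 are mall_bind_class(), which rebinds or unbinds the filter's class when a class comes or goes. Sketch:

static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			    unsigned long base)
{
	struct cls_mall_head *head = fh;

	if (head && head->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &head->res, base);
		else
			__tcf_unbind_filter(q, &head->res);
	}
}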