/kernel/linux/linux-5.10/net/sched/
cls_flow.c (all matches in flow_change()):
    394:  struct flow_filter *fold, *fnew;    (local)
    433:  fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
    434:  if (!fnew)
    437:  err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
    441:  err = tcf_exts_init(&fnew->exts, net, TCA_FLOW_ACT, TCA_FLOW_POLICE);
    445:  err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr,
    456:  /* Copy fold into fnew */
    457:  fnew->tp = fold->tp;
    458:  fnew ...    (match truncated)
    (further matches omitted)
cls_basic.c (all matches in basic_change()):
    179:  struct basic_filter *fnew;    (local)
    194:  fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
    195:  if (!fnew)
    198:  err = tcf_exts_init(&fnew->exts, net, TCA_BASIC_ACT, TCA_BASIC_POLICE);
    204:  err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
    207:  err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
    212:  fnew->handle = handle;
    213:  fnew->pf = alloc_percpu(struct tc_basic_pcnt);
    214:  if (!fnew ...    (match truncated)
    (further matches omitted)
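The two idr_alloc_u32() hits at lines 204 and 207 reflect the two ways basic_change() assigns a handle to fnew: when the user left the handle at 0 the kernel picks the next free one, and when a specific handle was requested the call reserves exactly that value or fails if it is taken. Below is a minimal userspace sketch of that two-branch allocation, assuming a plain array in place of the kernel IDR; the names alloc_handle and MAX_HANDLE are illustrative, not kernel API.

    /* Sketch of the basic_change()-style handle assignment: auto-pick when
     * *handle == 0, otherwise reserve the requested value or fail. */
    #include <stdio.h>

    #define MAX_HANDLE 64

    static void *handle_table[MAX_HANDLE + 1];   /* index == handle */

    static int alloc_handle(unsigned int *handle, void *filter)
    {
        if (*handle == 0) {                      /* kernel picks the handle */
            for (unsigned int h = 1; h <= MAX_HANDLE; h++) {
                if (!handle_table[h]) {
                    handle_table[h] = filter;
                    *handle = h;
                    return 0;
                }
            }
            return -1;                           /* no free handle left */
        }
        if (handle_table[*handle])               /* requested handle in use */
            return -1;
        handle_table[*handle] = filter;
        return 0;
    }

    int main(void)
    {
        int f1, f2;
        unsigned int h1 = 0, h2 = 7;

        if (alloc_handle(&h1, &f1) == 0)
            printf("auto-assigned handle %u\n", h1);      /* prints 1 */
        if (alloc_handle(&h2, &f2) == 0)
            printf("reserved requested handle %u\n", h2); /* prints 7 */
        return 0;
    }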
cls_fw.c (all matches in fw_change()):
    258:  struct fw_filter *pfp, *fnew;    (local)
    264:  fnew = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
    265:  if (!fnew)
    268:  fnew->id = f->id;
    269:  fnew->ifindex = f->ifindex;
    270:  fnew->tp = f->tp;
    272:  err = tcf_exts_init(&fnew->exts, net, TCA_FW_ACT,
    275:  kfree(fnew);
    279:  err = fw_set_parms(net, tp, fnew, tb, tca, base, ovr, extack);
    281:  tcf_exts_destroy(&fnew ...    (match truncated)
    (further matches omitted)
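The matches above show the common shape of a classifier ->change() replacement: the existing filter is never edited in place; a new fw_filter is allocated, its identity fields (id, ifindex, tp) are copied from the old one, the new parameters are validated, and on any failure the half-built fnew is freed while the old filter stays live. A minimal userspace sketch of that allocate-copy-validate-or-unwind flow follows; the struct layout, set_parms() stub, and error handling are illustrative stand-ins, not the kernel interfaces.

    /* Sketch of an fw_change()-style replace: build fnew from fold, validate,
     * and publish it only if everything succeeded; otherwise free fnew and
     * leave fold untouched. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    struct filter {
        unsigned int id;
        int ifindex;
        int classid;          /* stands in for the exts/result state */
    };

    /* Stand-in for fw_set_parms(): reject an obviously bad classid. */
    static int set_parms(struct filter *f, int new_classid)
    {
        if (new_classid < 0)
            return -EINVAL;
        f->classid = new_classid;
        return 0;
    }

    /* Returns the new filter on success, NULL on error (old one untouched). */
    static struct filter *replace_filter(struct filter *fold, int new_classid)
    {
        struct filter *fnew = calloc(1, sizeof(*fnew));
        if (!fnew)
            return NULL;

        /* Copy identity from the old instance, like fnew->id = f->id; ... */
        fnew->id = fold->id;
        fnew->ifindex = fold->ifindex;

        if (set_parms(fnew, new_classid)) {
            free(fnew);       /* unwind: fold remains the live filter */
            return NULL;
        }
        return fnew;          /* caller swaps fold -> fnew and frees fold */
    }

    int main(void)
    {
        struct filter fold = { .id = 1, .ifindex = 2, .classid = 10 };
        struct filter *fnew = replace_filter(&fold, 20);

        if (fnew)
            printf("replaced, classid=%d\n", fnew->classid);
        else
            printf("kept old filter\n");
        free(fnew);
        return 0;
    }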
cls_flower.c:
    In fl_check_assign_mask():
        1868:  struct cls_fl_filter *fnew,
        1881:  fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
        1884:  if (!fnew->mask) {
        1898:  fnew->mask = newmask;
        1900:  } else if (IS_ERR(fnew->mask)) {
        1901:  ret = PTR_ERR(fnew->mask);
        1902:  } else if (fold && fold->mask != fnew->mask) {
        1904:  } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
    In fl_ht_insert_unique():
        1955:  static int fl_ht_insert_unique(struct cls_fl_filter *fnew,    (argument)
        1959:  struct fl_flow_mask *mask = fnew ...    (match truncated)
    Definitions:
        1867:  fl_check_assign_mask(struct cls_fl_head *head, struct cls_fl_filter *fnew, struct cls_fl_filter *fold, struct fl_flow_mask *mask)    (argument)
        1985:  struct cls_fl_filter *fnew;    (local in fl_change())
    (further matches omitted)
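The fl_check_assign_mask() matches show how flower shares masks between filters: the mask is looked up or inserted in one step via rhashtable_lookup_get_insert_fast(), and when an existing mask is found it must still be pinned with refcount_inc_not_zero(), because a concurrent deleter may already have dropped the last reference. The sketch below models that "find or create, then try to pin" flow single-threaded, with a linked list and a plain counter standing in for the rhashtable and the atomic refcount; all names here are illustrative.

    /* Sketch of mask sharing as in fl_check_assign_mask(): look the mask up
     * by key; if absent, insert a fresh one with refcount 1; if present,
     * reuse it only when its refcount can still be raised (a zero count
     * means it is being torn down, which the real code treats as "retry"). */
    #include <stdio.h>
    #include <stdlib.h>

    struct mask {
        unsigned int key;      /* stands in for the flow mask contents */
        int refcnt;            /* an atomic refcount_t in the kernel */
        struct mask *next;
    };

    static struct mask *mask_list;     /* stands in for head->ht */

    static struct mask *get_or_create_mask(unsigned int key)
    {
        struct mask *m;

        for (m = mask_list; m; m = m->next) {
            if (m->key != key)
                continue;
            if (m->refcnt == 0)        /* like !refcount_inc_not_zero(): dying */
                return NULL;           /* real code retries the lookup */
            m->refcnt++;               /* reuse the shared mask */
            return m;
        }

        m = calloc(1, sizeof(*m));     /* no match: build a new mask */
        if (!m)
            return NULL;
        m->key = key;
        m->refcnt = 1;
        m->next = mask_list;
        mask_list = m;
        return m;
    }

    int main(void)
    {
        struct mask *a = get_or_create_mask(0xff00);
        struct mask *b = get_or_create_mask(0xff00);   /* shared with a */

        printf("same mask: %s, refcnt=%d\n",
               a == b ? "yes" : "no", a ? a->refcnt : 0);
        return 0;
    }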
/kernel/linux/linux-6.6/net/sched/
cls_flow.c (all matches in flow_change()):
    396:  struct flow_filter *fold, *fnew;    (local)
    435:  fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
    436:  if (!fnew)
    439:  err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
    443:  err = tcf_exts_init(&fnew->exts, net, TCA_FLOW_ACT, TCA_FLOW_POLICE);
    447:  err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, flags,
    458:  /* Copy fold into fnew */
    459:  fnew->tp = fold->tp;
    460:  fnew ...    (match truncated)
    (further matches omitted)
cls_basic.c (all matches in basic_change()):
    181:  struct basic_filter *fnew;    (local)
    196:  fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
    197:  if (!fnew)
    200:  err = tcf_exts_init(&fnew->exts, net, TCA_BASIC_ACT, TCA_BASIC_POLICE);
    206:  err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
    209:  err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
    214:  fnew->handle = handle;
    215:  fnew->pf = alloc_percpu(struct tc_basic_pcnt);
    216:  if (!fnew ...    (match truncated)
    (further matches omitted)
cls_fw.c (all matches in fw_change()):
    259:  struct fw_filter *pfp, *fnew;    (local)
    265:  fnew = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
    266:  if (!fnew)
    269:  fnew->id = f->id;
    270:  fnew->ifindex = f->ifindex;
    271:  fnew->tp = f->tp;
    273:  err = tcf_exts_init(&fnew->exts, net, TCA_FW_ACT,
    276:  kfree(fnew);
    280:  err = fw_set_parms(net, tp, fnew, tb, tca, base, flags, extack);
    282:  tcf_exts_destroy(&fnew ...    (match truncated)
    (further matches omitted)
cls_flower.c:
    In fl_check_assign_mask():
        2152:  struct cls_fl_filter *fnew,
        2165:  fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
        2168:  if (!fnew->mask) {
        2182:  fnew->mask = newmask;
        2184:  } else if (IS_ERR(fnew->mask)) {
        2185:  ret = PTR_ERR(fnew->mask);
        2186:  } else if (fold && fold->mask != fnew->mask) {
        2188:  } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
    In fl_ht_insert_unique():
        2206:  static int fl_ht_insert_unique(struct cls_fl_filter *fnew,    (argument)
        2210:  struct fl_flow_mask *mask = fnew ...    (match truncated)
    Definitions:
        2151:  fl_check_assign_mask(struct cls_fl_head *head, struct cls_fl_filter *fnew, struct cls_fl_filter *fold, struct fl_flow_mask *mask)    (argument)
        2238:  struct cls_fl_filter *fnew;    (local in fl_change())
    (further matches omitted)
/kernel/linux/linux-5.10/drivers/hv/
channel_mgmt.c (all matches in vmbus_process_offer()):
    553:  bool fnew = true;    (local)
    599:  fnew = false;
    604:  if (fnew) {
    656:  wq = fnew ? vmbus_connection.handle_primary_chan_wq :
/kernel/linux/linux-6.6/drivers/hv/
channel_mgmt.c (all matches in vmbus_process_offer()):
    585:  bool fnew = true;    (local)
    619:  fnew = false;
    637:  if (fnew) {
    688:  wq = fnew ? vmbus_connection.handle_primary_chan_wq :
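In both trees the Hyper-V code uses fnew differently from the tc classifiers: it is a plain bool that starts as true and is cleared when the incoming offer matches a channel that is already known (in mainline, the comparison is on the offer's type and instance GUIDs), meaning the offer describes a sub-channel rather than a new primary channel. The flag then selects which ordered workqueue processes the offer; the listing shows handle_primary_chan_wq, and the other branch is the sub-channel queue. A small sketch of that classify-then-dispatch pattern follows; the structures and queue names are illustrative placeholders, not the Hyper-V API.

    /* Sketch of the vmbus_process_offer() flag: walk the list of known
     * primary channels; if one has the same instance id, the offer is a
     * sub-channel and fnew becomes false; fnew then selects the queue. */
    #include <stdbool.h>
    #include <stdio.h>

    struct channel {
        int instance_id;       /* stands in for the offer's instance GUID */
    };

    static const char *classify_offer(const struct channel *known, int nknown,
                                      int offered_instance)
    {
        bool fnew = true;

        for (int i = 0; i < nknown; i++) {
            if (known[i].instance_id == offered_instance) {
                fnew = false;  /* matches an existing primary channel */
                break;
            }
        }

        /* like: wq = fnew ? handle_primary_chan_wq : handle_sub_chan_wq; */
        return fnew ? "primary-channel queue" : "sub-channel queue";
    }

    int main(void)
    {
        struct channel known[] = { { .instance_id = 7 } };

        printf("offer 7  -> %s\n", classify_offer(known, 1, 7));
        printf("offer 42 -> %s\n", classify_offer(known, 1, 42));
        return 0;
    }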