Lines matching "mask" (definitions and uses):
1005 * @mask: bitmask of bits in if_flags to check
1013 unsigned short mask)
1021 if (((dev->flags ^ if_flags) & mask) == 0) {
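These excerpts appear to come from the Linux kernel's net/core/dev.c; the number before each line is that file's own line number. The three matches above belong to the flag-matching device lookup (__dev_get_by_flags() in current trees): a device matches when every bit selected by @mask has the same value in dev->flags and in the requested if_flags, which is exactly what the XOR-then-mask test checks. A minimal userspace sketch of that test, with illustrative values rather than real IFF_* flags:

#include <stdio.h>

/* Match iff all bits selected by mask agree between dev_flags and if_flags:
 * XOR leaves 1s only where the two differ, so masking the differences
 * must yield zero.
 */
static int flags_match(unsigned short dev_flags, unsigned short if_flags,
		       unsigned short mask)
{
	return ((dev_flags ^ if_flags) & mask) == 0;
}

int main(void)
{
	printf("%d\n", flags_match(0x1043, 0x0041, 0x0041)); /* 1: masked bits agree */
	printf("%d\n", flags_match(0x1002, 0x0041, 0x0041)); /* 0: bit 0x0001 differs */
	return 0;
}
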
2584 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2637 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2674 if (netif_attr_test_mask(j, mask, nr_ids) &&
2747 netif_attr_test_mask(j, mask, dev_maps->nr_ids) &&
2788 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2794 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
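The XPS matches above show __netif_set_xps_queue() taking the mask as a raw unsigned long bitmap of CPU (or queue) ids, with the netif_set_xps_queue() wrapper converting a struct cpumask via cpumask_bits(); netif_attrmask_next_and() and netif_attr_test_mask() then walk only the ids below nr_ids that are set in both the online bitmap and the requested one. A userspace sketch of that walk, using an illustrative attr_test() helper in place of the kernel's inlines:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Illustrative stand-in for netif_attr_test_mask(): test bit @id in a
 * long-array bitmap, treating ids >= nr_ids as unset.
 */
static int attr_test(unsigned int id, const unsigned long *mask,
		     unsigned int nr_ids)
{
	if (id >= nr_ids)
		return 0;
	return (mask[id / BITS_PER_LONG] >> (id % BITS_PER_LONG)) & 1UL;
}

int main(void)
{
	unsigned long online = 0x0f;	/* ids 0-3 "online" */
	unsigned long want   = 0x0a;	/* caller asked for ids 1 and 3 */
	unsigned int nr_ids = 8, j;

	/* Visit ids set in both bitmaps, as the XPS loop above does. */
	for (j = 0; j < nr_ids; j++)
		if (attr_test(j, &online, nr_ids) && attr_test(j, &want, nr_ids))
			printf("map this tx queue for id %u\n", j);
	return 0;
}
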
4535 flow_id = skb_get_hash(skb) & flow_table->mask;
4605 ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
4614 rflow = &flow_table->flows[hash & flow_table->mask];
4681 if (flow_table && flow_id <= flow_table->mask) {
4687 (int)(10 * flow_table->mask)))
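The matches above sit in the RPS/RFS receive-steering path: flow_table->mask holds the table size minus one (the table is a power of two), so `hash & mask` is a cheap modulo for picking a flow slot, and the `flow_id <= flow_table->mask` and `10 * flow_table->mask` checks reuse the same value as a table-size bound in the flow-expiry heuristic. A compilable sketch of the indexing idiom:

#include <stdio.h>

int main(void)
{
	unsigned int table_size = 256;		/* must be a power of two */
	unsigned int mask = table_size - 1;	/* what flow_table->mask holds */
	unsigned int hash = 0xdeadbeefu;	/* stand-in for skb_get_hash(skb) */

	/* hash & mask == hash % table_size, but without a division. */
	printf("flow slot = %u of %u\n", hash & mask, table_size);
	return 0;
}
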
5909 * large mask
6132 /* If n->poll_list is not empty, we need to mask irqs */
9085 * @mask: proto down mask
9088 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
9093 if (!mask) {
9096 for_each_set_bit(b, &mask, 32) {
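dev_change_proto_down_reason() above treats @mask as a bit selector: with a zero mask the reason word is taken from @value as-is (the `if (!mask)` branch), otherwise only the bits set in @mask are copied from @value, one at a time via for_each_set_bit(). A userspace sketch of that update, using a plain loop instead of the kernel's for_each_set_bit():

#include <stdio.h>

/* Copy only the bits selected by @mask from @value into @reasons;
 * bits outside @mask keep their old state.
 */
static unsigned int update_reasons(unsigned int reasons, unsigned long mask,
				   unsigned int value)
{
	unsigned int b;

	for (b = 0; b < 32; b++) {
		if (!(mask & (1UL << b)))
			continue;
		if (value & (1U << b))
			reasons |= 1U << b;
		else
			reasons &= ~(1U << b);
	}
	return reasons;
}

int main(void)
{
	/* Start with reasons 0b101; mask selects bits 0-1; value supplies 0b010. */
	printf("0x%x\n", update_reasons(0x5, 0x3, 0x2));	/* prints 0x6 */
	return 0;
}
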
11333 * @mask: mask feature set
11337 * enable anything that is off in @mask. Returns the new feature set.
11340 netdev_features_t one, netdev_features_t mask)
11342 if (mask & NETIF_F_HW_CSUM)
11343 mask |= NETIF_F_CSUM_MASK;
11344 mask |= NETIF_F_VLAN_CHALLENGED;
11346 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
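The last group looks like netdev_increment_features(), which folds one device's feature set (@one) into an aggregate (@all), limited by @mask: if the mask allows NETIF_F_HW_CSUM it is widened to accept any checksum flavour (NETIF_F_CSUM_MASK), NETIF_F_VLAN_CHALLENGED is always let through, and then only the one-for-all/checksum bits of @one that survive the mask are ORed in. A userspace sketch of just the statements shown above, with made-up bit values standing in for the NETIF_F_* constants:

#include <stdio.h>

/* Made-up feature bits for illustration only; not the real NETIF_F_* values. */
#define F_HW_CSUM	  0x01UL
#define F_IP_CSUM	  0x02UL
#define F_CSUM_MASK	  (F_HW_CSUM | F_IP_CSUM)
#define F_VLAN_CHALLENGED 0x04UL
#define F_ONE_FOR_ALL	  0x08UL

static unsigned long increment_features(unsigned long all, unsigned long one,
					unsigned long mask)
{
	if (mask & F_HW_CSUM)
		mask |= F_CSUM_MASK;		/* HW csum permits any csum flavour */
	mask |= F_VLAN_CHALLENGED;		/* always allowed to propagate */

	all |= one & (F_ONE_FOR_ALL | F_CSUM_MASK) & mask;
	return all;
}

int main(void)
{
	/* @one offers IP csum plus a one-for-all feature; @mask only allows csum. */
	printf("0x%lx\n", increment_features(0x0, F_IP_CSUM | F_ONE_FOR_ALL,
					     F_HW_CSUM));	/* prints 0x2 */
	return 0;
}
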