/kernel/linux/linux-6.6/drivers/block/zram/zram_drv.c

    105  static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio)  in zram_set_priority() argument
    107          prio &= ZRAM_COMP_PRIORITY_MASK;  in zram_set_priority()
    114          zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);  in zram_set_priority()
    119          u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1;  in zram_get_priority() local
    121          return prio & ZRAM_COMP_PRIORITY_MASK;  in zram_get_priority()
    900  static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)  in comp_algorithm_set() argument
    903          if (zram->comp_algs[prio] != default_compressor)  in comp_algorithm_set()
    904                  kfree(zram->comp_algs[prio]);  in comp_algorithm_set()
    906          zram->comp_algs[prio] = alg;  in comp_algorithm_set()
    909  static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf)  __comp_algorithm_show() argument
    920  __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)  __comp_algorithm_store() argument
    983          u32 prio;  recomp_algorithm_show() local
   1002          int prio = ZRAM_SECONDARY_COMP;  recomp_algorithm_store() local
   1325          u32 prio;  zram_read_from_zspool() local
   1597  zram_recompress(struct zram *zram, u32 index, struct page *page, u32 threshold, u32 prio, u32 prio_max)  zram_recompress() argument
   1736          u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS;  recompress_store() local
   1998          u32 prio;  zram_destroy_comps() local
   2043          u32 prio;  disksize_store() local
    [all ...]

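The zram hits above pack the per-entry compression priority into a few bits of the table flags word with a shift and a mask, and read it back the same way. Below is a minimal standalone sketch of that bit-packing pattern; the BIT/MASK values are placeholders, not the driver's real ZRAM_COMP_PRIORITY_* constants.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the driver's real constants differ. */
#define COMP_PRIORITY_BIT1  16u   /* lowest bit of the priority field */
#define COMP_PRIORITY_MASK  0x3u  /* field wide enough for 4 priorities */

static void set_priority(uint32_t *flags, uint32_t prio)
{
	prio &= COMP_PRIORITY_MASK;                              /* clamp to field width */
	*flags &= ~(COMP_PRIORITY_MASK << COMP_PRIORITY_BIT1);   /* clear the old value */
	*flags |= prio << COMP_PRIORITY_BIT1;                    /* store the new one */
}

static uint32_t get_priority(uint32_t flags)
{
	return (flags >> COMP_PRIORITY_BIT1) & COMP_PRIORITY_MASK;
}

int main(void)
{
	uint32_t flags = 0;

	set_priority(&flags, 2);
	printf("prio=%u\n", (unsigned int)get_priority(flags));   /* prints: prio=2 */
	return 0;
}
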
/kernel/linux/linux-5.10/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh

    100          local prio
    102          for prio in {0..7}; do
    103                  echo app=$prio,5,$((base + prio))
    172          local prio=$1; shift
    178          local prio2=$($reprio $prio)   # ICMP Request egress prio
    179          local prio3=$($reprio $prio2)  # ICMP Response egress prio
    181          local dscp=$((prio << 2))      # ICMP Request ingress DSCP
    219          log_test "DSCP rewrite: $dscp-(prio ...
    [all ...]

/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c

    145          int prio;  member
    384                   unsigned int prio)  in find_prio() argument
    389                  if (iter_prio->prio == prio)  in find_prio()
    481          struct fs_prio *prio;  in del_sw_flow_table() local
    487          fs_get_obj(prio, ft->node.parent);  in del_sw_flow_table()
    488          prio->num_ft--;  in del_sw_flow_table()
    839   * If skip is true, skip the flow tables in the same prio_chains prio.
    878          struct fs_prio *prio;  in find_next_fwd_ft() local
    882          fs_get_obj(prio, next_n...  in find_next_fwd_ft()
    383  find_prio(struct mlx5_flow_namespace *ns, unsigned int prio)  find_prio() argument
    887  connect_fts_in_prio(struct mlx5_core_dev *dev, struct fs_prio *prio, struct mlx5_flow_table *ft)  connect_fts_in_prio() argument
    924  connect_prev_fts(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, struct fs_prio *prio)  connect_prev_fts() argument
    959  update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio *prio)  update_root_ft_create() argument
   1080  connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, struct fs_prio *prio)  connect_flow_table() argument
   1107  list_add_flow_table(struct mlx5_flow_table *ft, struct fs_prio *prio)  list_add_flow_table() argument
   1215  mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, int prio, int max_fte, u32 level, u16 vport)  mlx5_create_vport_flow_table() argument
   1229  mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns, int prio, u32 level)  mlx5_create_lag_demux_flow_table() argument
   2133          struct fs_prio *prio;  find_next_ft() local
   2198          struct fs_prio *prio;  disconnect_flow_table() local
   2268          int prio = 0;  mlx5_get_flow_namespace() local
   2351  _fs_create_prio(struct mlx5_flow_namespace *ns, unsigned int prio, int num_levels, enum fs_node_type type)  _fs_create_prio() argument
   2372  fs_create_prio_chained(struct mlx5_flow_namespace *ns, unsigned int prio, int num_levels)  fs_create_prio_chained() argument
   2379  fs_create_prio(struct mlx5_flow_namespace *ns, unsigned int prio, int num_levels)  fs_create_prio() argument
   2393  fs_create_namespace(struct fs_prio *prio, int def_miss_act)  fs_create_namespace() argument
   2411  create_leaf_prios(struct mlx5_flow_namespace *ns, int prio, struct init_tree_node *prio_metadata)  create_leaf_prios() argument
   2441  init_root_tree_recursive(struct mlx5_flow_steering *steering, struct init_tree_node *init_node, struct fs_node *fs_parent_node, struct init_tree_node *init_parent_node, int prio)  init_root_tree_recursive() argument
   2558          struct fs_prio *prio;  set_prio_attrs_in_ns() local
   2568  set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)  set_prio_attrs_in_prio() argument
   2593          struct fs_prio *prio;  set_prio_attrs() local
   2728          struct fs_prio *prio;  init_sniffer_tx_root_ns() local
   2741          struct fs_prio *prio;  init_sniffer_rx_root_ns() local
   2819          int prio;  create_fdb_sub_ns_prio_chain() local
   2931          struct fs_prio *prio;  init_egress_acl_root_ns() local
   2944          struct fs_prio *prio;  init_ingress_acl_root_ns() local
    [all ...]

/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h

     71   * a new flow table. Meaning the last flow table in a TYPE_PRIO prio in one
     73   * found in any prio in any next namespace, but skip the entire containing
     74   * TYPE_PRIO_CHAINS prio.
     77   * namespace inside a containing TYPE_PRIO_CHAINS prio.
    229          unsigned int prio;  member
    298  #define fs_for_each_ns_or_ft_reverse(pos, prio) \
    299          list_for_each_entry_reverse(pos, &(prio)->node.children, list)
    301  #define fs_for_each_ns_or_ft(pos, prio) \
    302          list_for_each_entry(pos, (&(prio)->node.children), list)
    307  #define fs_for_each_ns(pos, prio) \
    [all ...]

/kernel/linux/linux-5.10/kernel/sched/rtg/frame_rtg.c

     31          ret = ((task->prio < MAX_RT_PRIO) &&  in is_rtg_rt_task()
    274                                        struct task_struct *task, int prio)  in do_update_frame_task_prio()
    278          bool is_rt_task = (prio != NOT_RT_PRIO);  in do_update_frame_task_prio()
    296                  sp.sched_priority = MAX_USER_RT_PRIO - 1 - prio;  in do_update_frame_task_prio()
    367  static void update_frame_task_prio(struct frame_info *frame_info, int prio)  update_frame_task_prio() argument
    378                  do_update_frame_task_prio(frame_info, thread, prio);  in update_frame_task_prio()
    382  void set_frame_prio(struct frame_info *frame_info, int prio)  set_frame_prio() argument
    388          if (frame_info->prio == prio)  in set_frame_prio()
    391          update_frame_task_prio(frame_info, prio);  in set_frame_prio()
    273  do_update_frame_task_prio(struct frame_info *frame_info, struct task_struct *task, int prio)  do_update_frame_task_prio() argument
    397  do_set_rtg_sched(struct task_struct *task, bool is_rtg, int grp_id, int prio)  do_set_rtg_sched() argument
    445  set_rtg_sched(struct task_struct *task, bool is_rtg, int grp_id, int prio)  set_rtg_sched() argument
    475  set_frame_rtg_thread(int grp_id, struct task_struct *task, bool is_rtg, int prio)  set_frame_rtg_thread() argument
    496  update_frame_thread(struct frame_info *frame_info, int old_prio, int prio, int pid, struct task_struct *old_task)  update_frame_thread() argument
    552          int prio;  update_frame_thread_info() local
    583  do_set_frame_sched_state(struct frame_info *frame_info, struct task_struct *task, bool enable, int prio)  do_set_frame_sched_state() argument
    607          int prio;  set_frame_sched_state() local
    [all ...]

/kernel/linux/linux-6.6/kernel/sched/rtg/frame_rtg.c

     31          ret = ((task->prio < MAX_RT_PRIO) &&  in is_rtg_rt_task()
    274                                        struct task_struct *task, int prio)  in do_update_frame_task_prio()
    278          bool is_rt_task = (prio != NOT_RT_PRIO);  in do_update_frame_task_prio()
    296                  sp.sched_priority = MAX_USER_RT_PRIO - 1 - prio;  in do_update_frame_task_prio()
    367  static void update_frame_task_prio(struct frame_info *frame_info, int prio)  update_frame_task_prio() argument
    378                  do_update_frame_task_prio(frame_info, thread, prio);  in update_frame_task_prio()
    382  void set_frame_prio(struct frame_info *frame_info, int prio)  set_frame_prio() argument
    388          if (frame_info->prio == prio)  in set_frame_prio()
    391          update_frame_task_prio(frame_info, prio);  in set_frame_prio()
    273  do_update_frame_task_prio(struct frame_info *frame_info, struct task_struct *task, int prio)  do_update_frame_task_prio() argument
    397  do_set_rtg_sched(struct task_struct *task, bool is_rtg, int grp_id, int prio)  do_set_rtg_sched() argument
    445  set_rtg_sched(struct task_struct *task, bool is_rtg, int grp_id, int prio)  set_rtg_sched() argument
    475  set_frame_rtg_thread(int grp_id, struct task_struct *task, bool is_rtg, int prio)  set_frame_rtg_thread() argument
    496  update_frame_thread(struct frame_info *frame_info, int old_prio, int prio, int pid, struct task_struct *old_task)  update_frame_thread() argument
    552          int prio;  update_frame_thread_info() local
    583  do_set_frame_sched_state(struct frame_info *frame_info, struct task_struct *task, bool enable, int prio)  do_set_frame_sched_state() argument
    607          int prio;  set_frame_sched_state() local
    [all ...]

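Line 296 in both copies maps a small frame priority onto an RT sched_priority by counting down from the top of the user RT range. A hedged userspace sketch of the same arithmetic, assuming the usual MAX_USER_RT_PRIO of 100 and the standard sched_setscheduler(2) call (the kernel code sets the policy internally rather than through the syscall):

#include <sched.h>
#include <stdio.h>
#include <sys/types.h>

/* Frame prio 0 maps to the highest user RT priority (99), prio 1 to 98, ... */
#define MAX_USER_RT_PRIO 100

static int set_frame_rt_prio(pid_t pid, int frame_prio)
{
	struct sched_param sp = {
		.sched_priority = MAX_USER_RT_PRIO - 1 - frame_prio,
	};

	/* SCHED_FIFO requires CAP_SYS_NICE (or root). */
	return sched_setscheduler(pid, SCHED_FIFO, &sp);
}

int main(void)
{
	if (set_frame_rt_prio(0, 1))    /* frame prio 1 -> sched_priority 98 */
		perror("sched_setscheduler");
	return 0;
}
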
/kernel/linux/linux-5.10/arch/powerpc/kvm/book3s_xive.c

    175  int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,  kvmppc_xive_attach_escalation() argument
    179          struct xive_q *q = &xc->queues[prio];  in kvmppc_xive_attach_escalation()
    184          if (xc->esc_virq[prio])  in kvmppc_xive_attach_escalation()
    188          xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);  in kvmppc_xive_attach_escalation()
    189          if (!xc->esc_virq[prio]) {  in kvmppc_xive_attach_escalation()
    191                  prio, xc->server_num);  in kvmppc_xive_attach_escalation()
    200                  vcpu->kvm->arch.lpid, xc->server_num, prio);  in kvmppc_xive_attach_escalation()
    203                  prio, xc->server_num);  in kvmppc_xive_attach_escalation()
    208          pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);  in kvmppc_xive_attach_escalation()
    245  xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)  xive_provision_queue() argument
    281  xive_check_provisioning(struct kvm *kvm, u8 prio)  xive_check_provisioning() argument
    313  xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)  xive_inc_q_pending() argument
    333  xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)  xive_try_pick_queue() argument
    353  kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)  kvmppc_xive_select_target() argument
    471  xive_finish_unmask(struct kvmppc_xive *xive, struct kvmppc_xive_src_block *sb, struct kvmppc_xive_irq_state *state, u8 prio)  xive_finish_unmask() argument
    528  xive_target_interrupt(struct kvm *kvm, struct kvmppc_xive_irq_state *state, u32 server, u8 prio)  xive_target_interrupt() argument
    906          u8 prio;  kvmppc_xive_set_mapped() local
   1001          u8 prio;  kvmppc_xive_clr_mapped() local
   1528          u64 val, prio;  xive_get_source() local
    [all ...]

/kernel/linux/linux-6.6/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh

     80          check_err $? "prio PFC is '$current', expected '$expect'"
    124          dcb ets set dev $swp prio-tc 0:0 1:2 2:4 3:6 4:1 5:3 6:5 7:7
    130          dcb ets set dev $swp prio-tc all:0
    135          dcb buffer set dev $swp prio-buffer 0:1 1:3 2:5 3:7 4:0 5:2 6:4 7:6 2>/dev/null
    177          dcb ets set dev $swp prio-tc all:0 5:1 6:2 7:3
    196          dcb pfc set dev $swp prio-pfc all:off 5:on 6:on 7:on delay 0
    224          dcb pfc set dev $swp prio-pfc all:off delay 0
    225          dcb ets set dev $swp prio-tc all:0
    245          dcb ets set dev $swp prio-tc 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
    251          dcb buffer set dev $swp prio...
    [all ...]

/kernel/linux/linux-6.6/drivers/net/ethernet/marvell/octeontx2/nic/qos.c

     97          /* configure prio/quantum */  in otx2_config_sched_shaping()
     99          cfg->regval[*num_regs] = node->prio << 24 |  in otx2_config_sched_shaping()
    435          if (node->prio > parent->max_static_prio)  in otx2_qos_add_child_node()
    436                  parent->max_static_prio = node->prio;  in otx2_qos_add_child_node()
    440                  if (tmp_node->prio == node->prio &&  in otx2_qos_add_child_node()
    443                  if (tmp_node->prio > node->prio) {  in otx2_qos_add_child_node()
    471          txschq_node->prio = 0;  in otx2_qos_alloc_txschq_node()
    500                               u16 classid, u32 prio, u64 ...  in otx2_qos_sw_create_leaf_node()
    498  otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf, struct otx2_qos_node *parent, u16 classid, u32 prio, u64 rate, u64 ceil, u32 quantum, u16 qid, bool static_cfg)  otx2_qos_sw_create_leaf_node() argument
   1111  otx2_qos_validate_dwrr_cfg(struct otx2_qos_node *parent, struct netlink_ext_ack *extack, struct otx2_nic *pfvf, u64 prio, u64 quantum)  otx2_qos_validate_dwrr_cfg() argument
   1134  otx2_qos_validate_configuration(struct otx2_qos_node *parent, struct netlink_ext_ack *extack, struct otx2_nic *pfvf, u64 prio, bool static_cfg)  otx2_qos_validate_configuration() argument
   1153  otx2_reset_dwrr_prio(struct otx2_qos_node *parent, u64 prio)  otx2_reset_dwrr_prio() argument
   1165  is_qos_node_dwrr(struct otx2_qos_node *parent, struct otx2_nic *pfvf, u64 prio)  is_qos_node_dwrr() argument
   1202  otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid, u32 parent_classid, u64 rate, u64 ceil, u64 prio, u32 quantum, struct netlink_ext_ack *extack)  otx2_qos_leaf_alloc_queue() argument
   1341  otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid, u16 child_classid, u64 rate, u64 ceil, u64 prio, u32 quantum, struct netlink_ext_ack *extack)  otx2_qos_leaf_to_inner() argument
   1482          u64 prio;  otx2_qos_leaf_del() local
   1528          u64 prio;  otx2_qos_leaf_del_last() local
    [all ...]

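The otx2_qos_add_child_node() matches show the pattern: walk the parent's children, reject a clash on the same static prio, and insert the new node before the first entry with a larger prio so the list stays sorted. A generic sketch of that insert-in-priority-order idea on a plain singly linked list (not the driver's list_head-based structures):

#include <stdio.h>

struct qnode {
	unsigned int prio;
	struct qnode *next;
};

/* Insert 'node' so the list stays ordered by ascending prio.
 * Returns -1 if an entry with the same prio already exists. */
static int add_child_sorted(struct qnode **head, struct qnode *node)
{
	struct qnode **pos = head;

	while (*pos && (*pos)->prio < node->prio)
		pos = &(*pos)->next;

	if (*pos && (*pos)->prio == node->prio)
		return -1;                      /* duplicate static priority */

	node->next = *pos;
	*pos = node;
	return 0;
}

int main(void)
{
	struct qnode a = { .prio = 3 }, b = { .prio = 1 }, c = { .prio = 3 };
	struct qnode *head = NULL;

	add_child_sorted(&head, &a);
	add_child_sorted(&head, &b);
	if (add_child_sorted(&head, &c))
		printf("prio %u already taken\n", c.prio);

	for (struct qnode *n = head; n; n = n->next)
		printf("prio %u\n", n->prio);   /* prints 1, then 3 */
	return 0;
}
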
/kernel/linux/linux-6.6/tools/testing/selftests/drivers/net/ocelot/basic_qos.sh

    190          local prio
    192          prio="$(dcb -j app show dev ${if_name} default-prio | \
    194          if [ -z "${prio}" ]; then
    195                  prio=0
    198          echo ${prio}
    206          dcb app replace dev ${swp1} default-prio 5
    210          dcb app replace dev ${swp1} default-prio ${orig}
    227          dcb app add dev ${swp1} dscp-prio CS4:4
    229          dcb app del dev ${swp1} dscp-prio CS...
    [all ...]

/kernel/linux/linux-5.10/arch/powerpc/platforms/cell/spufs/sched.c

     74  #define SCALE_PRIO(x, prio) \
     75          max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)
     87          if (ctx->prio < NORMAL_PRIO)  in spu_set_timeslice()
     88                  ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);  in spu_set_timeslice()
     90                  ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);  in spu_set_timeslice()
    115           * policy so we have to look at ->prio in this case.  in __spu_update_sched_info()
    117          if (rt_prio(current->prio))  in __spu_update_sched_info()
    118                  ctx->prio = current->prio;  in __spu_update_sched_info()
    120                  ctx->prio ...  in __spu_update_sched_info()
    513          int prio = ctx->prio;  __spu_del_from_rq() local
    825  grab_runnable_context(int prio, int node)  grab_runnable_context() argument
    [all ...]

/kernel/linux/linux-6.6/arch/powerpc/platforms/cell/spufs/sched.c

     74  #define SCALE_PRIO(x, prio) \
     75          max(x * (MAX_PRIO - prio) / (NICE_WIDTH / 2), MIN_SPU_TIMESLICE)
     87          if (ctx->prio < NORMAL_PRIO)  in spu_set_timeslice()
     88                  ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);  in spu_set_timeslice()
     90                  ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);  in spu_set_timeslice()
    115           * policy so we have to look at ->prio in this case.  in __spu_update_sched_info()
    117          if (rt_prio(current->prio))  in __spu_update_sched_info()
    118                  ctx->prio = current->prio;  in __spu_update_sched_info()
    120                  ctx->prio ...  in __spu_update_sched_info()
    507          int prio = ctx->prio;  __spu_del_from_rq() local
    819  grab_runnable_context(int prio, int node)  grab_runnable_context() argument
    [all ...]

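SCALE_PRIO() turns a context priority into a timeslice: the lower (more important) the prio value, the longer the slice, with a floor at MIN_SPU_TIMESLICE. The 5.10 copy divides by MAX_USER_PRIO / 2 and the 6.6 copy by NICE_WIDTH / 2, both of which come out to 20 with the stock constants. A standalone version of the arithmetic, with the usual priority-range constants filled in and made-up timeslice bases (the real DEF_SPU_TIMESLICE and MIN_SPU_TIMESLICE differ):

#include <stdio.h>

#define MAX_PRIO            140  /* 100 RT levels + 40 nice levels */
#define NICE_WIDTH           40
#define MIN_SPU_TIMESLICE     1  /* illustrative floor, in ticks */
#define DEF_SPU_TIMESLICE    10  /* illustrative default, in ticks */

#define MAX_T(a, b) ((a) > (b) ? (a) : (b))
#define SCALE_PRIO(x, prio) \
	MAX_T((x) * (MAX_PRIO - (prio)) / (NICE_WIDTH / 2), MIN_SPU_TIMESLICE)

int main(void)
{
	/* prio 120 is the nice-0 priority; smaller prio values get longer slices. */
	for (int prio = 100; prio <= 139; prio += 13)
		printf("prio %3d -> timeslice %2d\n",
		       prio, SCALE_PRIO(DEF_SPU_TIMESLICE, prio));
	return 0;
}
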
/kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/nvkm/core/intr.c

    171          int prio, leaf;  in nvkm_intr() local
    195          for (prio = 0; prio < ARRAY_SIZE(device->intr.prio); prio++) {  in nvkm_intr()
    196                  list_for_each_entry(inth, &device->intr.prio[prio], head) {  in nvkm_intr()
    273          enum nvkm_intr_prio prio;  in nvkm_intr_subdev_add_dev() local
    281                  prio = NVKM_INTR_PRIO_VBLANK;  in nvkm_intr_subdev_add_dev()
    283                  prio ...  in nvkm_intr_subdev_add_dev()
    421  nvkm_inth_add(struct nvkm_intr *intr, enum nvkm_intr_type type, enum nvkm_intr_prio prio, struct nvkm_subdev *subdev, nvkm_inth_func func, struct nvkm_inth *inth)  nvkm_inth_add() argument
    [all ...]

/kernel/linux/linux-6.6/tools/tracing/rtla/src/utils.c

    395   *   o:<prio>
    396   *   O:<prio>
    398   *   r:<prio>
    399   *   R:<prio>
    401   *   f:<prio>
    402   *   F:<prio>
    409          long prio;  in parse_prio() local
    441          /* f:prio */  in parse_prio()
    442                  prio = get_long_after_colon(arg);  in parse_prio()
    443                  if (prio ...  in parse_prio()
    [all ...]

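rtla's parse_prio() accepts a policy letter, a colon, and a number, e.g. f:10 for SCHED_FIFO. The sketch below is a rough standalone re-implementation of that string format; the accepted letters and the range checks are assumptions drawn from the comment block above, not rtla's exact validation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse "<policy>:<prio>", e.g. "f:10" (FIFO), "r:5" (RR), "o:-2" (OTHER/nice).
 * Returns 0 on success and fills *policy / *prio. */
static int parse_prio_arg(const char *arg, char *policy, long *prio)
{
	const char *colon = strchr(arg, ':');
	char *end;

	if (!colon || colon == arg)
		return -1;

	*policy = arg[0];
	*prio = strtol(colon + 1, &end, 10);
	if (end == colon + 1 || *end != '\0')
		return -1;

	switch (*policy) {
	case 'f': case 'F':             /* SCHED_FIFO,  prio 1..99 */
	case 'r': case 'R':             /* SCHED_RR,    prio 1..99 */
		return (*prio >= 1 && *prio <= 99) ? 0 : -1;
	case 'o': case 'O':             /* SCHED_OTHER, nice -20..19 */
		return (*prio >= -20 && *prio <= 19) ? 0 : -1;
	default:
		return -1;
	}
}

int main(int argc, char **argv)
{
	char pol;
	long prio;

	if (argc > 1 && !parse_prio_arg(argv[1], &pol, &prio))
		printf("policy %c, prio %ld\n", pol, prio);
	return 0;
}
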
/kernel/linux/linux-5.10/include/linux/parman.h

     68  void parman_prio_init(struct parman *parman, struct parman_prio *prio,
     70  void parman_prio_fini(struct parman_prio *prio);
     71  int parman_item_add(struct parman *parman, struct parman_prio *prio,
     73  void parman_item_remove(struct parman *parman, struct parman_prio *prio,
|
/kernel/linux/linux-5.10/include/linux/sched/prio.h

     13   * values are inverted: lower p->prio value means higher priority.
     34  #define PRIO_TO_NICE(prio)      ((prio) - DEFAULT_PRIO)
     56  static inline long rlimit_to_nice(long prio)  rlimit_to_nice() argument
     58          return (MAX_NICE - prio + 1);  in rlimit_to_nice()
|
/kernel/linux/linux-6.6/include/linux/parman.h

     68  void parman_prio_init(struct parman *parman, struct parman_prio *prio,
     70  void parman_prio_fini(struct parman_prio *prio);
     71  int parman_item_add(struct parman *parman, struct parman_prio *prio,
     73  void parman_item_remove(struct parman *parman, struct parman_prio *prio,
|
/kernel/linux/linux-6.6/include/linux/sched/prio.h

     13   * values are inverted: lower p->prio value means higher priority.
     34  #define PRIO_TO_NICE(prio)      ((prio) - DEFAULT_PRIO)
     56  static inline long rlimit_to_nice(long prio)  rlimit_to_nice() argument
     58          return (MAX_NICE - prio + 1);  in rlimit_to_nice()

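PRIO_TO_NICE() and rlimit_to_nice() are pure arithmetic around DEFAULT_PRIO and MAX_NICE. A small worked example with the stock constant values (MAX_RT_PRIO 100, nice width 40, so DEFAULT_PRIO is 120):

#include <stdio.h>

#define MAX_RT_PRIO   100
#define NICE_WIDTH     40
#define MAX_NICE       19
#define DEFAULT_PRIO  (MAX_RT_PRIO + NICE_WIDTH / 2)   /* 120 */

#define NICE_TO_PRIO(nice) ((nice) + DEFAULT_PRIO)
#define PRIO_TO_NICE(prio) ((prio) - DEFAULT_PRIO)

/* Mirrors rlimit_to_nice(): RLIMIT_NICE stores 40..1 for nice -20..19. */
static long rlimit_to_nice(long prio)
{
	return MAX_NICE - prio + 1;
}

int main(void)
{
	printf("nice   0 -> prio %d\n", NICE_TO_PRIO(0));       /* 120 */
	printf("nice -20 -> prio %d\n", NICE_TO_PRIO(-20));     /* 100 */
	printf("prio 139 -> nice %d\n", PRIO_TO_NICE(139));     /*  19 */
	printf("rlimit 40 -> nice %ld\n", rlimit_to_nice(40));  /* -20 */
	return 0;
}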
|
/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h

     83   * a new flow table. Meaning the last flow table in a TYPE_PRIO prio in one
     85   * found in any prio in any next namespace, but skip the entire containing
     86   * TYPE_PRIO_CHAINS prio.
     89   * namespace inside a containing TYPE_PRIO_CHAINS prio.
    252          unsigned int prio;  member
    333  #define fs_for_each_ns_or_ft_reverse(pos, prio) \
    334          list_for_each_entry_reverse(pos, &(prio)->node.children, list)
    336  #define fs_for_each_ns_or_ft(pos, prio) \
    337          list_for_each_entry(pos, (&(prio)->node.children), list)
    342  #define fs_for_each_ns(pos, prio) \
    [all ...]

/kernel/linux/linux-6.6/drivers/gpu/drm/i915/i915_scheduler.c

     60  i915_sched_lookup_priolist(struct i915_sched_engine *sched_engine, int prio)  i915_sched_lookup_priolist() argument
     70                  prio = I915_PRIORITY_NORMAL;  in i915_sched_lookup_priolist()
     79                  if (prio > p->priority) {  in i915_sched_lookup_priolist()
     81                  } else if (prio < p->priority) {  in i915_sched_lookup_priolist()
     89          if (prio == I915_PRIORITY_NORMAL) {  in i915_sched_lookup_priolist()
     95                  prio = I915_PRIORITY_NORMAL; /* recurses just once */  in i915_sched_lookup_priolist()
    110          p->priority = prio;  in i915_sched_lookup_priolist()
    158          const int prio = max(attr->priority, node->attr.priority);  in __i915_schedule() local
    167          GEM_BUG_ON(prio == I915_PRIORITY_INVALID);  in __i915_schedule()
    179           *      static void update_priorities(struct i915_sched_node *node, prio) {  in __i915_schedule()
    283                          sched_engine->kick_backend(node_to_request(node), prio);  in __i915_schedule()
    [all ...]

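i915_sched_lookup_priolist() finds, or creates on demand, the bucket that holds all requests queued at a given priority, with a fast path for I915_PRIORITY_NORMAL. The sketch below shows only that find-or-create idea on a sorted singly linked list; the driver itself keeps a more involved structure (a cached rbtree of buckets), so treat this as an illustration, not the i915 data structure.

#include <stdio.h>
#include <stdlib.h>

struct priolist {
	int priority;
	struct priolist *next;
	/* the requests queued at this priority would hang off here */
};

/* Find the bucket for 'prio', creating it (kept sorted, highest priority
 * first) if it does not exist yet.  Returns NULL only on allocation failure. */
static struct priolist *lookup_priolist(struct priolist **head, int prio)
{
	struct priolist **pos = head, *p;

	while (*pos && (*pos)->priority > prio)
		pos = &(*pos)->next;
	if (*pos && (*pos)->priority == prio)
		return *pos;

	p = calloc(1, sizeof(*p));
	if (!p)
		return NULL;
	p->priority = prio;
	p->next = *pos;
	*pos = p;
	return p;
}

int main(void)
{
	struct priolist *head = NULL;

	lookup_priolist(&head, 0);   /* default-priority bucket */
	lookup_priolist(&head, 2);   /* higher priority, placed first */
	lookup_priolist(&head, 0);   /* reuses the existing bucket */

	for (struct priolist *p = head; p; p = p->next)
		printf("bucket prio %d\n", p->priority);   /* 2, then 0 */
	return 0;
}
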
/kernel/linux/linux-5.10/arch/ia64/kernel/sys_ia64.c

     73          long prio;  in ia64_getpriority() local
     75          prio = sys_getpriority(which, who);  in ia64_getpriority()
     76          if (prio >= 0) {  in ia64_getpriority()
     78                  prio = 20 - prio;  in ia64_getpriority()
     80          return prio;  in ia64_getpriority()

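sys_getpriority() reports the nice value biased into 1..40 so that negative returns stay available for error codes; the ia64 wrapper above undoes that bias before returning to its ABI. The round trip in plain C:

#include <stdio.h>

/* Kernel-internal encoding: nice -20..19 is reported as 40..1. */
static long encode_getpriority(long nice) { return 20 - nice; }
static long decode_getpriority(long ret)  { return 20 - ret;  }

int main(void)
{
	for (long nice = -20; nice <= 19; nice += 13) {
		long ret = encode_getpriority(nice);
		printf("nice %3ld -> syscall result %2ld -> decoded %3ld\n",
		       nice, ret, decode_getpriority(ret));
	}
	return 0;
}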
|
/kernel/linux/linux-5.10/arch/powerpc/include/asm/xive.h

    114  int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
    116  int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
    118  void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
    128  int xive_native_get_queue_info(u32 vp_id, uint32_t prio,
    135  int xive_native_get_queue_state(u32 vp_id, uint32_t prio, u32 *qtoggle,
    137  int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle,
|
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c

     36                                        enum drm_sched_priority *prio)  in amdgpu_to_sched_priority()
     40                  *prio = DRM_SCHED_PRIORITY_HIGH;  in amdgpu_to_sched_priority()
     43                  *prio = DRM_SCHED_PRIORITY_HIGH;  in amdgpu_to_sched_priority()
     46                  *prio = DRM_SCHED_PRIORITY_NORMAL;  in amdgpu_to_sched_priority()
     50                  *prio = DRM_SCHED_PRIORITY_MIN;  in amdgpu_to_sched_priority()
     53                  *prio = DRM_SCHED_PRIORITY_UNSET;  in amdgpu_to_sched_priority()
     35  amdgpu_to_sched_priority(int amdgpu_priority, enum drm_sched_priority *prio)  amdgpu_to_sched_priority() argument
|
/kernel/linux/linux-6.6/arch/powerpc/include/asm/xive.h

    116  int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
    118  int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
    120  void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
    131  int xive_native_get_queue_info(u32 vp_id, uint32_t prio,
    138  int xive_native_get_queue_state(u32 vp_id, uint32_t prio, u32 *qtoggle,
    140  int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle,
|
/kernel/linux/linux-5.10/kernel/sched/cpupri.c

     27  /* Convert between a 140 based task->prio, and our 102 based cpupri */
     28  static int convert_prio(int prio)  convert_prio() argument
     32          if (prio == CPUPRI_INVALID)  in convert_prio()
     34          else if (prio == MAX_PRIO)  in convert_prio()
     36          else if (prio >= MAX_RT_PRIO)  in convert_prio()
     39                  cpupri = MAX_RT_PRIO - prio + 1;  in convert_prio()
     63           * memory barriers, that can only happen when the highest prio  in __cpupri_find()
    127          int task_pri = convert_prio(p->prio);  in cpupri_find_fitness()
    227           * of the new prio is seen before we decrement the  in cpupri_set()
    228           * old prio ...  in cpupri_set()
    [all ...]

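convert_prio() collapses the 140-value task priority space into the cpupri scale: dedicated slots for invalid, idle, and normal, then one slot per RT priority, with a larger cpupri meaning a more important task. A standalone version of that mapping; the slot numbering below follows the excerpt's formula, while the exact CPUPRI_* enum values can differ between kernel versions.

#include <stdio.h>

#define MAX_RT_PRIO     100
#define MAX_PRIO        140

#define CPUPRI_INVALID  (-1)
#define CPUPRI_IDLE       0
#define CPUPRI_NORMAL     1
/* RT tasks land on 2..101 via cpupri = MAX_RT_PRIO - prio + 1 */

static int convert_prio(int prio)
{
	if (prio == CPUPRI_INVALID)
		return CPUPRI_INVALID;
	if (prio == MAX_PRIO)
		return CPUPRI_IDLE;
	if (prio >= MAX_RT_PRIO)
		return CPUPRI_NORMAL;
	return MAX_RT_PRIO - prio + 1;   /* prio 0 (highest RT) -> 101 */
}

int main(void)
{
	printf("prio 120 -> cpupri %d\n", convert_prio(120));  /* normal task */
	printf("prio  99 -> cpupri %d\n", convert_prio(99));   /* lowest RT -> 2 */
	printf("prio   0 -> cpupri %d\n", convert_prio(0));    /* highest RT -> 101 */
	return 0;
}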