/device/soc/rockchip/common/sdk_linux/kernel/sched/
H A D | pelt.c |
    142  static __always_inline u32 accumulate_sum(u64 delta, struct sched_avg *sa, unsigned long load, unsigned long runnable, in accumulate_sum() argument
    163  if (load) { in accumulate_sum()
    167  * if (!load) in accumulate_sum()
    179  if (load) { in accumulate_sum()
    180  sa->load_sum += load * contrib; in accumulate_sum()
    205  * following representation of historical load:
    211  * This means that the contribution to load ~32ms ago (u_32) will be weighted
    212  * approximately half as much as the contribution to load within the last ms
    220  static __always_inline int ___update_load_sum(u64 now, struct sched_avg *sa, unsigned long load, unsigned long runnable, in ___update_load_sum() argument
    257  if (!load) { in ___update_load_sum()
    299  ___update_load_avg(struct sched_avg *sa, unsigned long load) ___update_load_avg() argument
    [all...]
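The pelt.c comment lines matched above describe the decay rule behind struct sched_avg: each ~1 ms period contributes u_n * y^n, with y chosen so that y^32 is about 0.5, i.e. the contribution from roughly 32 ms ago counts about half as much as the most recent one. The following is a minimal floating-point sketch of that series; the kernel itself uses fixed-point arithmetic and precomputed decay tables, so this only illustrates the shape of the sum.

#include <math.h>
#include <stdio.h>

int main(void)
{
    const double y = pow(0.5, 1.0 / 32.0);   /* decay factor: y^32 == 0.5 */
    double sum = 0.0;
    int n;

    /*
     * Sum u_0 + u_1*y + u_2*y^2 + ... for a task that was runnable in
     * every one of the last 64 periods (u_n == 1 for all n).
     */
    for (n = 0; n < 64; n++) {
        sum += pow(y, n);
    }

    printf("y = %.5f, y^32 = %.3f\n", y, pow(y, 32.0));
    printf("64-term sum = %.2f, limit 1/(1-y) = %.2f\n", sum, 1.0 / (1.0 - y));
    return 0;
}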
H A D | fair.c |
    710  if (unlikely(se->load.weight != NICE_0_LOAD)) { in calc_delta_fair()
    711  delta = fair_calc_delta(delta, NICE_0_LOAD, &se->load); in calc_delta_fair()
    752  struct load_weight *load; in sched_slice() local
    756  load = &cfs_rq->load; in sched_slice()
    759  lw = cfs_rq->load; in sched_slice()
    761  update_load_add(&lw, se->load.weight); in sched_slice()
    762  load = &lw; in sched_slice()
    764  slice = fair_calc_delta(slice, se->load.weight, load); in sched_slice()
    1568 unsigned long load; global() member
    1807 long load; task_numa_compare() local
    2016 long src_load, dst_load, load; task_numa_find_cpu() local
    3236 struct load_weight *load = &se->load; reweight_task() local
    3320 long tg_weight, tg_shares, load, shares; calc_group_shares() local
    6021 unsigned int load; cpu_load_without() local
    6220 unsigned long load, min_load = ULONG_MAX; find_idlest_group_cpu() local
    8146 unsigned long util, load; detach_tasks() local
    8504 unsigned long load; update_cfs_rq_h_load() local
    9923 unsigned long capacity, load, util; find_busiest_queue() local
    [all...]
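The calc_delta_fair() and sched_slice() matches are where CFS turns load weights into time: an entity receives a share of the scheduling period proportional to its weight, and its vruntime advances inversely to it. Below is a rough integer-arithmetic model of both ideas, using an assumed 6 ms period and made-up weights; the kernel avoids the explicit divisions by using the precomputed inverse weights stored in struct load_weight.

#include <stdio.h>

#define NICE_0_LOAD 1024ULL

/* vruntime advances inversely to weight: heavier entities age more slowly */
static unsigned long long calc_delta_fair_model(unsigned long long delta_ns,
                                                unsigned long long weight)
{
    return delta_ns * NICE_0_LOAD / weight;
}

int main(void)
{
    unsigned long long period_ns = 6000000;    /* 6 ms scheduling period       */
    unsigned long long w_a = 1024, w_b = 2048; /* nice-0 weight vs. double it  */

    /* sched_slice()-style split: each entity gets weight/total of the period */
    printf("slice A = %llu ns\n", period_ns * w_a / (w_a + w_b));  /* 2 ms */
    printf("slice B = %llu ns\n", period_ns * w_b / (w_a + w_b));  /* 4 ms */

    /* calc_delta_fair()-style scaling: 1 ms of wall time on the heavier
     * entity advances its vruntime by only 0.5 ms */
    printf("vruntime delta for B = %llu ns\n",
           calc_delta_fair_model(1000000, w_b));
    return 0;
}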
H A D | core.c |
    129  * - set_user_nice(): p->se.load, p->*prio
    131  * p->se.load, p->rt_priority,
    912  struct load_weight *load = &p->se.load; in set_load_weight() local
    918  load->weight = scale_load(WEIGHT_IDLEPRIO); in set_load_weight()
    919  load->inv_weight = WMULT_IDLEPRIO; in set_load_weight()
    924  * SCHED_OTHER tasks have to update their load when changing their in set_load_weight()
    930  load->weight = scale_load(sched_prio_to_weight[prio]); in set_load_weight()
    931  load->inv_weight = sched_prio_to_wmult[prio]; in set_load_weight()
    2104 * for groups of tasks (ie. cpuset), so that load balancing in __set_cpus_allowed_ptr()
    [all...]
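set_load_weight(), matched above, maps a task's nice level to a (weight, inv_weight) pair through the sched_prio_to_weight[] and sched_prio_to_wmult[] tables. A simplified sketch of that mapping follows; the three weights reproduced (88761, 1024, 15) are real table entries for nice -20, 0 and 19, but the struct and helper names are illustrative only.

#include <stdio.h>

struct load_weight_model {
    unsigned long weight;
    unsigned long inv_weight;   /* 2^32 / weight, precomputed by the kernel */
};

/* Only three of the 40 sched_prio_to_weight[] entries are reproduced here. */
static unsigned long weight_for_nice(int nice)
{
    if (nice <= -20)
        return 88761;   /* nice -20 */
    if (nice >= 19)
        return 15;      /* nice  19 */
    return 1024;        /* nice   0 */
}

int main(void)
{
    struct load_weight_model lw;
    int nice = 0;

    lw.weight = weight_for_nice(nice);
    lw.inv_weight = (unsigned long)((1ULL << 32) / lw.weight);

    /* nice 0 -> weight 1024, inv_weight 4194304 (the kernel's WMULT value) */
    printf("nice %d -> weight %lu, inv_weight %lu\n", nice, lw.weight, lw.inv_weight);
    return 0;
}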
H A D | sched.h |
    175  * The extra resolution improves shares distribution and load balancing of
    204  * Task weight (visible to users) and its load (invisible to users) have
    591  struct load_weight load; member
    619  * CFS load tracking
    659  * This list is used during load balance.
    829  * XXX we want to get rid of these helpers and use the full load resolution.
    833  return scale_load_down(se->load.weight); in se_weight()
    867  * Indicate pullable load on at least one CPU, e.g:
    976  * (such as the load balancing or the thread migration code), lock
    985  * remote CPUs use both these fields when doing load calculation
    [all...]
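Several of the sched.h hits concern the extra fixed-point resolution used for load weights: on 64-bit kernels the user-visible weights are shifted up by SCHED_FIXEDPOINT_SHIFT bits internally, and helpers such as se_weight() shift them back down where the extra bits are not needed. A minimal model of that pair of macros, assuming the 64-bit configuration with a 10-bit shift; the in-tree scale_load_down() additionally clamps small non-zero weights, which is omitted here.

#include <stdio.h>

#define SCHED_FIXEDPOINT_SHIFT 10   /* extra resolution on 64-bit kernels */

#define scale_load(w)       ((unsigned long)(w) << SCHED_FIXEDPOINT_SHIFT)
#define scale_load_down(w)  ((unsigned long)(w) >> SCHED_FIXEDPOINT_SHIFT)

int main(void)
{
    unsigned long nice0 = 1024;                  /* user-visible weight     */
    unsigned long internal = scale_load(nice0);  /* 1048576 held internally */

    printf("internal = %lu, user-visible again = %lu\n",
           internal, scale_load_down(internal));
    return 0;
}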
/device/soc/hisilicon/hi3861v100/sdk_liteos/platform/include/ |
H A D | hi_hwtimer.h |
    135  hi_u32 hi_hwtimer_get_load(hi_timer_id timer_id, hi_u32 *load);
    142  hi_u32 hi_hwrtc_get_load(hi_rtc_id rtc_id, hi_u32 *load);
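A hedged usage sketch for the two accessors above. It assumes the usual hi3861 SDK conventions: the hi_u32 return value is an error code with 0 meaning success, and timer/RTC instances are selected by enumerators such as HI_TIMER_ID_0 and HI_RTC_ID_0; those enumerator names are assumptions, so check hi_hwtimer.h for the exact identifiers in your SDK.

#include <hi_hwtimer.h>

static void dump_timer_loads(void)
{
    hi_u32 load = 0;

    /* HI_TIMER_ID_0 / HI_RTC_ID_0 are assumed enumerator names. */
    if (hi_hwtimer_get_load(HI_TIMER_ID_0, &load) == 0) {
        /* 'load' now holds the hardware timer's current load (reload) value */
    }

    if (hi_hwrtc_get_load(HI_RTC_ID_0, &load) == 0) {
        /* same idea for the hardware RTC */
    }
}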
|
/device/qemu/SmartL_E802/liteos_m/board/hals/csky_driver/src/ |
H A D | dw_timer.c |
    253  uint32_t load; in csi_timer_start() local
    260  load = (uint32_t)(timer_priv->timeout * min_us); in csi_timer_start()
    262  load = (uint32_t)(((timer_priv->timeout) * drv_get_sys_freq()) / 1000000); in csi_timer_start()
    268  addr->TxLoadCount = 0xffffffff; /* load time(us) */ in csi_timer_start()
    271  addr->TxLoadCount = 0xffffffff; /* load time(us) */ in csi_timer_start()
    273  addr->TxLoadCount = load; /* load time(us) */ in csi_timer_start()
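The csi_timer_start() matches compute the hardware reload value from a timeout in microseconds and the timer input clock, load = timeout_us * f_sys / 1e6, with 0xffffffff written as a free-running fallback. A standalone version of that arithmetic; the 24 MHz clock below is an assumed example, not a value taken from the driver.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t timeout_us  = 1000;       /* 1 ms timeout               */
    uint64_t sys_freq_hz = 24000000;   /* assumed 24 MHz timer clock */

    /* ticks needed for the timeout: timeout * f / 1e6, as in csi_timer_start() */
    uint32_t load = (uint32_t)(timeout_us * sys_freq_hz / 1000000ULL);

    printf("TxLoadCount = %u ticks\n", (unsigned)load);   /* 24000 */
    return 0;
}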
|
/device/soc/hisilicon/hi3861v100/sdk_liteos/boot/commonboot/ |
H A D | transfer.h | 22 #include <load.h>
|
/device/soc/rockchip/rk3588/kernel/drivers/video/rockchip/rga3/ |
H A D | rga_debugger.c |
    144  int load; in rga_load_show() local
    148  seq_printf(m, "================= load ==================\n"); in rga_load_show()
    162  load = (busy_time_total * 100000 / RGA_LOAD_INTERVAL); in rga_load_show()
    163  seq_printf(m, "load = %d", load); in rga_load_show()
    252  {"load", rga_load_show, NULL, NULL},
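rga_load_show() derives a utilization figure from the busy time accumulated over a fixed sampling window, busy_time_total * 100000 / RGA_LOAD_INTERVAL. Assuming both values are in microseconds and the window is one second (an assumption, not a value read from the driver), the result is a load figure where 100000 means fully busy; the sketch below reproduces that arithmetic with placeholder numbers.

#include <stdio.h>

#define LOAD_INTERVAL_US 1000000ULL   /* assumed 1 s sampling window */

int main(void)
{
    unsigned long long busy_time_total = 250000;   /* 250 ms busy in the window */

    /* Same shape as rga_load_show(): busy/interval scaled by 100000, so a
     * fully busy window yields 100000. */
    long load = (long)(busy_time_total * 100000ULL / LOAD_INTERVAL_US);

    printf("load = %ld (%.3f%% busy under these assumptions)\n",
           load, load / 1000.0);
    return 0;
}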
|
/device/soc/hisilicon/hi3861v100/sdk_liteos/build/scripts/ |
H A D | scons_app.py | 76 self.settings = json.load(app_cfg)
|
/device/soc/rockchip/common/sdk_linux/include/drm/ |
H A D | drm_drv.h |
    165  * @load
    180  int (*load)(struct drm_device *, unsigned long flags); member
    247  * Reverse the effects of the driver load callback. Ideally,
    249  * reverse order of the initialization. Similarly to the load
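These drm_drv.h lines document the legacy ->load()/->unload() hooks, which run after the device has already been registered; that is why drm_file.c (further down in this listing) falls back to the global DRM mutex whenever either hook is set. Below is a hedged sketch of the shape of such a driver; it omits the file_operations and feature flags a real driver needs, and modern drivers are expected to do this setup before drm_dev_register() instead of using .load.

#include <drm/drm_drv.h>

static int legacy_load(struct drm_device *dev, unsigned long flags)
{
    /* one-time device/driver initialization would go here */
    return 0;
}

static void legacy_unload(struct drm_device *dev)
{
    /* reverse the effects of legacy_load(), in the opposite order */
}

static struct drm_driver legacy_example_driver = {
    .load   = legacy_load,
    .unload = legacy_unload,
    .name   = "legacy-example",   /* illustrative; real drivers also set fops, ioctls, ... */
};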
|
/device/soc/rockchip/rk3588/hardware/codec/jpeg/src/ |
H A D | codec_jpeg_decoder.cpp | 84 if (running_.load()) { in DeCode()
|
/device/board/hihope/dayu210/uboot/ |
H A D | make.sh |
    611  LOAD_BIN="load${i}.bin"
    690  ${SCRIPT_UBOOT} --load ${LOAD_ADDR} ${PLAT_UBOOT_SIZE}
|
/device/soc/rockchip/common/sdk_linux/drivers/gpu/drm/ |
H A D | drm_file.c |
    71   * The deprecated ->load callback must be called after the driver is in drm_dev_needs_global_mutex()
    76   if (dev->driver->load || dev->driver->unload) { in drm_dev_needs_global_mutex()
|
/device/soc/rockchip/common/sdk_linux/include/linux/ |
H A D | sched.h |
    360  * has a few: load, load_avg, util_avg, freq, and capacity.
    414  * The load/runnable/util_avg accumulates an infinite geometric series
    419  * load_avg = runnable% * scale_load_down(load)
    435  * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
    447  * with the highest load (=88761), always runnable on a single cfs_rq,
    453  * Max(load_avg) <= Max(load.weight)
    507  /* For load-balancing: */
    508  struct load_weight load; member
    538  * Per entity load average tracking.
    809  * 'init_load_pct' represents the initial task load assigned
    [all...]
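A worked example of the formula quoted in the sched.h comment block above, load_avg = runnable% * scale_load_down(load): with the highest weight in the table (88761, nice -20) and a task runnable half the time, load_avg settles around 44380, and an always-runnable task saturates at the weight itself, which is the Max(load_avg) <= Max(load.weight) bound. Floating point is used here only for readability; the kernel tracks these averages in fixed point via the pelt.c code earlier in this listing.

#include <stdio.h>

int main(void)
{
    double runnable_pct = 0.5;     /* runnable 50% of the time          */
    unsigned long weight = 88761;  /* nice -20, the largest table entry */

    double load_avg = runnable_pct * (double)weight;

    printf("load_avg   ~= %.0f\n", load_avg);   /* about 44380 */
    printf("upper bound = %lu (always-runnable case)\n", weight);
    return 0;
}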
/device/soc/rockchip/common/sdk_linux/drivers/regulator/ |
H A D | core.c |
    972  * consumer load. All locks held by caller */
    996  /* calc total requested load */ in drms_uA_update()
    1007 /* set the optimum mode for our new total regulator load */ in drms_uA_update()
    1010 rdev_err(rdev, "failed to set load %d: %pe\n", current_uA, ERR_PTR(err)); in drms_uA_update()
    1033 /* now get the optimum mode for our new total regulator load */ in drms_uA_update()
    1370 * load on the regulator) only have an effect when the consumer wants the
    1373 * consumer A: set_load(100); => total load = 0
    1374 * consumer A: regulator_enable(); => total load = 100
    1375 * consumer B: set_load(1000); => total load = 100
    1376 * consumer B: regulator_enable(); => total load = 1100
    4988 int load = val; reg_debug_set_load() local
    [all...]
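A consumer-side sketch of the accounting described in the core.c comment above: regulator_set_load() on its own does not change the aggregate load, it only starts counting once the same consumer enables the regulator, at which point drms_uA_update() re-evaluates the optimum operating mode. The supply name and current value below are illustrative.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int enable_vdd(struct device *dev)
{
    struct regulator *vdd;
    int ret;

    vdd = devm_regulator_get(dev, "vdd");   /* "vdd" is an example supply name */
    if (IS_ERR(vdd))
        return PTR_ERR(vdd);

    /* Declare the expected current draw; on its own this does not change
     * the regulator's total requested load yet. */
    ret = regulator_set_load(vdd, 100000);  /* 100 mA, illustrative */
    if (ret)
        return ret;

    /* Only now does this consumer's load count toward the total and the
     * optimum mode get re-evaluated (drms_uA_update() in core.c). */
    return regulator_enable(vdd);
}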
/device/soc/rockchip/common/sdk_linux/drivers/devfreq/ |
H A D | devfreq.c |
    40   * devfreq core provides delayed work based load monitoring helper
    452  * devfreq_monitor_start() - Start load monitoring of devfreq instance
    455  * Helper function for starting devfreq device load monitoring. By
    484  * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
    487  * Helper function to stop devfreq device load monitoring. Function
    502  * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
    505  * Helper function to suspend devfreq device load monitoring. Function
    534  * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
    537  * Helper function to resume devfreq device load monitoring. Function
    576  * Helper function to set new load monitoring
    [all...]
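The devfreq.c helpers matched above implement the delayed-work load monitor: at every polling_ms interval the core calls the driver's get_dev_status() callback, and the busy_time/total_time it reports is the load the governor acts on. A hedged sketch of the driver side of that contract; the frequencies and timings are placeholders.

#include <linux/devfreq.h>
#include <linux/device.h>

static int demo_target(struct device *dev, unsigned long *freq, u32 flags)
{
    /* program the hardware to the closest supported frequency */
    return 0;
}

static int demo_get_dev_status(struct device *dev,
                               struct devfreq_dev_status *stat)
{
    /* report how busy the device was since the previous poll */
    stat->busy_time = 30;                /* e.g. 30 ms busy ...        */
    stat->total_time = 100;              /* ... out of a 100 ms window */
    stat->current_frequency = 200000000; /* placeholder frequency, Hz  */
    return 0;
}

static struct devfreq_dev_profile demo_profile = {
    .initial_freq   = 200000000,
    .polling_ms     = 100,               /* drives the load-monitor delayed work */
    .target         = demo_target,
    .get_dev_status = demo_get_dev_status,
};

/* In probe():
 *   devm_devfreq_add_device(dev, &demo_profile,
 *                           DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
 */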