/third_party/node/test/parallel/test-performance-eventlooputil.js
  18: assert.deepStrictEqual(elu, { idle: 0, active: 0, utilization: 0 });
  20: { idle: 0, active: 0, utilization: 0 });
  22: { idle: 0, active: 0, utilization: 0 });
  35: // Force idle time to accumulate before allowing test to continue.
  36: if (elu1.idle <= 0)
  46: assert.strictEqual(elu2.idle, 0);
  47: assert.strictEqual(elu4.idle, 0);
  62: const sum = elu1.idle + elu1.active;
  64: assert.ok(sum >= elu1.idle && sum >= elu1.active,
  65: `idle
  [all...]

/third_party/node/lib/internal/perf/event_loop_utilization.js
  11: return { idle: 0, active: 0, utilization: 0 };
  15: const idle = util1.idle - util2.idle;
  17: return { idle, active, utilization: active / (idle + active) };
  20: const idle = nodeTiming.idleTime;
  21: const active = now() - ls - idle;
  24: return { idle, active, utilization: active / (idle
  [all...]

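The hits above, together with the worker.js copies further down, contain the whole event-loop-utilization formula: take the idle and active deltas between two snapshots and report utilization as active / (idle + active). A minimal C sketch of that arithmetic; the struct is an illustrative stand-in for Node's { idle, active, utilization } record, not Node's actual code:

    #include <stdio.h>

    /* Illustrative stand-in for Node's { idle, active, utilization } record. */
    typedef struct {
      double idle;        /* ms the loop spent blocked waiting for events */
      double active;      /* ms the loop spent processing callbacks */
      double utilization; /* active / (idle + active) */
    } elu_t;

    /* Mirrors the util1 - util2 delta visible in the hits above. */
    static elu_t elu_delta(elu_t util1, elu_t util2) {
      elu_t d;
      d.idle = util1.idle - util2.idle;
      d.active = util1.active - util2.active;
      /* Guard the 0/0 case before any time has accumulated. */
      d.utilization =
          (d.idle + d.active) > 0.0 ? d.active / (d.idle + d.active) : 0.0;
      return d;
    }

    int main(void) {
      elu_t earlier = { 100.0, 50.0, 0.0 };
      elu_t later = { 160.0, 90.0, 0.0 };
      printf("%.2f\n", elu_delta(later, earlier).utilization); /* 0.40 */
      return 0;
    }

The { idle: 0, active: 0, utilization: 0 } early returns in the hits are the same guard this sketch applies to the 0/0 case.
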
/third_party/libuv/src/unix/tcp.c
  456: int idle;  (in uv__tcp_keepalive(), local)
  460: (void) &idle;  (in uv__tcp_keepalive())
  479: * - By default, the first keep-alive probe is sent out after a TCP connection is idle for two hours.  (in uv__tcp_keepalive())
  496: idle = delay;  (in uv__tcp_keepalive())
  498: if (idle < 10)  (in uv__tcp_keepalive())
  499: idle = 10;  (in uv__tcp_keepalive())
  501: if (idle > 10*24*60*60)  (in uv__tcp_keepalive())
  502: idle = 10*24*60*60;  (in uv__tcp_keepalive())
  507: if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)))  (in uv__tcp_keepalive())
  [all...]

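The uv__tcp_keepalive() hits clamp the keep-alive idle threshold to the range [10 s, 10 days] before handing it to the kernel. A self-contained sketch of the same pattern, assuming a Linux-style TCP_KEEPIDLE (macOS, for instance, spells the same knob TCP_KEEPALIVE):

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    /* Enable keep-alive on fd and set how long the connection may sit
     * idle before the first probe, clamped the way lines 498-502 above do. */
    static int set_keepalive_idle(int fd, int delay) {
      int on = 1;
      int idle = delay;

      if (idle < 10)
        idle = 10;                 /* floor: 10 seconds */
      if (idle > 10 * 24 * 60 * 60)
        idle = 10 * 24 * 60 * 60;  /* ceiling: 10 days */

      if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)))
        return -1;
    #ifdef TCP_KEEPIDLE
      if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)))
        return -1;
    #endif
      return 0;
    }
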
/third_party/node/lib/internal/worker.js
  503: return { idle: 0, active: 0, utilization: 0 };
  510: return { idle: 0, active: 0, utilization: 0 };
  514: const idle = util1.idle - util2.idle;
  516: return { idle, active, utilization: active / (idle + active) };
  519: const idle = this[kHandle].loopIdleTime();
  525: const active = now() - this[kLoopStartTime] - idle;
  528: return { idle, activ
  [all...]

/third_party/libuv/test/test-tcp-oob.c
  34: static uv_idle_t idle;  (variable)
  47: static void idle_cb(uv_idle_t* idle) {  (in idle_cb(), argument)
  54: uv_close((uv_handle_t*) idle, NULL);  (in idle_cb())
  66: ASSERT_OK(uv_idle_start(&idle, idle_cb));  (in read_cb())
  123: ASSERT_OK(uv_idle_init(loop, &idle));  (in TEST_IMPL())

/third_party/libuv/test/test-poll-oob.c
  37: static uv_idle_t idle;  (variable)
  55: static void idle_cb(uv_idle_t* idle) {  (in idle_cb(), argument)
  65: uv_close((uv_handle_t*) idle, NULL);  (in idle_cb())
  163: ASSERT_OK(uv_idle_start(&idle, idle_cb));  (in connection_cb())
  178: ASSERT_OK(uv_idle_init(loop, &idle));  (in TEST_IMPL())

/third_party/libuv/test/test-handle-fileno.c
  55: uv_idle_t idle;  (in TEST_IMPL(), local)
  61: r = uv_idle_init(loop, &idle);  (in TEST_IMPL())
  63: r = uv_fileno((uv_handle_t*) &idle, &fd);  (in TEST_IMPL())
  65: uv_close((uv_handle_t*) &idle, NULL);  (in TEST_IMPL())

/third_party/libuv/test/test-queue-foreach-delete.c
  122: DEFINE_GLOBALS_AND_CBS(idle, uv_idle_t* handle)
  176: INIT_AND_START(idle, loop);  (in TEST_IMPL())
  194: END_ASSERTS(idle);  (in TEST_IMPL())

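These tests all drive the same uv_idle_t lifecycle: uv_idle_init(), uv_idle_start(), and a uv_close() once the callback has done its job (test-handle-fileno.c additionally checks that uv_fileno() fails for an idle handle, since idle handles are not backed by a file descriptor). A minimal standalone version of that sequence:

    #include <stdio.h>
    #include <uv.h>

    /* Idle callbacks run once per loop iteration while started; this one
     * fires once, stops itself, and closes its handle, which lets
     * uv_run() return. */
    static void idle_cb(uv_idle_t* idle) {
      printf("idle callback fired\n");
      uv_idle_stop(idle);
      uv_close((uv_handle_t*) idle, NULL);
    }

    int main(void) {
      uv_loop_t* loop = uv_default_loop();
      uv_idle_t idle;

      uv_idle_init(loop, &idle);
      uv_idle_start(&idle, idle_cb);
      uv_run(loop, UV_RUN_DEFAULT);
      return uv_loop_close(loop);
    }
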
/third_party/node/test/node-api/test_uv_loop/test_uv_loop.cc
  30: uv_idle_t* idle = new uv_idle_t;  (in SetImmediate(), local)
  31: uv_idle_init(loop, idle);  (in SetImmediate())
  32: uv_idle_start(idle, [](uv_idle_t* idle) {  (in SetImmediate())
  33: uv_close(reinterpret_cast<uv_handle_t*>(idle), [](uv_handle_t* handle) {  (in SetImmediate())

/third_party/node/test/sequential/test-worker-eventlooputil.js
  50: // Force some idle time to accumulate before proceeding with test.
  51: if (eventLoopUtilization().idle <= 0)
  67: { idle: 0, active: 0, utilization: 0 });
  88: // Cutting the idle time in half since it's possible that the call took a
  90: assert.ok(wElu.idle >= 25, `${wElu.idle} < 25`);
  113: return elu.idle + elu.active;

/third_party/backends/backend/escl/escl_status.c
  35: struct idle  (struct)
  51: struct idle *mem = (struct idle *)userp;  (in memory_callback_s())
  200: struct idle *var = NULL;  (in escl_status())
  213: var = (struct idle*)calloc(1, sizeof(struct idle));  (in escl_status())

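The memory_callback_s()/userp pairing suggests struct idle here is the accumulator for a libcurl write callback. A generic sketch of that pattern, with an illustrative struct name and fields rather than the escl backend's own:

    #include <stdlib.h>
    #include <string.h>
    #include <curl/curl.h>

    /* Illustrative growable buffer handed to libcurl via CURLOPT_WRITEDATA. */
    struct buffer {
      char *memory;
      size_t size;
    };

    /* Matches the CURLOPT_WRITEFUNCTION signature: append each chunk to
     * the buffer, keeping it NUL-terminated. */
    static size_t write_cb(char *data, size_t size, size_t nmemb, void *userp) {
      size_t realsize = size * nmemb;
      struct buffer *mem = (struct buffer *)userp;
      char *p = realloc(mem->memory, mem->size + realsize + 1);

      if (p == NULL)
        return 0; /* out of memory: returning short signals failure to libcurl */
      mem->memory = p;
      memcpy(mem->memory + mem->size, data, realsize);
      mem->size += realsize;
      mem->memory[mem->size] = '\0';
      return realsize;
    }

It would be wired up with curl_easy_setopt(h, CURLOPT_WRITEFUNCTION, write_cb) and curl_easy_setopt(h, CURLOPT_WRITEDATA, &buf).
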
/third_party/mesa3d/src/gallium/drivers/r600/r600_gpu_load.c
  30: * frequency and the "busy" or "idle" counter is incremented based on
  79: p_atomic_inc(&counters->named.field.idle); \
  172: unsigned idle = p_atomic_read(&rscreen->mmio_counters.array[busy_index + 1]);  (in r600_read_mmio_counter(), local)
  174: return busy | ((uint64_t)idle << 32);  (in r600_read_mmio_counter())
  182: unsigned idle = (end >> 32) - (begin >> 32);  (in r600_end_mmio_counter(), local)
  190: if (idle || busy) {  (in r600_end_mmio_counter())
  191: return busy*100 / (busy + idle);  (in r600_end_mmio_counter())

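The r600 code (and its near-identical radeonsi copy further down) samples a busy counter and an idle counter, packs both into one uint64_t so a begin/end snapshot pair can be compared, and reports busy time as a percentage. A sketch of just the packing and percentage steps:

    #include <stdint.h>

    /* Pack two 32-bit sample counters into one value: busy in the low
     * half, idle in the high half, as in r600_read_mmio_counter(). */
    static uint64_t pack_counters(uint32_t busy, uint32_t idle) {
      return (uint64_t)busy | ((uint64_t)idle << 32);
    }

    /* Difference two packed snapshots and turn them into a busy
     * percentage, as in r600_end_mmio_counter(); widened to 64 bits
     * here so busy * 100 cannot overflow. */
    static unsigned busy_percent(uint64_t begin, uint64_t end) {
      uint32_t busy = (uint32_t)end - (uint32_t)begin;
      uint32_t idle = (uint32_t)(end >> 32) - (uint32_t)(begin >> 32);

      if (busy || idle)
        return (unsigned)((uint64_t)busy * 100 / ((uint64_t)busy + idle));
      return 0; /* no samples landed in the interval */
    }

Packing lets one return value carry both counters, and because the percentage works purely on unsigned deltas, counter wraparound cancels out as long as fewer than 2^32 samples elapse between snapshots.
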
/device/soc/rockchip/common/vendor/drivers/rockchip/pm_domains.c
  125: #define DOMAIN(_name, pwr, status, req, idle, ack, wakeup, keepon) \
  127: .name = (_name), .pwr_mask = (pwr), .status_mask = (status), .req_mask = (req), .idle_mask = (idle), \
  131: #define DOMAIN_M(_name, pwr, status, req, idle, ack, wakeup, keepon) \
  134: .req_w_mask = (req) << 16, .req_mask = (req), .idle_mask = (idle), .ack_mask = (ack), \
  138: #define DOMAIN_M_O(_name, pwr, status, p_offset, req, idle, ack, r_offset, wakeup, keepon) \
  141: .req_w_mask = (req) << 16, .req_mask = (req), .idle_mask = (idle), .ack_mask = (ack), \
  145: #define DOMAIN_M_O_R(_name, p_offset, pwr, status, r_status, r_offset, req, idle, ack, wakeup, keepon) \
  149: .req_w_mask = (req) << 16, .req_mask = (req), .idle_mask = (idle), .ack_mask = (ack), \
  153: #define DOMAIN_RK3036(_name, req, ack, idle, wakeup) \
  155: .name = (_name), .req_mask = (req), .req_w_mask = (req) << 16, .ack_mask = (ack), .idle_mask = (idle), \
  215: rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd, bool idle)  (in rockchip_pmu_set_idle_request(), argument)
  263: rockchip_pmu_idle_request(struct device *dev, bool idle)  (in rockchip_pmu_idle_request(), argument)
  [all...]

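The req_w_mask = (req) << 16 in the DOMAIN_M() family above (and in the sdk_linux copy that follows) reflects a common Rockchip register convention: the upper 16 bits of a control register are per-bit write enables, so a single write can update selected low bits without a read-modify-write. A sketch of that idiom; reg_write() is a hypothetical MMIO accessor, not a kernel API:

    #include <stdint.h>

    /* Hypothetical MMIO write; a real driver would use writel() or a
     * regmap. */
    static void reg_write(volatile uint32_t *reg, uint32_t val) {
      *reg = val;
    }

    /* Assert or clear the idle-request bits named by req_mask, which
     * must lie in the low 16 bits. Placing req_mask in bits [31:16]
     * tells the hardware which low bits this write may touch;
     * everything else is left alone. */
    static void set_idle_request(volatile uint32_t *req_reg,
                                 uint32_t req_mask, int idle) {
      reg_write(req_reg, (req_mask << 16) | (idle ? req_mask : 0));
    }

Rockchip drivers in the mainline kernel wrap the same idea in HIWORD_UPDATE()-style macros; it is spelled out here for clarity.
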
/device/soc/rockchip/common/sdk_linux/drivers/soc/rockchip/pm_domains.c
  136: #define DOMAIN(_name, pwr, status, req, idle, ack, wakeup, keepon) \
  138: .name = (_name), .pwr_mask = (pwr), .status_mask = (status), .req_mask = (req), .idle_mask = (idle), \
  142: #define DOMAIN_M(_name, pwr, status, req, idle, ack, wakeup, keepon) \
  145: .req_w_mask = (req) << 16, .req_mask = (req), .idle_mask = (idle), .ack_mask = (ack), \
  149: #define DOMAIN_M_O(_name, pwr, status, p_offset, req, idle, ack, r_offset, wakeup, keepon) \
  152: .req_w_mask = (req) << 16, .req_mask = (req), .idle_mask = (idle), .ack_mask = (ack), \
  156: #define DOMAIN_M_O_R(_name, p_offset, pwr, status, r_status, r_offset, req, idle, ack, wakeup, keepon) \
  160: .req_w_mask = (req) << 16, .req_mask = (req), .idle_mask = (idle), .ack_mask = (ack), \
  164: #define DOMAIN_RK3036(_name, req, ack, idle, wakeup) \
  166: .name = (_name), .req_mask = (req), .req_w_mask = (req) << 16, .ack_mask = (ack), .idle_mask = (idle), \
  226: rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd, bool idle)  (in rockchip_pmu_set_idle_request(), argument)
  275: rockchip_pmu_idle_request(struct device *dev, bool idle)  (in rockchip_pmu_idle_request(), argument)
  [all...]

/third_party/python/PCbuild/idle.bat
  2: rem start idle
  3: rem Usage: idle [-d]
  16: set cmd=%exedir%\%exe% %PCBUILD%\..\Lib\idlelib\idle.py %1 %2 %3 %4 %5 %6 %7 %8 %9

/third_party/libwebsockets/lib/event-libs/glib/glib.c
  76: if (lws_gs_valid(pt_to_priv_glib(pt)->idle))  (in lws_glib_set_idle())
  79: pt_to_priv_glib(pt)->idle.gs = g_idle_source_new();  (in lws_glib_set_idle())
  80: if (!pt_to_priv_glib(pt)->idle.gs)  (in lws_glib_set_idle())
  83: g_source_set_callback(pt_to_priv_glib(pt)->idle.gs,  (in lws_glib_set_idle())
  85: pt_to_priv_glib(pt)->idle.tag = g_source_attach(  (in lws_glib_set_idle())
  86: pt_to_priv_glib(pt)->idle.gs, pt_to_g_main_context(pt));  (in lws_glib_set_idle())
  144: if (!lws_gs_valid(pt_to_priv_glib(pt)->idle))  (in lws_glib_dispatch())
  220: * For glib, this disables the idle callback. Otherwise we keep  (in lws_glib_idle_timer_cb())
  223: * We reenable the idle callback on the next network or scheduled event  (in lws_glib_idle_timer_cb())
  226: lws_gs_destroy(pt_to_priv_glib(pt)->idle);  (in lws_glib_idle_timer_cb())
  [all...]

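lws_glib_set_idle() builds a GLib idle source by hand: g_idle_source_new(), then g_source_set_callback(), then g_source_attach() to the per-thread main context. A self-contained sketch of the same sequence against the default context:

    #include <glib.h>

    /* Idle callback: runs when the context has nothing else to do.
     * Returning G_SOURCE_REMOVE detaches the source after one call. */
    static gboolean on_idle(gpointer user_data) {
      g_print("idle dispatched\n");
      g_main_loop_quit((GMainLoop *)user_data);
      return G_SOURCE_REMOVE;
    }

    int main(void) {
      GMainLoop *loop = g_main_loop_new(NULL, FALSE);
      GSource *gs = g_idle_source_new();
      guint tag;

      g_source_set_callback(gs, on_idle, loop, NULL);
      tag = g_source_attach(gs, NULL); /* NULL = default main context */
      (void)tag;
      g_source_unref(gs);              /* the context now owns the source */

      g_main_loop_run(loop);
      g_main_loop_unref(loop);
      return 0;
    }

lws additionally keeps the tag returned by g_source_attach() (the idle.tag hit above), which is evidently what its lws_gs_valid()/lws_gs_destroy() helpers use to track and tear down the source.
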
/third_party/mesa3d/src/gallium/drivers/radeonsi/si_gpu_load.c
  28: * frequency and the "busy" or "idle" counter is incremented based on
  77: p_atomic_inc(&counters->named.field.idle); \
  185: unsigned idle = p_atomic_read(&sscreen->mmio_counters.array[busy_index + 1]);  (in si_read_mmio_counter(), local)
  187: return busy | ((uint64_t)idle << 32);  (in si_read_mmio_counter())
  194: unsigned idle = (end >> 32) - (begin >> 32);  (in si_end_mmio_counter(), local)
  202: if (idle || busy) {  (in si_end_mmio_counter())
  203: return busy * 100 / (busy + idle);  (in si_end_mmio_counter())

/device/soc/rockchip/common/kernel/drivers/gpu/arm/bifrost/tests/mali_kutf_clk_rate_trace/kernel/mali_kutf_clk_rate_trace_test.c
  242: bool idle;  (in kutf_clk_trace_do_get_rate(), local)
  259: idle = kbdev->pm.clk_rtm.gpu_idle;  (in kutf_clk_trace_do_get_rate())
  265: rate, idle);  (in kutf_clk_trace_do_get_rate())
  610: * 1). GPU idle on test start, trace rate should be 0 (low power state)
  621: bool idle[2] = { false };  (in kutf_clk_trace_barebone_check(), local)
  625: /* Check consistency if gpu happens to be idle */  (in kutf_clk_trace_barebone_check())
  627: idle[0] = kbdev->pm.clk_rtm.gpu_idle;  (in kutf_clk_trace_barebone_check())
  641: pr_err("Trace did not see idle rate\n");  (in kutf_clk_trace_barebone_check())
  651: idle[1] = kbdev->pm.clk_rtm.gpu_idle;  (in kutf_clk_trace_barebone_check())
  662: if (idle[  (in kutf_clk_trace_barebone_check())
  [all...]

/device/soc/rockchip/common/sdk_linux/include/soc/rockchip/pm_domains.h
  13: int rockchip_pmu_idle_request(struct device *dev, bool idle);
  33: static inline int rockchip_pmu_idle_request(struct device *dev, bool idle)  (in rockchip_pmu_idle_request(), argument)

/device/soc/rockchip/rk3588/kernel/include/soc/rockchip/pm_domains.h
  13: int rockchip_pmu_idle_request(struct device *dev, bool idle);
  33: static inline int rockchip_pmu_idle_request(struct device *dev, bool idle)  (in rockchip_pmu_idle_request(), argument)

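Both headers pair a real declaration (line 13) with a static inline fallback (line 33), the usual kernel pattern for keeping callers free of #ifdefs when the driver is configured out. A sketch of that shape; the config symbol guard and the -ENOTSUPP return value are assumptions for illustration:

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #ifdef CONFIG_ROCKCHIP_PM_DOMAINS
    /* Real implementation, provided by the pm_domains driver. */
    int rockchip_pmu_idle_request(struct device *dev, bool idle);
    #else
    /* Driver configured out: fail benignly so callers can stay
     * unconditional. The error code here is illustrative. */
    static inline int rockchip_pmu_idle_request(struct device *dev, bool idle)
    {
            return -ENOTSUPP;
    }
    #endif
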
/third_party/node/benchmark/perf_hooks/bench-eventlooputil.js
  44: assert(elu.active + elu.idle > 0);
  57: assert(elu.active + elu.idle > 0);

/third_party/libwebsockets/lib/event-libs/libuv/libuv.c
  61: struct lws_pt_eventlibs_libuv, idle);  (in lws_uv_idle())
  85: /* if there is nobody who needs service forcing, shut down idle */  (in lws_uv_idle())
  155: uv_idle_start(&ptpriv->idle, lws_uv_idle);  (in lws_io_cb())
  694: uv_idle_stop(&pt_to_priv_uv(pt)->idle);  (in elops_destroy_pt_uv())
  695: uv_close((uv_handle_t *)&pt_to_priv_uv(pt)->idle, lws_uv_close_cb_sa);  (in elops_destroy_pt_uv())
  746: uv_idle_init(loop, &ptpriv->idle);  (in elops_init_pt_uv())
  747: LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(&ptpriv->idle, pt);  (in elops_init_pt_uv())
  748: uv_idle_start(&ptpriv->idle, lws_uv_idle);  (in elops_init_pt_uv())

/device/soc/rockchip/common/sdk_linux/kernel/sched/fair.c
  1718: /* Find alternative idle CPU. */  (in task_numa_assign())
  1732: /* Failed to find an alternative idle CPU */  (in task_numa_assign())
  1939: /* Evaluate an idle CPU for a task numa move. */  (in task_numa_compare())
  1943: /* Nothing cached so current CPU went idle since the search. */  (in task_numa_compare())
  1949: * If the CPU is no longer truly idle and the previous best CPU  (in task_numa_compare())
  1962: * If a move to idle is allowed because there is capacity or load  (in task_numa_compare())
  2006: /* Use idle CPU if there is no imbalance */  (in task_numa_find_cpu())
  2262: * completely idle or all activity is areas that are not of interest  (in update_task_scan_period())
  3276: * conditions. In specific, the case where the group was idle and we start the
  3286: * That is, the sum collapses because all other CPUs are idle; th
  5464: int idle = 0;  (in sched_cfs_period_timer(), local)
  6245: struct cpuidle_state *idle = idle_get_state(rq);  (in find_idlest_group_cpu(), local)
  6417: bool idle = true;  (in select_idle_core(), local)
  7867: enum cpu_idle_type idle;  (member)
  10151: load_balance(int this_cpu, struct rq *this_rq, struct sched_domain *sd, enum cpu_idle_type idle, int *continue_balancing)  (in load_balance(), argument)
  10632: rebalance_domains(struct rq *rq, enum cpu_idle_type idle)  (in rebalance_domains(), argument)
  11063: _nohz_idle_balance(struct rq *this_rq, unsigned int flags, enum cpu_idle_type idle)  (in _nohz_idle_balance(), argument)
  11179: nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)  (in nohz_idle_balance(), argument)
  11238: nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)  (in nohz_idle_balance(), argument)
  11385: enum cpu_idle_type idle = this_rq->idle_balance ? CPU_IDLE : CPU_NOT_IDLE;  (in run_rebalance_domains(), local)
  [all...]

/third_party/mbedtls/programs/ssl/ssl_client2.c
  2232: ret = idle(&server_fd, &timer, ret);  (in main())
  2234: ret = idle(&server_fd, ret);  (in main())
  2475: idle(&server_fd, &timer, ret);  (in main())
  2477: idle(&server_fd, ret);  (in main())
  2524: idle(&server_fd, &timer, ret);  (in main())
  2526: idle(&server_fd, ret);  (in main())
  2552: idle(&server_fd, &timer, ret);  (in main())
  2554: idle(&server_fd, ret);  (in main())
  2614: idle(&server_fd, &timer, ret);  (in main())
  2616: idle(  (in main())
  [all...]

/third_party/backends/backend/pixma/pixma.c
  103: /* valid states: idle, !idle && scanning, !idle && !scanning */
  104: SANE_Bool idle;  (member)
  106: SANE_Status last_read_status;  /* valid if !idle && !scanning */
  1229: ss->idle = SANE_TRUE;  (in terminate_reader_task())
  1777: ss->idle = SANE_TRUE;  (in sane_open())
  1836: if (!ss->idle && a != SANE_ACTION_GET_VALUE)  (in sane_control_option())
  1838: PDBG (pixma_dbg (3, "Warning: !idle && !SANE_ACTION_GET_VALUE\n"));  (in sane_control_option())
  1881: if (!ss->idle)  (in sane_get_parameters())
  [all...]