Lines matching references to pr (struct acpi_processor *)

126 static void lapic_timer_check_state(int state, struct acpi_processor *pr,
129 struct acpi_processor_power *pwr = &pr->power;
132 if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
146 pr->power.timer_broadcast_on_state = state;
151 struct acpi_processor *pr = (struct acpi_processor *) arg;
153 if (pr->power.timer_broadcast_on_state < INT_MAX)
159 static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
161 smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
162 (void *)pr, 1);
166 static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
169 return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
174 static void lapic_timer_check_state(int state, struct acpi_processor *pr,
176 static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
178 static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
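The lapic_timer_* helpers above track, per C-state, whether entering that state stops the local APIC timer and therefore requires the broadcast tick device; timer_broadcast_on_state holds the shallowest such state (INT_MAX when none), and lapic_timer_needs_broadcast() recovers a state's index by pointer arithmetic against the states array. A minimal userspace sketch of that index check, using simplified stand-in structs rather than the kernel's struct acpi_processor:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_STATES 8

    /* Simplified stand-ins for the kernel's acpi_processor_cx/power structs. */
    struct cx { int type; };
    struct power {
        struct cx states[MAX_STATES];
        int timer_broadcast_on_state;  /* shallowest state that stops the LAPIC
                                          timer; the kernel starts this at INT_MAX */
    };

    /* Mirrors the pointer-arithmetic check in lapic_timer_needs_broadcast(). */
    static bool needs_broadcast(struct power *pw, struct cx *cx)
    {
        return cx - pw->states >= pw->timer_broadcast_on_state;
    }

    int main(void)
    {
        struct power pw = { .timer_broadcast_on_state = 2 };  /* e.g. C2 and deeper */

        for (int i = 1; i < 4; i++)
            printf("state %d needs broadcast: %s\n", i,
                   needs_broadcast(&pw, &pw.states[i]) ? "yes" : "no");
        return 0;
    }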
212 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
215 if (!pr->pblk)
219 pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
220 pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
233 pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
234 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
237 pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
238 pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;
248 pr->power.states[ACPI_STATE_C2].address = 0;
259 pr->power.states[ACPI_STATE_C3].address = 0;
264 pr->power.states[ACPI_STATE_C2].address,
265 pr->power.states[ACPI_STATE_C3].address));
267 snprintf(pr->power.states[ACPI_STATE_C2].desc,
269 pr->power.states[ACPI_STATE_C2].address);
270 snprintf(pr->power.states[ACPI_STATE_C3].desc,
272 pr->power.states[ACPI_STATE_C3].address);
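acpi_processor_get_power_info_fadt() derives the C2 and C3 entry ports from the processor's P_BLK base (offsets 4 and 5, the P_LVL2/P_LVL3 registers) and takes their worst-case latencies from the FADT; a state whose latency exceeds the ACPI limit has its address zeroed, which marks it unusable. A hedged sketch of just that bookkeeping, with simplified structs and illustrative values (the 100 us / 1000 us cutoffs follow the usual ACPI C2/C3 limits, but treat the exact constants here as an assumption):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative limits; the kernel checks the FADT latencies against the
       ACPI maxima for usable C2/C3 (assumed to be 100 us and 1000 us here). */
    #define MAX_C2_LATENCY 100
    #define MAX_C3_LATENCY 1000

    struct cstate { uint64_t address; uint32_t latency; };

    int main(void)
    {
        uint64_t pblk = 0x810;                    /* hypothetical P_BLK base address */
        uint32_t fadt_c2 = 20, fadt_c3 = 2000;    /* hypothetical FADT latencies (us) */
        struct cstate c2 = { pblk + 4, fadt_c2 }; /* P_LVL2 register at P_BLK + 4 */
        struct cstate c3 = { pblk + 5, fadt_c3 }; /* P_LVL3 register at P_BLK + 5 */

        if (c2.latency > MAX_C2_LATENCY)
            c2.address = 0;                       /* too slow: C2 treated as unusable */
        if (c3.latency > MAX_C3_LATENCY)
            c3.address = 0;                       /* too slow: C3 treated as unusable */

        printf("C2 @ 0x%llx (%u us), C3 @ 0x%llx (%u us)\n",
               (unsigned long long)c2.address, c2.latency,
               (unsigned long long)c3.address, c3.latency);
        return 0;
    }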
277 static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
279 if (!pr->power.states[ACPI_STATE_C1].valid) {
282 pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
283 pr->power.states[ACPI_STATE_C1].valid = 1;
284 pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
286 snprintf(pr->power.states[ACPI_STATE_C1].desc,
290 pr->power.states[ACPI_STATE_C0].valid = 1;
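acpi_processor_get_power_info_default() guarantees that at least C1 exists: if neither _CST nor the FADT produced a usable C1, it synthesizes one with the HALT entry method, and C0 (the running state) is always marked valid. A small model of that fallback, with local enums standing in for ACPI_STATE_C1 and ACPI_CSTATE_HALT:

    #include <stdio.h>

    /* Simplified stand-ins; the kernel uses struct acpi_processor_cx. */
    enum { STATE_C0, STATE_C1, NR_STATES };
    enum { ENTRY_NONE, ENTRY_HALT };

    struct cx { int type, valid, entry_method; char desc[32]; };

    int main(void)
    {
        struct cx states[NR_STATES] = { 0 };

        if (!states[STATE_C1].valid) {
            /* Nothing usable from FADT/_CST: fall back to a plain HLT-based C1. */
            states[STATE_C1].type = STATE_C1;
            states[STATE_C1].valid = 1;
            states[STATE_C1].entry_method = ENTRY_HALT;
            snprintf(states[STATE_C1].desc, sizeof(states[STATE_C1].desc),
                     "ACPI HLT");
        }
        states[STATE_C0].valid = 1;   /* C0 (running) is always valid */

        printf("C1: %s\n", states[STATE_C1].desc);
        return 0;
    }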
294 static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
301 ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
305 if (!pr->power.count)
308 pr->flags.has_cst = 1;
312 static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
338 acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
339 bm_check_flag = pr->flags.bm_check;
340 bm_control_flag = pr->flags.bm_control;
342 pr->flags.bm_check = bm_check_flag;
343 pr->flags.bm_control = bm_control_flag;
346 if (pr->flags.bm_check) {
347 if (!pr->flags.bm_control) {
348 if (pr->flags.has_cst != 1) {
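acpi_processor_power_verify_c3() decides whether C3 is safe: it probes the platform's bus-master checking (bm_check) and bus-master arbitration control (bm_control) once, caches the result in static flags so later CPUs reuse it, and then, per the lines above, rejects C3 when bus-master checking is required but there is neither arbitration control nor _CST. A sketch of that caching and decision, with the probe stubbed out (probe_bm() is a hypothetical stand-in for acpi_processor_power_init_bm_check()):

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified per-processor flags; mirrors pr->flags.{bm_check,bm_control,has_cst}. */
    struct flags { bool bm_check, bm_control, has_cst; };

    /* Hypothetical stand-in for acpi_processor_power_init_bm_check(). */
    static void probe_bm(struct flags *f)
    {
        f->bm_check = true;     /* pretend the platform needs BM checking */
        f->bm_control = false;  /* ...but offers no BM arbitration control */
    }

    static bool c3_allowed(struct flags *f)
    {
        static int probed;
        static bool cached_check, cached_control;

        if (!probed) {                  /* first CPU does the real probe */
            probe_bm(f);
            cached_check = f->bm_check;
            cached_control = f->bm_control;
            probed = 1;
        } else {                        /* later CPUs reuse the cached result */
            f->bm_check = cached_check;
            f->bm_control = cached_control;
        }

        /* Without BM control, C3 is only accepted when _CST described the state. */
        if (f->bm_check && !f->bm_control && !f->has_cst)
            return false;
        return true;
    }

    int main(void)
    {
        struct flags f = { .has_cst = false };

        printf("C3 allowed: %s\n", c3_allowed(&f) ? "yes" : "no");
        return 0;
    }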
417 static int acpi_processor_power_verify(struct acpi_processor *pr)
425 pr->power.timer_broadcast_on_state = INT_MAX;
428 struct acpi_processor_cx *cx = &pr->power.states[i];
442 acpi_processor_power_verify_c3(pr, cx);
452 lapic_timer_check_state(i, pr, cx);
459 sort(&pr->power.states[1], max_cstate,
465 lapic_timer_propagate_broadcast(pr);
470 static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
480 memset(pr->power.states, 0, sizeof(pr->power.states));
482 result = acpi_processor_get_power_info_cst(pr);
484 result = acpi_processor_get_power_info_fadt(pr);
489 acpi_processor_get_power_info_default(pr);
491 pr->power.count = acpi_processor_power_verify(pr);
498 if (pr->power.states[i].valid) {
499 pr->power.count = i;
500 pr->flags.power = 1;
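acpi_processor_get_cstate_info() strings the pieces together: clear the state table, prefer _CST, fall back to the FADT description if _CST fails, always top up with the default C1, then verify the states; the final loop records the highest valid state index in power.count and sets flags.power once anything usable exists. A compact model of that ordering (the helpers below are stubs playing the roles of the kernel functions, not their real implementations):

    #include <stdio.h>
    #include <string.h>

    #define NR_STATES 8

    struct cx { int valid; };

    /* Hypothetical stubs playing the roles of the kernel helpers. */
    static int get_info_cst(struct cx *s)  { (void)s; return -1; } /* pretend no _CST */
    static int get_info_fadt(struct cx *s) { s[2].valid = 1; return 0; }
    static void get_info_default(struct cx *s) { s[1].valid = 1; } /* always provide C1 */
    static int verify(struct cx *s)
    {
        int n = 0;
        for (int i = 1; i < NR_STATES; i++)
            n += s[i].valid;
        return n;
    }

    int main(void)
    {
        struct cx states[NR_STATES];
        int count, power = 0;

        memset(states, 0, sizeof(states));
        if (get_info_cst(states))          /* prefer _CST... */
            get_info_fadt(states);         /* ...fall back to the FADT P_BLK description */
        get_info_default(states);          /* and always ensure a HLT-based C1 */

        count = verify(states);            /* number of states that passed verification */
        for (int i = 1; i < NR_STATES; i++)
            if (states[i].valid) {
                count = i;                 /* ends up as the highest valid state index */
                power = 1;                 /* at least one idle state is usable */
            }

        printf("count=%d power=%d\n", count, power);
        return 0;
    }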
613 static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
615 return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
625 * @pr: Target processor
630 struct acpi_processor *pr,
646 bool dis_bm = pr->flags.bm_control;
690 struct acpi_processor *pr;
692 pr = __this_cpu_read(processors);
693 if (unlikely(!pr))
697 if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
698 return acpi_idle_enter_bm(drv, pr, cx, index);
701 if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
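acpi_idle_enter() is the cpuidle ->enter callback on the C-state path: it fetches this CPU's acpi_processor, routes C3 through the bus-master-aware variant when bm_check is set, and demotes to C1 when acpi_idle_fallback_to_c1() holds and more than one CPU is online. A userspace model of just that dispatch; note the real fallback check also consults CONFIG_HOTPLUG_CPU and a FADT flag, folded into a plain boolean here:

    #include <stdbool.h>
    #include <stdio.h>

    enum { C1 = 1, C2, C3 };

    /* Simplified per-CPU processor state; mirrors pr->flags.{bm_check,has_cst}. */
    struct proc { bool bm_check, has_cst; };

    /* Roughly acpi_idle_fallback_to_c1(): hotplug support but no _CST. */
    static bool fallback_to_c1(struct proc *pr, bool hotplug_cpu)
    {
        return hotplug_cpu && !pr->has_cst;
    }

    static const char *idle_enter(struct proc *pr, int type, int online_cpus)
    {
        if (type == C3 && pr->bm_check)
            return "enter via bus-master-aware path";
        if (fallback_to_c1(pr, true) && online_cpus > 1)
            return "demote to C1";
        return "enter requested state";
    }

    int main(void)
    {
        struct proc pr = { .bm_check = true, .has_cst = false };

        printf("C3, 4 CPUs online: %s\n", idle_enter(&pr, C3, 4));
        printf("C2, 4 CPUs online: %s\n", idle_enter(&pr, C2, 4));
        printf("C2, 1 CPU  online: %s\n", idle_enter(&pr, C2, 1));
        return 0;
    }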
721 struct acpi_processor *pr = __this_cpu_read(processors);
723 if (unlikely(!pr))
726 if (pr->flags.bm_check) {
731 acpi_idle_enter_bm(drv, pr, cx, index);
744 static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
756 cx = &pr->power.states[i];
763 if (lapic_timer_needs_broadcast(pr, cx))
768 if (pr->flags.bm_check)
783 static int acpi_processor_setup_cstates(struct acpi_processor *pr)
801 cx = &pr->power.states[i];
825 if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
863 static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
868 static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
874 static int acpi_processor_setup_cstates(struct acpi_processor *pr)
1055 static int flatten_lpi_states(struct acpi_processor *pr,
1076 flpi = &pr->power.lpi_states[flat_state_cnt];
1105 static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
1109 acpi_handle handle = pr->handle, pr_ahandle;
1114 ret = acpi_processor_ffh_lpi_probe(pr->id);
1127 handle = pr->handle;
1131 flatten_lpi_states(pr, prev, NULL);
1150 flatten_lpi_states(pr, curr, prev);
1157 pr->power.count = flat_state_cnt;
1159 for (i = 0; i < pr->power.count; i++)
1160 pr->power.lpi_states[i].index = i;
1163 pr->flags.has_lpi = 1;
1164 pr->flags.power = 1;
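On the LPI path, acpi_processor_get_lpi_info() walks from the CPU node up through its processor-container parents, evaluating _LPI at each level, and flatten_lpi_states() composes each level's states with what has already been flattened into pr->power.lpi_states, which finally get sequential indices. A much-simplified model of the flattening idea: "composition" is reduced here to concatenating names and summing latencies, whereas the kernel matches on enabled flags and enable_parent_state before combining:

    #include <stdio.h>

    #define MAX_LPI 16

    /* Simplified stand-in for struct acpi_lpi_state. */
    struct lpi { char name[32]; int latency; int index; };

    static struct lpi flat[MAX_LPI];   /* plays the role of pr->power.lpi_states */
    static int flat_cnt;

    /* Compose one level's states with what has been flattened so far. */
    static void flatten(struct lpi *curr, int ncurr, struct lpi *prev, int nprev)
    {
        if (!prev) {                                  /* leaf (CPU) level: copy as-is */
            for (int i = 0; i < ncurr && flat_cnt < MAX_LPI; i++)
                flat[flat_cnt++] = curr[i];
            return;
        }
        for (int i = 0; i < ncurr; i++)               /* parent level: combine */
            for (int j = 0; j < nprev && flat_cnt < MAX_LPI; j++) {
                struct lpi *out = &flat[flat_cnt++];

                snprintf(out->name, sizeof(out->name), "%s+%s",
                         prev[j].name, curr[i].name);
                out->latency = prev[j].latency + curr[i].latency;
            }
    }

    int main(void)
    {
        struct lpi cpu[]     = { { "WFI", 1, 0 }, { "cpu-ret", 30, 0 } };
        struct lpi cluster[] = { { "cluster-ret", 300, 0 } };

        flatten(cpu, 2, NULL, 0);              /* CPU-level _LPI first */
        flatten(cluster, 1, flat, flat_cnt);   /* then compose with the cluster level */

        for (int i = 0; i < flat_cnt; i++) {
            flat[i].index = i;                 /* sequential indices, as in the kernel */
            printf("%d: %s (%d us)\n", i, flat[i].name, flat[i].latency);
        }
        return 0;
    }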
1185 struct acpi_processor *pr;
1188 pr = __this_cpu_read(processors);
1190 if (unlikely(!pr))
1193 lpi = &pr->power.lpi_states[index];
1200 static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
1207 if (!pr->flags.has_lpi)
1210 for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
1211 lpi = &pr->power.lpi_states[i];
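acpi_processor_setup_lpi_states() then copies the flattened list into the cpuidle driver's state table, one entry per lpi_state up to CPUIDLE_STATE_MAX, pointing each state's ->enter at acpi_idle_lpi_enter(), which (per the lines above) simply indexes pr->power.lpi_states on the current CPU. A sketch of that population loop over simplified local structs (the field names imitate struct cpuidle_state but are not the kernel definitions):

    #include <stdio.h>

    #define STATE_MAX 8   /* plays the role of CPUIDLE_STATE_MAX */

    /* Local stand-ins for struct acpi_lpi_state and struct cpuidle_state. */
    struct lpi { char desc[16]; int min_residency, wake_latency; };
    struct idle_state { char name[16], desc[32]; int exit_latency, target_residency; };

    int main(void)
    {
        struct lpi lpis[] = { { "cpu-wfi", 1, 1 }, { "cluster-off", 100, 30 } };
        int count = (int)(sizeof(lpis) / sizeof(lpis[0]));
        struct idle_state states[STATE_MAX];
        int nstates = 0;

        for (int i = 0; i < count && i < STATE_MAX; i++) {
            struct lpi *l = &lpis[i];

            snprintf(states[i].name, sizeof(states[i].name), "LPI-%d", i);
            snprintf(states[i].desc, sizeof(states[i].desc), "%s", l->desc);
            states[i].exit_latency = l->wake_latency;
            states[i].target_residency = l->min_residency;
            /* the kernel additionally wires ->enter to acpi_idle_lpi_enter here */
            nstates++;
        }

        printf("registered %d LPI idle states, deepest: %s\n",
               nstates, states[nstates - 1].desc);
        return 0;
    }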
1233 * @pr: the ACPI processor
1235 static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
1240 if (!pr->flags.power_setup_done || !pr->flags.power)
1249 if (pr->flags.has_lpi)
1250 return acpi_processor_setup_lpi_states(pr);
1252 return acpi_processor_setup_cstates(pr);
1259 * @pr: the ACPI processor
1262 static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
1265 if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
1268 dev->cpu = pr->id;
1269 if (pr->flags.has_lpi)
1270 return acpi_processor_ffh_lpi_probe(pr->id);
1272 return acpi_processor_setup_cpuidle_cx(pr, dev);
1275 static int acpi_processor_get_power_info(struct acpi_processor *pr)
1279 ret = acpi_processor_get_lpi_info(pr);
1281 ret = acpi_processor_get_cstate_info(pr);
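acpi_processor_get_power_info() ties the two discovery paths together: try LPI first and fall back to the C-state path only if LPI information is unavailable; later, setup_cpuidle_states()/setup_cpuidle_dev() dispatch on pr->flags.has_lpi to match. A minimal sketch of that ordering with stub probes:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stubs standing in for the two discovery paths. */
    static int get_lpi_info(bool *has_lpi)
    {
        *has_lpi = false;    /* pretend the platform has no _LPI */
        return -1;
    }

    static int get_cstate_info(void)
    {
        return 0;            /* pretend _CST/FADT C-states were found */
    }

    int main(void)
    {
        bool has_lpi = false;
        int ret;

        ret = get_lpi_info(&has_lpi);    /* prefer LPI... */
        if (ret)
            ret = get_cstate_info();     /* ...fall back to C-states */

        printf("power info: %s (has_lpi=%d)\n", ret ? "none" : "ok", has_lpi);
        return 0;
    }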
1286 int acpi_processor_hotplug(struct acpi_processor *pr)
1294 if (!pr->flags.power_setup_done)
1297 dev = per_cpu(acpi_cpuidle_device, pr->id);
1300 ret = acpi_processor_get_power_info(pr);
1301 if (!ret && pr->flags.power) {
1302 acpi_processor_setup_cpuidle_dev(pr, dev);
1310 int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
1319 if (!pr->flags.power_setup_done)
1328 if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
1344 acpi_processor_get_power_info(pr);
1345 acpi_processor_setup_cpuidle_states(pr);
1368 int acpi_processor_power_init(struct acpi_processor *pr)
1378 if (!acpi_processor_get_power_info(pr))
1379 pr->flags.power_setup_done = 1;
1386 if (pr->flags.power) {
1389 acpi_processor_setup_cpuidle_states(pr);
1400 per_cpu(acpi_cpuidle_device, pr->id) = dev;
1402 acpi_processor_setup_cpuidle_dev(pr, dev);
1418 int acpi_processor_power_exit(struct acpi_processor *pr)
1420 struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
1425 if (pr->flags.power) {
1434 pr->flags.power_setup_done = 0;
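acpi_processor_power_init() and acpi_processor_power_exit() frame the driver lifecycle: init discovers the power states, sets power_setup_done, registers the acpi_idle cpuidle driver once, then allocates, stashes (in the per-CPU acpi_cpuidle_device slot) and registers this CPU's cpuidle device; exit unregisters the per-CPU device, drops the driver when the last CPU goes away, and clears power_setup_done. A compressed userspace model of that pairing, with registration reduced to prints and the last-CPU driver teardown omitted:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NR_CPUS 4

    /* Simplified per-CPU bookkeeping; mirrors per_cpu(acpi_cpuidle_device, cpu). */
    static void *cpuidle_device[NR_CPUS];
    static bool driver_registered;

    static int power_init(int cpu, bool has_power_states)
    {
        if (!has_power_states)
            return 0;                     /* nothing to expose for this CPU */
        if (!driver_registered) {         /* first CPU registers the driver */
            printf("register acpi_idle driver\n");
            driver_registered = true;
        }
        cpuidle_device[cpu] = malloc(1);  /* stand-in for struct cpuidle_device */
        printf("register cpuidle device for CPU %d\n", cpu);
        return 0;
    }

    static int power_exit(int cpu)
    {
        if (cpuidle_device[cpu]) {
            printf("unregister cpuidle device for CPU %d\n", cpu);
            free(cpuidle_device[cpu]);
            cpuidle_device[cpu] = NULL;
        }
        return 0;
    }

    int main(void)
    {
        power_init(0, true);
        power_init(1, true);
        power_exit(1);
        power_exit(0);
        return 0;
    }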