// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS		"processor"
#define ACPI_PROCESSOR_FILE_PERFORMANCE	"performance"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_perflib");

static DEFINE_MUTEX(performance_mutex);

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * each time a CPUfreq driver that is also registered with the ACPI
 * core is asked to change the speed policy, the maximum value is
 * adjusted so that it stays within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */

/*
 * ignore_ppc:
 * -1 -> cpufreq low-level drivers not initialized -> _PSS, etc. not called yet
 *       ignore _PPC
 *  0 -> cpufreq low-level drivers initialized -> consider _PPC values
 *  1 -> ignore _PPC totally -> forced by user through boot param
 */
static int ignore_ppc = -1;
module_param(ignore_ppc, int, 0644);
MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly"
		 " limited by BIOS, this should help");

static bool acpi_processor_ppc_in_use;

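/*
 * acpi_processor_get_platform_limit - Evaluate _PPC and update the QoS limit.
 * @pr: ACPI processor object.
 *
 * _PPC returns the index of the highest P-state the platform currently
 * allows (0 meaning "no limit"). Cache it in performance_platform_limit
 * and, if the perflib QoS request is active, update that request to the
 * frequency of the corresponding P-state.
 */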
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long ppc = 0;
	s32 qos_value;
	int index;
	int ret;

	if (!pr)
		return -EINVAL;

	/*
	 * _PPC indicates the maximum state currently supported by the
	 * platform (e.g. 0 = states 0..n; 1 = states 1..n; etc.).
	 */
	status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);

	if (status != AE_NOT_FOUND)
		acpi_processor_ppc_in_use = true;

	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
		return -ENODEV;
	}

	index = ppc;

	if (pr->performance_platform_limit == index ||
	    ppc >= pr->performance->state_count)
		return 0;

	pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
		 index, index ? "is" : "is not");

	pr->performance_platform_limit = index;

	if (unlikely(!freq_qos_request_active(&pr->perflib_req)))
		return 0;

	/*
	 * If _PPC returns 0, it means that all of the available states can be
	 * used ("no limit").
	 */
	if (index == 0)
		qos_value = FREQ_QOS_MAX_DEFAULT_VALUE;
	else
		qos_value = pr->performance->states[index].core_frequency * 1000;

	ret = freq_qos_update_request(&pr->perflib_req, qos_value);
	if (ret < 0) {
		pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
			pr->id, ret);
	}

	return 0;
}

#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE	0x80
/*
 * acpi_processor_ppc_ost: Notify firmware of the _PPC evaluation status
 * @handle: ACPI processor handle
 * @status: the status code of _PPC evaluation
 *	0: success. OSPM is now using the performance state specified.
 *	1: failure. OSPM has not changed the number of P-states in use
 */
static void acpi_processor_ppc_ost(acpi_handle handle, int status)
{
	if (acpi_has_method(handle, "_OST"))
		acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
				  status, NULL);
}

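/*
 * acpi_processor_ppc_has_changed - Handle a change of the _PPC limit.
 * @pr: ACPI processor object.
 * @event_flag: nonzero if invoked from an ACPI notification handler.
 *
 * Re-evaluate _PPC and, on success, ask cpufreq to refresh the policy
 * limits. For notification events, report the evaluation status back to
 * the platform via _OST.
 */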
void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
	int ret;

	if (ignore_ppc || !pr->performance) {
		/*
		 * The _OST object is evaluated only for notification
		 * events; otherwise it is skipped.
		 */
		if (event_flag)
			acpi_processor_ppc_ost(pr->handle, 1);
		return;
	}

	ret = acpi_processor_get_platform_limit(pr);
	/*
	 * The _OST object is evaluated only for notification events;
	 * otherwise it is skipped.
	 */
	if (event_flag) {
		if (ret < 0)
			acpi_processor_ppc_ost(pr->handle, 1);
		else
			acpi_processor_ppc_ost(pr->handle, 0);
	}
	if (ret >= 0)
		cpufreq_update_limits(pr->id);
}

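/*
 * acpi_processor_get_bios_limit - Return the current BIOS frequency limit.
 * @cpu: CPU to query.
 * @limit: on success, the frequency (in kHz) of the _PPC-limited P-state.
 */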
int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
{
	struct acpi_processor *pr;

	pr = per_cpu(processors, cpu);
	if (!pr || !pr->performance || !pr->performance->state_count)
		return -ENODEV;
	*limit = pr->performance->states[pr->performance_platform_limit].
		core_frequency * 1000;
	return 0;
}
EXPORT_SYMBOL(acpi_processor_get_bios_limit);

void acpi_processor_ignore_ppc_init(void)
{
	if (ignore_ppc < 0)
		ignore_ppc = 0;
}

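/*
 * acpi_processor_ppc_init - Add "no limit" frequency QoS requests.
 * @policy: cpufreq policy covering the CPUs to set up.
 *
 * Add a FREQ_QOS_MAX request for every CPU covered by @policy so that a
 * subsequent _PPC change can lower the effective maximum frequency.
 */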
void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->related_cpus) {
		struct acpi_processor *pr = per_cpu(processors, cpu);
		int ret;

		if (!pr)
			continue;

		/*
		 * Reset performance_platform_limit in case there is a stale
		 * value in it, so as to make it match the "no limit" QoS value
		 * below.
		 */
		pr->performance_platform_limit = 0;

		ret = freq_qos_add_request(&policy->constraints,
					   &pr->perflib_req, FREQ_QOS_MAX,
					   FREQ_QOS_MAX_DEFAULT_VALUE);
		if (ret < 0)
			pr_err("Failed to add freq constraint for CPU%d (%d)\n",
			       cpu, ret);
	}
}

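/*
 * acpi_processor_ppc_exit - Remove the frequency QoS requests again.
 * @policy: cpufreq policy covering the CPUs to clean up.
 */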
void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->related_cpus) {
		struct acpi_processor *pr = per_cpu(processors, cpu);

		if (pr)
			freq_qos_remove_request(&pr->perflib_req);
	}
}

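/*
 * acpi_processor_get_performance_control - Parse _PCT.
 * @pr: ACPI processor object.
 *
 * _PCT is a package of two buffers describing the performance control
 * and status registers; copy both into pr->performance.
 */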
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *pct = NULL;
	union acpi_object obj = { 0 };

	status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT"));
		return -ENODEV;
	}

	pct = (union acpi_object *)buffer.pointer;
	if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
	    || (pct->package.count != 2)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */

	obj = pct->package.elements[0];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->performance->control_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

	/*
	 * status_register
	 */

	obj = pct->package.elements[1];

	if ((obj.type != ACPI_TYPE_BUFFER)
	    || (obj.buffer.length < sizeof(struct acpi_pct_register))
	    || (obj.buffer.pointer == NULL)) {
		printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->performance->status_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

end:
	kfree(buffer.pointer);

	return result;
}

#ifdef CONFIG_X86
/*
 * Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
 * in their ACPI data. Calculate the real values and fix up the _PSS data.
 */
static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
{
	u32 hi, lo, fid, did;
	int index = px->control & 0x00000007;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
	    || boot_cpu_data.x86 == 0x11) {
		rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
		/*
		 * MSR C001_0064+:
		 * Bit 63 (bit 31 of the high half): PstateEn. Read-write.
		 * If set, the P-state is valid.
		 */
		if (!(hi & BIT(31)))
			return;

		fid = lo & 0x3f;
		did = (lo >> 6) & 7;
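
		/*
		 * Decode the core frequency from FID/DID. For example
		 * (illustrative values, not from vendor documentation):
		 * on family 0x10, fid = 0x1 and did = 1 give
		 * (100 * (0x1 + 0x10)) >> 1 = 850 MHz, a 50 MHz multiple
		 * that a 100 MHz-rounded _PSS entry cannot represent.
		 */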
		if (boot_cpu_data.x86 == 0x10)
			px->core_frequency = (100 * (fid + 0x10)) >> did;
		else
			px->core_frequency = (100 * (fid + 8)) >> did;
	}
}
#else
static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {}
#endif

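/*
 * acpi_processor_get_performance_states - Parse _PSS.
 * @pr: ACPI processor object.
 *
 * _PSS is a package of packages, one per P-state, each holding the core
 * frequency (MHz), power, latencies, and control/status values. Extract
 * them into pr->performance->states, compacting away entries with
 * invalid frequencies.
 */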
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *pss = NULL;
	int i;
	int last_invalid = -1;

	status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS"));
		return -ENODEV;
	}

	pss = buffer.pointer;
	if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _PSS data\n");
		result = -EFAULT;
		goto end;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
			  pss->package.count));

	pr->performance->state_count = pss->package.count;
	pr->performance->states =
	    kmalloc_array(pss->package.count,
			  sizeof(struct acpi_processor_px),
			  GFP_KERNEL);
	if (!pr->performance->states) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->performance->state_count; i++) {

		struct acpi_processor_px *px = &(pr->performance->states[i]);

		state.length = sizeof(struct acpi_processor_px);
		state.pointer = px;

		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

		status = acpi_extract_package(&(pss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data"));
			result = -EFAULT;
			kfree(pr->performance->states);
			pr->performance->states = NULL;
			goto end;
		}

		amd_fixup_frequency(px, i);

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
				  i,
				  (u32) px->core_frequency,
				  (u32) px->power,
				  (u32) px->transition_latency,
				  (u32) px->bus_master_latency,
				  (u32) px->control, (u32) px->status));

		/*
		 * Check that ACPI's u64 MHz will be valid as u32 kHz in cpufreq
		 */
		if (!px->core_frequency ||
		    ((u32)(px->core_frequency * 1000) !=
		     (px->core_frequency * 1000))) {
			printk(KERN_ERR FW_BUG PREFIX
			       "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
			       pr->id, px->core_frequency);
			if (last_invalid == -1)
				last_invalid = i;
		} else {
			if (last_invalid != -1) {
				/*
				 * Copy this valid entry over the last_invalid
				 * entry.
				 */
				memcpy(&(pr->performance->states[last_invalid]),
				       px, sizeof(struct acpi_processor_px));
				++last_invalid;
			}
		}
	}

	if (last_invalid == 0) {
		printk(KERN_ERR FW_BUG PREFIX
		       "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
		result = -EFAULT;
		kfree(pr->performance->states);
		pr->performance->states = NULL;
	}

	if (last_invalid > 0)
		pr->performance->state_count = last_invalid;

end:
	kfree(buffer.pointer);

	return result;
}

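/*
 * acpi_processor_get_performance_info - Gather P-state data for a CPU.
 * @pr: ACPI processor object.
 *
 * Evaluate _PCT, _PSS, and (unless it is ignored) _PPC to populate
 * pr->performance.
 */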
int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
	int result = 0;

	if (!pr || !pr->performance || !pr->handle)
		return -EINVAL;

	if (!acpi_has_method(pr->handle, "_PCT")) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "ACPI-based processor performance control unavailable\n"));
		return -ENODEV;
	}

	result = acpi_processor_get_performance_control(pr);
	if (result)
		goto update_bios;

	result = acpi_processor_get_performance_states(pr);
	if (result)
		goto update_bios;

	/* We need to call _PPC once when cpufreq starts */
	if (ignore_ppc != 1)
		result = acpi_processor_get_platform_limit(pr);

	return result;

	/*
	 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good
	 * hint that the BIOS is older than the CPU and does not know its
	 * frequencies.
	 */
update_bios:
#ifdef CONFIG_X86
	if (acpi_has_method(pr->handle, "_PPC")) {
		if (boot_cpu_has(X86_FEATURE_EST))
			printk(KERN_WARNING FW_BUG "BIOS needs update for CPU frequency support\n");
	}
#endif
	return result;
}
EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);

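/*
 * acpi_processor_pstate_control - Hand P-state control to the OS.
 *
 * Write the FADT pstate_control value to the SMI command port to tell
 * the platform that the OS is taking over P-state management. Returns 1
 * on success, 0 if the FADT does not describe the handshake, and -EIO
 * if the port write fails.
 */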
int acpi_processor_pstate_control(void)
{
	acpi_status status;

	if (!acpi_gbl_FADT.smi_command || !acpi_gbl_FADT.pstate_control)
		return 0;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
			  acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));

	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
				    (u32)acpi_gbl_FADT.pstate_control, 8);
	if (ACPI_SUCCESS(status))
		return 1;

	ACPI_EXCEPTION((AE_INFO, status,
			"Failed to write pstate_control [0x%x] to smi_command [0x%x]",
			acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
	return -EIO;
}

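/*
 * acpi_processor_notify_smm - Perform the OS/SMM P-state handshake once.
 * @calling_module: the cpufreq driver requesting the handshake.
 *
 * The module reference is kept while _PPC is in use so that the driver
 * cannot be unloaded while the platform may still change the limit.
 */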
int acpi_processor_notify_smm(struct module *calling_module)
{
	static int is_done;
	int result;

	if (!acpi_processor_cpufreq_init)
		return -EBUSY;

	if (!try_module_get(calling_module))
		return -EINVAL;

	/*
	 * is_done is set to negative if an error occurred, and to positive
	 * if _no_ error occurred, but SMM was already notified. This avoids
	 * double notification which might lead to unexpected results.
	 */
	if (is_done > 0) {
		module_put(calling_module);
		return 0;
	} else if (is_done < 0) {
		module_put(calling_module);
		return is_done;
	}

	is_done = -EIO;

	result = acpi_processor_pstate_control();
	if (!result) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
		module_put(calling_module);
		return 0;
	}
	if (result < 0) {
		module_put(calling_module);
		return result;
	}

	/*
	 * Success. If there is no _PPC, we have nothing to fear, so we can
	 * allow the cpufreq driver to be rmmod'ed.
	 */
	is_done = 1;

	if (!acpi_processor_ppc_in_use)
		module_put(calling_module);

	return 0;
}
EXPORT_SYMBOL(acpi_processor_notify_smm);

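/*
 * acpi_processor_get_psd - Parse _PSD (P-state dependency) for one CPU.
 * @handle: ACPI handle of the processor object.
 * @pdomain: output buffer for the coordination-domain description.
 *
 * Returns 0 on success, -ENODEV if _PSD is absent, or -EFAULT if the
 * package is malformed or uses an unknown revision or coordination type.
 */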
int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;

	status = acpi_evaluate_object(handle, "_PSD", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (psd->package.count != 1) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
		&format, &state);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		printk(KERN_ERR PREFIX "Unknown _PSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		printk(KERN_ERR PREFIX "Unknown _PSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		printk(KERN_ERR PREFIX "Invalid _PSD:coord_type\n");
		result = -EFAULT;
		goto end;
	}
end:
	kfree(buffer.pointer);
	return result;
}
EXPORT_SYMBOL(acpi_processor_get_psd);

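/*
 * acpi_processor_preregister_performance - Set up P-state coordination.
 * @performance: per-CPU performance data to attach during the walk.
 *
 * Evaluate _PSD for all CPUs and group CPUs that share a coordination
 * domain, filling in each CPU's shared_type and shared_cpu_map. On any
 * inconsistency, fall back to one independent domain per CPU.
 */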
int acpi_processor_preregister_performance(
		struct acpi_processor_performance __percpu *performance)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr;
	struct acpi_psd_package *pdomain;
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&performance_mutex);

	/*
	 * Check if another driver has already registered, and abort before
	 * changing pr->performance if it has. Check input data as well.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr) {
			/* Look only at processors in ACPI namespace */
			continue;
		}

		if (pr->performance) {
			retval = -EBUSY;
			goto err_out;
		}

		if (!performance || !per_cpu_ptr(performance, i)) {
			retval = -EINVAL;
			goto err_out;
		}
	}

	/* Call _PSD for all CPUs */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		pr->performance = per_cpu_ptr(performance, i);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		pdomain = &(pr->performance->domain_info);
		if (acpi_processor_get_psd(pr->handle, pdomain)) {
			retval = -EINVAL;
			continue;
		}
	}
	if (retval)
		goto err_ret;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up the
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pdomain = &(pr->performance->domain_info);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */

			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->performance->shared_type =
					pr->performance->shared_type;
			cpumask_copy(match_pr->performance->shared_cpu_map,
				     pr->performance->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr || !pr->performance)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->performance->shared_cpu_map);
			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
		pr->performance = NULL; /* Will be set for real in register */
	}

err_out:
	mutex_unlock(&performance_mutex);
	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);

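/*
 * acpi_processor_register_performance - Attach performance data to a CPU.
 * @performance: caller-provided buffer to attach.
 * @cpu: CPU to attach it to.
 *
 * Fails with -EBUSY if another driver has already registered.
 */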
int acpi_processor_register_performance(struct acpi_processor_performance
					*performance, unsigned int cpu)
{
	struct acpi_processor *pr;

	if (!acpi_processor_cpufreq_init)
		return -EINVAL;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return -ENODEV;
	}

	if (pr->performance) {
		mutex_unlock(&performance_mutex);
		return -EBUSY;
	}

	WARN_ON(!performance);

	pr->performance = performance;

	if (acpi_processor_get_performance_info(pr)) {
		pr->performance = NULL;
		mutex_unlock(&performance_mutex);
		return -EIO;
	}

	mutex_unlock(&performance_mutex);
	return 0;
}
EXPORT_SYMBOL(acpi_processor_register_performance);

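/*
 * acpi_processor_unregister_performance - Detach performance data again.
 * @cpu: CPU to detach it from.
 */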
void acpi_processor_unregister_performance(unsigned int cpu)
{
	struct acpi_processor *pr;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return;
	}

	if (pr->performance)
		kfree(pr->performance->states);
	pr->performance = NULL;

	mutex_unlock(&performance_mutex);
}
EXPORT_SYMBOL(acpi_processor_unregister_performance);