1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License.  See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2004, 05, 06 by Ralf Baechle
7 * Copyright (C) 2005 by MIPS Technologies, Inc.
8 */
9#include <linux/cpumask.h>
10#include <linux/oprofile.h>
11#include <linux/interrupt.h>
12#include <linux/smp.h>
13#include <asm/irq_regs.h>
14#include <asm/time.h>
15
16#include "op_impl.h"
17
18#define M_PERFCTL_EVENT(event)		(((event) << MIPS_PERFCTRL_EVENT_S) & \
19					 MIPS_PERFCTRL_EVENT)
20#define M_PERFCTL_VPEID(vpe)		((vpe)	  << MIPS_PERFCTRL_VPEID_S)
21
22#define M_COUNTER_OVERFLOW		(1UL	  << 31)
23
24static int (*save_perf_irq)(void);
25static int perfcount_irq;
26
27/*
28 * XLR has only one set of counters per core. Designate the
29 * first hardware thread in the core for setup and init.
30 * Skip CPUs with non-zero hardware thread id (4 hwt per core)
31 */
32#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
33#define oprofile_skip_cpu(c)	((cpu_logical_map(c) & 0x3) != 0)
34#else
35#define oprofile_skip_cpu(c)	0
36#endif
37
38#ifdef CONFIG_MIPS_MT_SMP
39#define WHAT		(MIPS_PERFCTRL_MT_EN_VPE | \
40			 M_PERFCTL_VPEID(cpu_vpe_id(&current_cpu_data)))
41#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
42			0 : cpu_vpe_id(&current_cpu_data))
43
44/*
45 * The number of bits to shift to convert between counters per core and
46 * counters per VPE.  There is no reasonable interface atm to obtain the
47 * number of VPEs used by Linux and in the 34K this number is fixed to two
 * anyway so we hardcode a few things here for the moment.  The way it's
49 * done here will ensure that oprofile VSMP kernel will run right on a lesser
50 * core like a 24K also or with maxcpus=1.
51 */
/*
 * Shift used to convert between per-core and per-VPE counter counts.
 * With more than one CPU the (up to) four core counters are split
 * between two VPEs, hence a shift of one; a single CPU gets them all.
 */
static inline unsigned int vpe_shift(void)
{
	return (num_possible_cpus() > 1) ? 1 : 0;
}
59
60#else
61
62#define WHAT		0
63#define vpe_id()	0
64
/* Without MT SMP the counters are never split between VPEs: no shift. */
static inline unsigned int vpe_shift(void)
{
	return 0;
}
69
70#endif
71
/* Convert a whole-core counter count into the share owned by one VPE. */
static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	unsigned int shift = vpe_shift();

	return counters >> shift;
}
76
/* Convert one VPE's counter count back into the whole-core total. */
static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
{
	unsigned int shift = vpe_shift();

	return counters << shift;
}
81
/*
 * Generate r_c0_<r><n>() / w_c0_<r><n>() accessors for logical counter n.
 * When vpe_id() is 0 the accessor touches physical register n; when it is
 * 1 it touches the alternate physical register np (the VPEs divide the
 * core's counters between them).  Any other vpe_id() is a BUG().
 */
#define __define_perf_accessors(r, n, np)				\
									\
static inline unsigned int r_c0_ ## r ## n(void)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		return read_c0_ ## r ## n();				\
	case 1:								\
		return read_c0_ ## r ## np();				\
	default:							\
		BUG();							\
	}								\
	return 0;							\
}									\
									\
static inline void w_c0_ ## r ## n(unsigned int value)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		write_c0_ ## r ## n(value);				\
		return;							\
	case 1:								\
		write_c0_ ## r ## np(value);				\
		return;							\
	default:							\
		BUG();							\
	}								\
	return;								\
}									\

/*
 * Accessors for logical counters 0-3.  The second argument is the
 * physical register used when vpe_id() == 1: VPE 1's logical 0/1 map to
 * physical 2/3 and vice versa, so the two VPEs interleave the four
 * core counters without colliding.
 */
__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)
125
126struct op_mips_model op_model_mipsxx_ops;
127
/* Register values computed by mipsxx_reg_setup(), programmed per CPU. */
static struct mipsxx_register_config {
	unsigned int control[4];	/* perfctrl word for each counter */
	unsigned int counter[4];	/* initial count (0x80000000 - period) */
} reg;
132
133/* Compute all of the registers in preparation for enabling profiling.	*/
134
135static void mipsxx_reg_setup(struct op_counter_config *ctr)
136{
137	unsigned int counters = op_model_mipsxx_ops.num_counters;
138	int i;
139
140	/* Compute the performance counter control word.  */
141	for (i = 0; i < counters; i++) {
142		reg.control[i] = 0;
143		reg.counter[i] = 0;
144
145		if (!ctr[i].enabled)
146			continue;
147
148		reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
149				 MIPS_PERFCTRL_IE;
150		if (ctr[i].kernel)
151			reg.control[i] |= MIPS_PERFCTRL_K;
152		if (ctr[i].user)
153			reg.control[i] |= MIPS_PERFCTRL_U;
154		if (ctr[i].exl)
155			reg.control[i] |= MIPS_PERFCTRL_EXL;
156		if (boot_cpu_type() == CPU_XLR)
157			reg.control[i] |= XLR_PERFCTRL_ALLTHREADS;
158		reg.counter[i] = 0x80000000 - ctr[i].count;
159	}
160}
161
162/* Program all of the registers in preparation for enabling profiling.	*/
163
/*
 * Program the precomputed counter start values into this CPU's registers
 * (runs as an on_each_cpu() callback; args is unused).  Control registers
 * are written as 0 so counting stays disabled until mipsxx_cpu_start().
 */
static void mipsxx_cpu_setup(void *args)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;

	/* XLR: counters are per-core; only hwthread 0 programs them. */
	if (oprofile_skip_cpu(smp_processor_id()))
		return;

	/* Deliberate fallthrough: setting up counter N implies 0..N-1. */
	switch (counters) {
	case 4:
		w_c0_perfctrl3(0);
		w_c0_perfcntr3(reg.counter[3]);
		fallthrough;
	case 3:
		w_c0_perfctrl2(0);
		w_c0_perfcntr2(reg.counter[2]);
		fallthrough;
	case 2:
		w_c0_perfctrl1(0);
		w_c0_perfcntr1(reg.counter[1]);
		fallthrough;
	case 1:
		w_c0_perfctrl0(0);
		w_c0_perfcntr0(reg.counter[0]);
	}
}
189
190/* Start all counters on current CPU */
/* Start all counters on the current CPU by writing the control words. */
static void mipsxx_cpu_start(void *args)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;

	/* XLR: counters are per-core; only hwthread 0 touches them. */
	if (oprofile_skip_cpu(smp_processor_id()))
		return;

	/* WHAT adds the VPE routing bits on MT SMP (0 otherwise). */
	switch (counters) {
	case 4:
		w_c0_perfctrl3(WHAT | reg.control[3]);
		fallthrough;
	case 3:
		w_c0_perfctrl2(WHAT | reg.control[2]);
		fallthrough;
	case 2:
		w_c0_perfctrl1(WHAT | reg.control[1]);
		fallthrough;
	case 1:
		w_c0_perfctrl0(WHAT | reg.control[0]);
	}
}
212
213/* Stop all counters on current CPU */
/* Stop all counters on the current CPU by zeroing the control words. */
static void mipsxx_cpu_stop(void *args)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;

	/* XLR: counters are per-core; only hwthread 0 touches them. */
	if (oprofile_skip_cpu(smp_processor_id()))
		return;

	switch (counters) {
	case 4:
		w_c0_perfctrl3(0);
		fallthrough;
	case 3:
		w_c0_perfctrl2(0);
		fallthrough;
	case 2:
		w_c0_perfctrl1(0);
		fallthrough;
	case 1:
		w_c0_perfctrl0(0);
	}
}
235
/*
 * Counter overflow handler, installed as perf_irq and called from the
 * interrupt wrapper below.  Returns IRQ_HANDLED if at least one enabled
 * counter overflowed, IRQ_NONE otherwise.
 */
static int mipsxx_perfcount_handler(void)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;
	unsigned int control;
	unsigned int counter;
	int handled = IRQ_NONE;

	/* On R2+ cores Cause.PCI says whether this interrupt is ours. */
	if (cpu_has_mips_r2 && !(read_c0_cause() & CAUSEF_PCI))
		return handled;

	/* The cases fall through so counter N implies checking 0..N-1. */
	switch (counters) {
/*
 * Check counter n: if its interrupt is enabled and bit 31 (overflow)
 * is set, log a sample and rearm the counter with its start value.
 */
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		control = r_c0_perfctrl ## n();				\
		counter = r_c0_perfcntr ## n();				\
		if ((control & MIPS_PERFCTRL_IE) &&			\
		    (counter & M_COUNTER_OVERFLOW)) {			\
			oprofile_add_sample(get_irq_regs(), n);		\
			w_c0_perfcntr ## n(reg.counter[n]);		\
			handled = IRQ_HANDLED;				\
		}
	HANDLE_COUNTER(3)
	fallthrough;
	HANDLE_COUNTER(2)
	fallthrough;
	HANDLE_COUNTER(1)
	fallthrough;
	HANDLE_COUNTER(0)
	}

	return handled;
}
268
/*
 * Probe how many counters the CPU implements: each perfctrl register's
 * M bit indicates that another counter/control pair follows it.
 * Note the reads must stay in ascending order.
 */
static inline int __n_counters(void)
{
	if (!cpu_has_perf)
		return 0;
	if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
		return 1;
	if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
		return 2;
	if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
		return 3;

	return 4;
}
282
283static inline int n_counters(void)
284{
285	int counters;
286
287	switch (current_cpu_type()) {
288	case CPU_R10000:
289		counters = 2;
290		break;
291
292	case CPU_R12000:
293	case CPU_R14000:
294	case CPU_R16000:
295		counters = 4;
296		break;
297
298	default:
299		counters = __n_counters();
300	}
301
302	return counters;
303}
304
/*
 * Zero the first <arg> control and counter registers on this CPU
 * (on_each_cpu() callback; the counter count is smuggled through the
 * void * argument).
 */
static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;
	/* Deliberate fallthrough: resetting counter N implies 0..N-1. */
	switch (counters) {
	case 4:
		w_c0_perfctrl3(0);
		w_c0_perfcntr3(0);
		fallthrough;
	case 3:
		w_c0_perfctrl2(0);
		w_c0_perfcntr2(0);
		fallthrough;
	case 2:
		w_c0_perfctrl1(0);
		w_c0_perfcntr1(0);
		fallthrough;
	case 1:
		w_c0_perfctrl0(0);
		w_c0_perfcntr0(0);
	}
}
326
/*
 * request_irq() entry point: thin wrapper around the common handler.
 * irq and dev_id are unused; the handler's int result (IRQ_NONE /
 * IRQ_HANDLED) converts directly to irqreturn_t.
 */
static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
{
	return mipsxx_perfcount_handler();
}
331
332static int __init mipsxx_init(void)
333{
334	int counters;
335
336	counters = n_counters();
337	if (counters == 0) {
338		printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
339		return -ENODEV;
340	}
341
342#ifdef CONFIG_MIPS_MT_SMP
343	if (!cpu_has_mipsmt_pertccounters)
344		counters = counters_total_to_per_cpu(counters);
345#endif
346	on_each_cpu(reset_counters, (void *)(long)counters, 1);
347
348	op_model_mipsxx_ops.num_counters = counters;
349	switch (current_cpu_type()) {
350	case CPU_M14KC:
351		op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
352		break;
353
354	case CPU_M14KEC:
355		op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
356		break;
357
358	case CPU_20KC:
359		op_model_mipsxx_ops.cpu_type = "mips/20K";
360		break;
361
362	case CPU_24K:
363		op_model_mipsxx_ops.cpu_type = "mips/24K";
364		break;
365
366	case CPU_25KF:
367		op_model_mipsxx_ops.cpu_type = "mips/25K";
368		break;
369
370	case CPU_1004K:
371	case CPU_34K:
372		op_model_mipsxx_ops.cpu_type = "mips/34K";
373		break;
374
375	case CPU_1074K:
376	case CPU_74K:
377		op_model_mipsxx_ops.cpu_type = "mips/74K";
378		break;
379
380	case CPU_INTERAPTIV:
381		op_model_mipsxx_ops.cpu_type = "mips/interAptiv";
382		break;
383
384	case CPU_PROAPTIV:
385		op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
386		break;
387
388	case CPU_P5600:
389		op_model_mipsxx_ops.cpu_type = "mips/P5600";
390		break;
391
392	case CPU_I6400:
393		op_model_mipsxx_ops.cpu_type = "mips/I6400";
394		break;
395
396	case CPU_M5150:
397		op_model_mipsxx_ops.cpu_type = "mips/M5150";
398		break;
399
400	case CPU_5KC:
401		op_model_mipsxx_ops.cpu_type = "mips/5K";
402		break;
403
404	case CPU_R10000:
405		if ((current_cpu_data.processor_id & 0xff) == 0x20)
406			op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
407		else
408			op_model_mipsxx_ops.cpu_type = "mips/r10000";
409		break;
410
411	case CPU_R12000:
412	case CPU_R14000:
413		op_model_mipsxx_ops.cpu_type = "mips/r12000";
414		break;
415
416	case CPU_R16000:
417		op_model_mipsxx_ops.cpu_type = "mips/r16000";
418		break;
419
420	case CPU_SB1:
421	case CPU_SB1A:
422		op_model_mipsxx_ops.cpu_type = "mips/sb1";
423		break;
424
425	case CPU_LOONGSON32:
426		op_model_mipsxx_ops.cpu_type = "mips/loongson1";
427		break;
428
429	case CPU_XLR:
430		op_model_mipsxx_ops.cpu_type = "mips/xlr";
431		break;
432
433	default:
434		printk(KERN_ERR "Profiling unsupported for this CPU\n");
435
436		return -ENODEV;
437	}
438
439	save_perf_irq = perf_irq;
440	perf_irq = mipsxx_perfcount_handler;
441
442	if (get_c0_perfcount_int)
443		perfcount_irq = get_c0_perfcount_int();
444	else if (cp0_perfcount_irq >= 0)
445		perfcount_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
446	else
447		perfcount_irq = -1;
448
449	if (perfcount_irq >= 0)
450		return request_irq(perfcount_irq, mipsxx_perfcount_int,
451				   IRQF_PERCPU | IRQF_NOBALANCING |
452				   IRQF_NO_THREAD | IRQF_NO_SUSPEND |
453				   IRQF_SHARED,
454				   "Perfcounter", save_perf_irq);
455
456	return 0;
457}
458
459static void mipsxx_exit(void)
460{
461	int counters = op_model_mipsxx_ops.num_counters;
462
463	if (perfcount_irq >= 0)
464		free_irq(perfcount_irq, save_perf_irq);
465
466	counters = counters_per_cpu_to_total(counters);
467	on_each_cpu(reset_counters, (void *)(long)counters, 1);
468
469	perf_irq = save_perf_irq;
470}
471
472struct op_mips_model op_model_mipsxx_ops = {
473	.reg_setup	= mipsxx_reg_setup,
474	.cpu_setup	= mipsxx_cpu_setup,
475	.init		= mipsxx_init,
476	.exit		= mipsxx_exit,
477	.cpu_start	= mipsxx_cpu_start,
478	.cpu_stop	= mipsxx_cpu_stop,
479};
480