Lines Matching refs:cpuc (cpuc is the per-CPU struct cpu_hw_events pointer obtained via this_cpu_ptr(&cpu_hw_events))
387 * If new events have been scheduled then update cpuc with the new
391 static void maybe_change_configuration(struct cpu_hw_events *cpuc)
395 if (cpuc->n_added == 0)
399 for (j = 0; j < cpuc->n_events; j++) {
400 struct perf_event *pe = cpuc->event[j];
402 if (cpuc->current_idx[j] != PMC_NO_INDEX &&
403 cpuc->current_idx[j] != pe->hw.idx) {
404 alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);
405 cpuc->current_idx[j] = PMC_NO_INDEX;
410 cpuc->idx_mask = 0;
411 for (j = 0; j < cpuc->n_events; j++) {
412 struct perf_event *pe = cpuc->event[j];
416 if (cpuc->current_idx[j] == PMC_NO_INDEX) {
418 cpuc->current_idx[j] = idx;
422 cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
424 cpuc->config = cpuc->event[0]->hw.config_base;
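The matches at lines 387-424 outline a reconfiguration pass over the per-CPU event list: events whose counter assignment changed are flushed and unassigned, the active-counter bitmask is rebuilt, and the PMU-wide config word is taken from the first event. Below is a minimal, self-contained userspace sketch of that bookkeeping only; the struct layout is simplified, and flush_count()/pick_counter() are illustrative stand-ins for alpha_perf_event_update() and the counter-selection code the matches do not show.

    #define MAX_HWEVENTS 4
    #define PMC_NO_INDEX (-1)

    struct ev { int hw_idx; unsigned long config_base; };

    struct cpu_hw_model {
        int n_events, n_added;
        struct ev *event[MAX_HWEVENTS];
        int current_idx[MAX_HWEVENTS];
        unsigned long idx_mask, config;
    };

    /* Stand-ins for alpha_perf_event_update() and the counter-selection
     * logic that the matches above do not show. */
    void flush_count(struct ev *pe, int idx) { (void)pe; (void)idx; }
    int pick_counter(struct cpu_hw_model *c, int j) { (void)c; return j; }

    void reconfigure(struct cpu_hw_model *c)
    {
        int j;

        if (c->n_added == 0)            /* nothing new was scheduled */
            return;

        /* Flush events whose counter assignment has changed. */
        for (j = 0; j < c->n_events; j++) {
            struct ev *pe = c->event[j];
            if (c->current_idx[j] != PMC_NO_INDEX &&
                c->current_idx[j] != pe->hw_idx) {
                flush_count(pe, c->current_idx[j]);
                c->current_idx[j] = PMC_NO_INDEX;
            }
        }

        /* Rebuild the active-counter bitmask from scratch. */
        c->idx_mask = 0;
        for (j = 0; j < c->n_events; j++) {
            if (c->current_idx[j] == PMC_NO_INDEX)
                c->current_idx[j] = pick_counter(c, j);
            c->idx_mask |= 1UL << c->current_idx[j];
        }

        /* PMU-wide config word comes from the first scheduled event. */
        c->config = c->event[0]->config_base;
    }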
435 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
456 n0 = cpuc->n_events;
458 cpuc->event[n0] = event;
459 cpuc->evtype[n0] = event->hw.event_base;
460 cpuc->current_idx[n0] = PMC_NO_INDEX;
462 if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
463 cpuc->n_events++;
464 cpuc->n_added++;
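Lines 435-464 show the add path: the new event is tentatively appended to the parallel event/evtype/current_idx arrays and only committed (n_events++, n_added++) if a constraint check over the enlarged set succeeds. A hedged sketch of that pattern follows; check_constraints() is a stand-in for alpha_check_constraints(), whose internals are not part of the matches.

    #define MAX_HWEVENTS 4
    #define PMC_NO_INDEX (-1)

    struct ev { unsigned long event_base; };

    struct cpu_hw_model {
        int n_events, n_added;
        struct ev *event[MAX_HWEVENTS];
        unsigned long evtype[MAX_HWEVENTS];
        int current_idx[MAX_HWEVENTS];
    };

    /* Stand-in for alpha_check_constraints(); nonzero means "unschedulable". */
    int check_constraints(struct ev **evs, unsigned long *types, int n)
    {
        (void)evs; (void)types;
        return n > MAX_HWEVENTS;        /* illustrative rule only */
    }

    int add_event(struct cpu_hw_model *c, struct ev *e)
    {
        int n0 = c->n_events;

        if (n0 >= MAX_HWEVENTS)
            return -1;

        /* Tentatively place the event; no counter assigned yet. */
        c->event[n0] = e;
        c->evtype[n0] = e->event_base;
        c->current_idx[n0] = PMC_NO_INDEX;

        /* Commit only if the enlarged set remains schedulable. */
        if (!check_constraints(c->event, c->evtype, n0 + 1)) {
            c->n_events++;
            c->n_added++;
            return 0;
        }
        return -1;
    }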
487 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
495 for (j = 0; j < cpuc->n_events; j++) {
496 if (event == cpuc->event[j]) {
497 int idx = cpuc->current_idx[j];
502 while (++j < cpuc->n_events) {
503 cpuc->event[j - 1] = cpuc->event[j];
504 cpuc->evtype[j - 1] = cpuc->evtype[j];
505 cpuc->current_idx[j - 1] =
506 cpuc->current_idx[j];
513 cpuc->idx_mask &= ~(1UL<<idx);
514 cpuc->n_events--;
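Lines 487-514 show removal: the event is located by linear search, the tails of all three parallel arrays are shifted down to close the gap, its counter bit is cleared from idx_mask, and n_events is decremented. A small self-contained sketch of that bookkeeping; the PMC_NO_INDEX guard is an addition the matches neither confirm nor rule out.

    #define MAX_HWEVENTS 4
    #define PMC_NO_INDEX (-1)

    struct ev { int id; };

    struct cpu_hw_model {
        int n_events;
        struct ev *event[MAX_HWEVENTS];
        unsigned long evtype[MAX_HWEVENTS];
        int current_idx[MAX_HWEVENTS];
        unsigned long idx_mask;
    };

    void del_event(struct cpu_hw_model *c, struct ev *e)
    {
        int j, idx;

        for (j = 0; j < c->n_events; j++) {
            if (e != c->event[j])
                continue;

            idx = c->current_idx[j];

            /* Close the gap: shift the tails of the parallel arrays down. */
            while (++j < c->n_events) {
                c->event[j - 1] = c->event[j];
                c->evtype[j - 1] = c->evtype[j];
                c->current_idx[j - 1] = c->current_idx[j];
            }

            /* Release the counter (if one was assigned) and shrink the set. */
            if (idx != PMC_NO_INDEX)
                c->idx_mask &= ~(1UL << idx);
            c->n_events--;
            break;
        }
    }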
535 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
538 cpuc->idx_mask &= ~(1UL<<hwc->idx);
547 if (cpuc->enabled)
555 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
567 cpuc->idx_mask |= 1UL<<hwc->idx;
568 if (cpuc->enabled)
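Lines 535-568 cover per-event stop and start: stop clears the event's counter bit from idx_mask, start sets it, and both then test cpuc->enabled. What happens inside those if-bodies (after lines 547 and 568) is not part of the matches, so the write_pmu() call below is purely an assumption about pushing the updated mask to hardware while the PMU is live.

    struct hw_ctr { int idx; };

    struct cpu_hw_model {
        unsigned long idx_mask;
        int enabled;
    };

    /* Assumed hardware write; the real if-bodies are not in the matches. */
    void write_pmu(unsigned long mask) { (void)mask; }

    void stop_counter(struct cpu_hw_model *c, struct hw_ctr *hwc)
    {
        c->idx_mask &= ~(1UL << hwc->idx);      /* drop from the active set */
        if (c->enabled)
            write_pmu(c->idx_mask);             /* assumed: push new mask */
    }

    void start_counter(struct cpu_hw_model *c, struct hw_ctr *hwc)
    {
        c->idx_mask |= 1UL << hwc->idx;         /* add to the active set */
        if (c->enabled)
            write_pmu(c->idx_mask);             /* assumed: push new mask */
    }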
722 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
724 if (cpuc->enabled)
727 cpuc->enabled = 1;
730 if (cpuc->n_events > 0) {
731 /* Update cpuc with information from any new scheduled events. */
732 maybe_change_configuration(cpuc);
736 wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config);
737 wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
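Lines 722-737 are the PMU enable path: mark the per-CPU state enabled and, if any events are scheduled, fold in new ones via maybe_change_configuration(), then program the desired-events config and finally the enable mask. A sketch under those assumptions; the early return on an already-enabled PMU at line 724 is inferred, and the PERFMON_CMD_* values are placeholders rather than the real Alpha command codes.

    enum { PERFMON_CMD_DESIRED_EVENTS, PERFMON_CMD_ENABLE };    /* placeholder values */

    struct cpu_hw_model {
        int n_events, enabled;
        unsigned long idx_mask, config;
    };

    void wrperfmon_model(int cmd, unsigned long arg) { (void)cmd; (void)arg; }
    void reconfigure(struct cpu_hw_model *c) { (void)c; }       /* see earlier sketch */

    void pmu_enable_model(struct cpu_hw_model *c)
    {
        if (c->enabled)                 /* inferred early return at line 724 */
            return;

        c->enabled = 1;

        if (c->n_events > 0) {
            /* Fold in any events added while the PMU was disabled. */
            reconfigure(c);
            /* Program event selection, then turn the counters on. */
            wrperfmon_model(PERFMON_CMD_DESIRED_EVENTS, c->config);
            wrperfmon_model(PERFMON_CMD_ENABLE, c->idx_mask);
        }
    }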
748 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
750 if (!cpuc->enabled)
753 cpuc->enabled = 0;
754 cpuc->n_added = 0;
756 wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
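Lines 748-756 are the mirror-image disable path: clear the enabled flag, reset n_added so pending additions are reconciled on the next enable, and disable the active counters. A minimal sketch, with the early-out at line 750 inferred and a placeholder command value.

    enum { PERFMON_CMD_DISABLE };       /* placeholder value */

    struct cpu_hw_model {
        int n_added, enabled;
        unsigned long idx_mask;
    };

    void wrperfmon_model(int cmd, unsigned long arg) { (void)cmd; (void)arg; }

    void pmu_disable_model(struct cpu_hw_model *c)
    {
        if (!c->enabled)                /* inferred early return at line 750 */
            return;

        c->enabled = 0;
        c->n_added = 0;                 /* reconciled on the next enable */

        wrperfmon_model(PERFMON_CMD_DISABLE, c->idx_mask);
    }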
807 struct cpu_hw_events *cpuc;
814 cpuc = this_cpu_ptr(&cpu_hw_events);
821 wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
828 wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
834 for (j = 0; j < cpuc->n_events; j++) {
835 if (cpuc->current_idx[j] == idx)
839 if (unlikely(j == cpuc->n_events)) {
841 wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
845 event = cpuc->event[j];
851 wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
867 wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
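Lines 807-867 come from the overflow interrupt handler: disable all active counters, map the overflowing counter index back to its owning event via current_idx[], bail out (re-enabling) if no owner is found, otherwise handle the event and re-enable. The extra PERFMON_CMD_ENABLE matches at lines 828 and 851 sit in branches the listing does not show, so this sketch collapses the flow to the two visible exit paths; wrperfmon_model() and handle_overflow() are stand-ins.

    #define MAX_HWEVENTS 4

    enum { PERFMON_CMD_ENABLE, PERFMON_CMD_DISABLE };   /* placeholder values */

    struct ev { int id; };

    struct cpu_hw_model {
        int n_events;
        struct ev *event[MAX_HWEVENTS];
        int current_idx[MAX_HWEVENTS];
        unsigned long idx_mask;
    };

    void wrperfmon_model(int cmd, unsigned long arg) { (void)cmd; (void)arg; }
    void handle_overflow(struct ev *e, int idx) { (void)e; (void)idx; }

    void pmi_model(struct cpu_hw_model *c, int idx)
    {
        int j;

        /* Quiesce all counters while the overflow is processed. */
        wrperfmon_model(PERFMON_CMD_DISABLE, c->idx_mask);

        /* Map the overflowing counter index back to its owning event. */
        for (j = 0; j < c->n_events; j++)
            if (c->current_idx[j] == idx)
                break;

        if (j == c->n_events) {
            /* No owner: just turn the counters back on and return. */
            wrperfmon_model(PERFMON_CMD_ENABLE, c->idx_mask);
            return;
        }

        handle_overflow(c->event[j], idx);

        /* Resume counting with the current active mask. */
        wrperfmon_model(PERFMON_CMD_ENABLE, c->idx_mask);
    }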