Lines matching refs:event in arch/x86/events/intel/lbr.c (Intel LBR driver)
 659  void intel_pmu_lbr_add(struct perf_event *event)
 666          if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
 669          cpuc->br_sel = event->hw.branch_reg.reg;
 671          if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data)
 672                  task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users++;
 676           * regular perf event scheduling, so that call will:
 685           * first LBR event.
 689           * event->total_time_running. An event that has accrued runtime cannot
 690           * be 'new'. Conversely, a new event can get installed through the
 693          if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
 695          perf_sched_cb_inc(event->ctx->pmu);
 696          if (!cpuc->lbr_users++ && !event->total_time_running)
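
Read together, the fragments from intel_pmu_lbr_add() show the add-side bookkeeping: per-task callstack users, adaptive-PEBS users, the sched_task callback refcount, and the wipe-on-first-user heuristic described by the comment fragments at 676-690. A minimal reconstruction is sketched below; the cpuc variable, the lbr_nr guard, the lbr_select assignment, and the intel_pmu_lbr_reset() call do not appear in the matches above and are filled in as assumptions.

void intel_pmu_lbr_add(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (!x86_pmu.lbr_nr)            /* assumed guard: no LBRs on this PMU */
                return;

        /* Event carries its own LBR_SELECT configuration (assumed from the flag test). */
        if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
                cpuc->lbr_select = 1;

        cpuc->br_sel = event->hw.branch_reg.reg;

        /* Count per-task callstack users so context switches save/restore LBR state. */
        if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data)
                task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users++;

        /* With adaptive PEBS, precise events can get LBRs via the PEBS record. */
        if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
                cpuc->lbr_pebs_users++;

        perf_sched_cb_inc(event->ctx->pmu);

        /*
         * First user: wipe stale LBR state, unless the event has already
         * accrued runtime; an event with total_time_running != 0 cannot
         * be 'new' (lines 689-690 above).
         */
        if (!cpuc->lbr_users++ && !event->total_time_running)
                intel_pmu_lbr_reset();
}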
 740  void intel_pmu_lbr_del(struct perf_event *event)
 748              event->ctx->task_ctx_data)
 749                  task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users--;
 751          if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
 754          if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
 759          perf_sched_cb_dec(event->ctx->pmu);
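
The delete path at 740 mirrors the add path, decrementing each counter bumped in intel_pmu_lbr_add(). A sketch under the same assumptions (the lbr_nr guard, the lbr_select clear, and the lbr_users decrement are inferred, not shown in the matches):

void intel_pmu_lbr_del(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (!x86_pmu.lbr_nr)
                return;

        if (branch_user_callstack(cpuc->br_sel) &&
            event->ctx->task_ctx_data)
                task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users--;

        if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
                cpuc->lbr_select = 0;

        if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
                cpuc->lbr_pebs_users--;

        cpuc->lbr_users--;              /* assumed: undoes the bump in _add() */
        perf_sched_cb_dec(event->ctx->pmu);
}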
1008           * This could be smarter and actually check the event,
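
The lone comment at 1008 appears to come from the LBR read path, where the MSR read is skipped when every LBR user is an adaptive-PEBS event (the branch records then arrive inside the PEBS record instead). A plausible reconstruction of the guard it annotates, using the counters maintained above:

        /*
         * Don't read when all LBR users are using adaptive PEBS.
         * This could be smarter and actually check the event,
         * but this simple approach seems to work for now.
         */
        if (!cpuc->lbr_users || cpuc->lbr_users == cpuc->lbr_pebs_users)
                return;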
1025  static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
1027          u64 br_type = event->attr.branch_sample_type;
1084          event->hw.branch_reg.reg = mask;
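
intel_pmu_setup_sw_lbr_filter() at 1025 translates the user-visible perf_event_attr.branch_sample_type bits into an internal X86_BR_* mask, which line 1084 stashes in event->hw.branch_reg.reg for software post-filtering of captured records. An abridged sketch; only a few representative mappings are shown, and the full table is longer:

static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
        u64 br_type = event->attr.branch_sample_type;
        int mask = 0;

        if (br_type & PERF_SAMPLE_BRANCH_USER)
                mask |= X86_BR_USER;
        if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
                mask |= X86_BR_KERNEL;
        if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
                mask |= X86_BR_ANY_CALL;
        if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
                mask |= X86_BR_IND_CALL;
        /* ... remaining PERF_SAMPLE_BRANCH_* to X86_BR_* mappings elided ... */

        /* Stash the request; the read path filters captured branches against it. */
        event->hw.branch_reg.reg = mask;
        return 0;
}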
1093  static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
1096          u64 br_type = event->attr.branch_sample_type;
1112          reg = &event->hw.branch_reg;
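
The hardware variant at 1093 walks the same branch_sample_type bits but maps each requested bit through the per-microarchitecture x86_pmu.lbr_sel_map table into MSR_LBR_SELECT bits, recording the result in the extra register that line 1112 takes a pointer to. In the sketch below, the lbr_sel_map loop, the LBR_NOT_SUPP/LBR_IGN handling, and the final XOR encoding are assumptions based on how LBR_SELECT's suppress-mode bits work; the exact config encoding varies across kernel versions:

static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
        struct hw_perf_event_extra *reg;
        u64 br_type = event->attr.branch_sample_type;
        u64 mask = 0, v;
        int i;

        for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
                if (!(br_type & (1ULL << i)))
                        continue;

                v = x86_pmu.lbr_sel_map[i];
                if (v == LBR_NOT_SUPP)
                        return -EOPNOTSUPP;     /* filter not supported on this CPU */
                if (v != LBR_IGN)
                        mask |= v;
        }

        reg = &event->hw.branch_reg;
        reg->idx = EXTRA_REG_LBR;
        /* The low LBR_SELECT bits suppress capture when set, hence the XOR. */
        reg->config = mask ^ x86_pmu.lbr_sel_mask;
        return 0;
}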
1145  int intel_pmu_setup_lbr_filter(struct perf_event *event)
1158          ret = intel_pmu_setup_sw_lbr_filter(event);
1166                  ret = intel_pmu_setup_hw_lbr_filter(event);
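
intel_pmu_setup_lbr_filter() at 1145 is the entry point that chains the two: the software filter is set up unconditionally, the hardware filter only where a select map exists. A short sketch; the lbr_nr and lbr_sel_map guards are inferred from the surrounding driver, not from the matched lines:

int intel_pmu_setup_lbr_filter(struct perf_event *event)
{
        int ret = 0;

        if (!x86_pmu.lbr_nr)                    /* assumed: no LBR on this PMU */
                return -EOPNOTSUPP;

        ret = intel_pmu_setup_sw_lbr_filter(event);
        if (ret)
                return ret;

        if (x86_pmu.lbr_sel_map)                /* HW filtering only if supported */
                ret = intel_pmu_setup_hw_lbr_filter(event);

        return ret;
}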