Lines matching refs: event (arch/x86/events/intel/lbr.c)

578 void intel_pmu_lbr_add(struct perf_event *event)
585 if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
588 cpuc->br_sel = event->hw.branch_reg.reg;
590 if (branch_user_callstack(cpuc->br_sel) && event->pmu_ctx->task_ctx_data)
591 task_context_opt(event->pmu_ctx->task_ctx_data)->lbr_callstack_users++;
595 * regular perf event scheduling, so that call will:
604 * first LBR event.
608 * event->total_time_running. An event that has accrued runtime cannot
609 * be 'new'. Conversely, a new event can get installed through the
612 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
614 perf_sched_cb_inc(event->pmu);
615 if (!cpuc->lbr_users++ && !event->total_time_running)
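Read together, the add-side hits above show three pieces of bookkeeping in intel_pmu_lbr_add(): counting LBR call-stack users in the task context, requesting the pmu::sched_task() callback via perf_sched_cb_inc(), and consulting event->total_time_running to decide whether this is a genuinely new event that warrants wiping the LBR stack (an event that has accrued runtime cannot be new). Below is a minimal standalone C sketch of that pattern only; the *_sketch structs and plain booleans are illustrative stand-ins, not the kernel's per-CPU cpu_hw_events state.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's per-CPU and per-task state. */
struct cpu_hw_events_sketch {
    int lbr_users;      /* active events that need the LBR */
    int lbr_pebs_users; /* subset using the PEBS baseline */
};

struct task_ctx_sketch {
    int lbr_callstack_users; /* events in LBR call-stack mode */
};

static void lbr_reset_sketch(void)
{
    puts("LBR stack wiped: first user, and the event never ran");
}

/*
 * Same ordering as lines 590-615: count call-stack users, then use
 * total_time_running as the "is this event new?" heuristic -- only a
 * first LBR user that has accrued no runtime wipes the LBR stack.
 */
static void lbr_add_sketch(struct cpu_hw_events_sketch *cpuc,
                           struct task_ctx_sketch *ctx,
                           bool callstack, bool pebs_baseline,
                           unsigned long long total_time_running)
{
    if (callstack && ctx)
        ctx->lbr_callstack_users++;

    if (pebs_baseline)
        cpuc->lbr_pebs_users++;

    /* kernel: perf_sched_cb_inc(event->pmu); */
    if (!cpuc->lbr_users++ && !total_time_running)
        lbr_reset_sketch();
}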
659 void intel_pmu_lbr_del(struct perf_event *event)
667 event->pmu_ctx->task_ctx_data)
668 task_context_opt(event->pmu_ctx->task_ctx_data)->lbr_callstack_users--;
670 if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
673 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
678 perf_sched_cb_dec(event->pmu);
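The delete path (lines 659-678) is the mirror image: the same branch_user_callstack() condition guards a decrement of lbr_callstack_users, the PEBS-baseline count drops under the same precise_ip check, and perf_sched_cb_dec() releases the scheduling callback. Continuing the sketch types from above:

/* Reuses the sketch types above: undo lbr_add_sketch() symmetrically. */
static void lbr_del_sketch(struct cpu_hw_events_sketch *cpuc,
                           struct task_ctx_sketch *ctx,
                           bool callstack, bool pebs_baseline)
{
    if (callstack && ctx)
        ctx->lbr_callstack_users--;

    if (pebs_baseline)
        cpuc->lbr_pebs_users--;

    cpuc->lbr_users--;
    /* kernel: perf_sched_cb_dec(event->pmu); */
}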
931 * This could be smarter and actually check the event,
948 static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
950 u64 br_type = event->attr.branch_sample_type;
1007 event->hw.branch_reg.reg = mask;
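intel_pmu_setup_sw_lbr_filter() translates event->attr.branch_sample_type (line 950) into an internal branch-class mask and stores it in event->hw.branch_reg.reg (line 1007), to be used when post-filtering captured records in software. A hedged sketch of that bit translation follows; every constant here is invented to stand in for the kernel's PERF_SAMPLE_BRANCH_* and X86_BR_* values.

#include <stdint.h>

/* Hypothetical request bits, standing in for PERF_SAMPLE_BRANCH_*. */
#define BR_SAMPLE_ANY_CALL   (1ull << 0)
#define BR_SAMPLE_ANY_RETURN (1ull << 1)

/* Hypothetical internal filter bits, standing in for X86_BR_*. */
#define BR_CALL    (1ull << 0)
#define BR_RET     (1ull << 1)
#define BR_SYSCALL (1ull << 2)

/*
 * Expand user-visible sample-type bits into the internal mask that
 * the software pass later checks each recorded branch against.
 */
static uint64_t setup_sw_filter_sketch(uint64_t br_type)
{
    uint64_t mask = 0;

    if (br_type & BR_SAMPLE_ANY_CALL)
        mask |= BR_CALL | BR_SYSCALL;
    if (br_type & BR_SAMPLE_ANY_RETURN)
        mask |= BR_RET;

    return mask; /* kernel: event->hw.branch_reg.reg = mask; */
}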
1016 static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
1019 u64 br_type = event->attr.branch_sample_type;
1035 reg = &event->hw.branch_reg;
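The hardware variant decodes the same branch_sample_type (line 1019), but its output is a selector destined for the LBR select MSR, held in the event's branch_reg (line 1035) so it can be programmed when the event is scheduled in. A sketch with invented SEL_* bit positions rather than the real MSR layout, reusing the BR_SAMPLE_* defines above:

/* Illustrative LBR_SELECT-style bits; not the real MSR layout. */
#define SEL_CPL_USER  (1ull << 0) /* capture ring-3 branches */
#define SEL_CPL_KERN  (1ull << 1) /* capture ring-0 branches */
#define SEL_NEAR_CALL (1ull << 2) /* capture near calls */

struct branch_reg_sketch {
    uint64_t config; /* value destined for the select MSR */
};

/*
 * Hardware variant: instead of a mask for software post-filtering,
 * the decoded br_type becomes an MSR selector, written into the
 * event's branch_reg (as on line 1035) for programming at sched-in.
 */
static int setup_hw_filter_sketch(struct branch_reg_sketch *reg,
                                  uint64_t br_type)
{
    uint64_t sel = SEL_CPL_USER | SEL_CPL_KERN;

    if (br_type & BR_SAMPLE_ANY_CALL)
        sel |= SEL_NEAR_CALL;

    reg->config = sel;
    return 0;
}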
1068 int intel_pmu_setup_lbr_filter(struct perf_event *event)
1081 ret = intel_pmu_setup_sw_lbr_filter(event);
1089 ret = intel_pmu_setup_hw_lbr_filter(event);
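Finally, intel_pmu_setup_lbr_filter() (line 1068) ties the two together: the software filter setup always runs (line 1081), and the hardware setup (line 1089) only when the PMU actually has a hardware filter. A sketch of that dispatch under the same simplified types; has_hw_filter stands in for the kernel's x86_pmu.lbr_sel_map check.

/*
 * Two-stage dispatch mirroring lines 1068-1089: the software filter
 * is always set up, the hardware one only when the CPU offers it.
 */
static int setup_lbr_filter_sketch(uint64_t br_type, bool has_hw_filter,
                                   struct branch_reg_sketch *reg)
{
    uint64_t sw_mask = setup_sw_filter_sketch(br_type);

    (void)sw_mask; /* kernel keeps it in event->hw.branch_reg.reg */

    if (!has_hw_filter)
        return 0; /* software post-filtering alone must suffice */

    return setup_hw_filter_sketch(reg, br_type);
}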