Lines Matching refs:cpuc

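Every match below revolves around the per-CPU cpu_hw_events structure (these lines appear to come from the Linux kernel's arch/x86/events/intel/ds.c). A minimal sketch of the accessor pattern, with the structure abridged to the fields that actually appear in this listing (the full definition lives in arch/x86/events/perf_event.h):

	typedef unsigned long long u64;   /* kernel type, sketched for illustration */

	/* Abridged sketch, not the full kernel definition. */
	struct cpu_hw_events {
		struct debug_store *ds;   /* BTS/PEBS save area */
		u64 pebs_enabled;         /* software mirror of MSR_IA32_PEBS_ENABLE */
		u64 pebs_data_cfg;        /* adaptive-PEBS configuration */
		int n_pebs;               /* PEBS events scheduled on this CPU */
		int n_large_pebs;         /* ... of those, using the multi-record buffer */
		int n_pebs_via_pt;        /* ... of those, outputting via Intel PT */
		/* many more fields elided */
	};

	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
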
740 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
743 if (!cpuc->ds)
757 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
758 struct debug_store *ds = cpuc->ds;
764 struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
1120 static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
1122 if (cpuc->n_pebs == cpuc->n_pebs_via_pt)
1125 return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
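
Read together, lines 1120-1125 say: a context-switch callback is only worthwhile when every scheduled PEBS event uses the large multi-record buffer, except when all of them route their output through Intel PT. Restated as a stand-alone predicate (a sketch with the counters passed explicitly instead of read from cpuc):

	#include <stdbool.h>

	/* Sketch of the predicate at lines 1120-1125. */
	static bool pebs_needs_sched_cb(int n_pebs, int n_large_pebs, int n_pebs_via_pt)
	{
		if (n_pebs == n_pebs_via_pt)
			return false;               /* all output is redirected to PT */
		return n_pebs && (n_pebs == n_large_pebs); /* all are large-PEBS */
	}
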
1130 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1132 if (!sched_in && pebs_needs_sched_cb(cpuc))
1136 static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
1138 struct debug_store *ds = cpuc->ds;
1139 int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events);
1140 int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
1144 if (cpuc->n_pebs_via_pt)
1152 if (cpuc->n_pebs == cpuc->n_large_pebs) {
1154 reserved * cpuc->pebs_record_size;
1156 threshold = ds->pebs_buffer_base + cpuc->pebs_record_size;
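
The threshold set here tells the hardware when to raise a PEBS interrupt: when all events are large-PEBS it sits reserved records past the buffer base, so records accumulate and drain in batches; otherwise it sits one record past the base, so every record interrupts immediately. The arithmetic, as a sketch with stdint types standing in for the kernel's u64:

	#include <stdint.h>

	/* Sketch of the computation at lines 1152-1156. */
	static uint64_t pebs_threshold(uint64_t buffer_base, uint64_t record_size,
				       uint64_t reserved, int all_large_pebs)
	{
		if (all_large_pebs)
			return buffer_base + reserved * record_size; /* interrupt near the end */
		return buffer_base + record_size;   /* interrupt after every record */
	}
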
1164 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1165 u64 pebs_data_cfg = cpuc->pebs_data_cfg;
1177 cpuc->pebs_record_size = sz;
1233 pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
1243 if (cpuc->n_pebs == 1)
1244 cpuc->pebs_data_cfg = PEBS_UPDATE_DS_SW;
1246 if (needed_cb != pebs_needs_sched_cb(cpuc)) {
1252 cpuc->pebs_data_cfg |= PEBS_UPDATE_DS_SW;
1266 if (pebs_data_cfg & ~cpuc->pebs_data_cfg)
1267 cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW;
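
pebs_update_state() never rewrites the DS area directly: it accumulates the wanted hardware bits in cpuc->pebs_data_cfg and raises the software-only PEBS_UPDATE_DS_SW flag, which intel_pmu_pebs_enable() (lines 1363-1365 below) consumes to perform the actual update. The accumulate-then-apply shape, as a sketch (the flag's bit position here is an assumption for illustration; in the kernel it is a high bit that is never written to hardware):

	#include <stdint.h>

	#define PEBS_UPDATE_DS_SW (1ULL << 63)  /* assumed value for illustration */

	/* Hardware bits are only ever OR-ed in; any change also raises the
	 * software flag so the DS area is refreshed at enable time. */
	static void accumulate_data_cfg(uint64_t *pebs_data_cfg, uint64_t wanted)
	{
		if (wanted & ~*pebs_data_cfg)
			*pebs_data_cfg |= wanted | PEBS_UPDATE_DS_SW;
	}
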
1273 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1275 bool needed_cb = pebs_needs_sched_cb(cpuc);
1277 cpuc->n_pebs++;
1279 cpuc->n_large_pebs++;
1281 cpuc->n_pebs_via_pt++;
1283 pebs_update_state(needed_cb, cpuc, event, true);
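
Note the ordering in intel_pmu_pebs_add(): pebs_needs_sched_cb() is evaluated before the counters are bumped, so pebs_update_state() can compare the old answer against the new one and toggle the scheduling callback only on a transition. intel_pmu_pebs_del() (lines 1391-1401 below) uses the same capture-before-mutate pattern on the way down.
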
1288 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1293 if (!(cpuc->pebs_enabled & ~PEBS_VIA_PT_MASK))
1294 cpuc->pebs_enabled &= ~PEBS_VIA_PT_MASK;
1299 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1301 struct debug_store *ds = cpuc->ds;
1310 cpuc->pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD;
1312 cpuc->pebs_enabled |= PEBS_OUTPUT_PT;
1325 static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc)
1327 if (cpuc->n_pebs == cpuc->n_large_pebs &&
1328 cpuc->n_pebs != cpuc->n_pebs_via_pt)
1334 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1335 u64 pebs_data_cfg = cpuc->pebs_data_cfg & ~PEBS_UPDATE_DS_SW;
1337 struct debug_store *ds = cpuc->ds;
1342 cpuc->pebs_enabled |= 1ULL << hwc->idx;
1345 cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
1347 cpuc->pebs_enabled |= 1ULL << 63;
1351 if (pebs_data_cfg != cpuc->active_pebs_data_cfg) {
1357 intel_pmu_drain_large_pebs(cpuc);
1360 cpuc->active_pebs_data_cfg = pebs_data_cfg;
1363 if (cpuc->pebs_data_cfg & PEBS_UPDATE_DS_SW) {
1364 cpuc->pebs_data_cfg = pebs_data_cfg;
1365 pebs_update_threshold(cpuc);
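
The pebs_enabled mirror set on lines 1342-1347 packs three facilities into one MSR image: bit idx enables PEBS on general counter idx, bit idx+32 additionally enables the load-latency facility for that counter, and bit 63 enables precise stores on the older cores that use it. A sketch of just the bit bookkeeping (the real function also reprograms PEBS_DATA_CFG and the DS threshold, as lines 1351-1365 show):

	#include <stdint.h>

	/* Sketch of the MSR_IA32_PEBS_ENABLE bit layout used above. */
	static void pebs_enable_counter(uint64_t *pebs_enabled, int idx,
					int want_ldlat, int want_st)
	{
		*pebs_enabled |= 1ULL << idx;                /* PEBS on counter idx */
		if (want_ldlat)
			*pebs_enabled |= 1ULL << (idx + 32); /* load-latency data */
		else if (want_st)
			*pebs_enabled |= 1ULL << 63;         /* precise stores */
	}
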
1391 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1393 bool needed_cb = pebs_needs_sched_cb(cpuc);
1395 cpuc->n_pebs--;
1397 cpuc->n_large_pebs--;
1399 cpuc->n_pebs_via_pt--;
1401 pebs_update_state(needed_cb, cpuc, event, false);
1406 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1409 intel_pmu_drain_large_pebs(cpuc);
1411 cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
1415 cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
1417 cpuc->pebs_enabled &= ~(1ULL << 63);
1421 if (cpuc->enabled)
1422 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
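
The disable path clears exactly the bits the enable path set, but writes the MSR only while the PMU is globally enabled (cpuc->enabled); otherwise the updated software mirror is flushed out later by intel_pmu_pebs_enable_all() (lines 1429-1432 below), which avoids touching the MSR while the PMU is off.
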
1429 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1431 if (cpuc->pebs_enabled)
1432 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
1437 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1439 if (cpuc->pebs_enabled)
1445 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1446 unsigned long from = cpuc->lbr_entries[0].from;
1447 unsigned long old_to, to = cpuc->lbr_entries[0].to;
1462 if (!cpuc->lbr_stack.nr || !from || !to)
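
intel_pmu_pebs_fixup_ip() corrects the classic PEBS skid: the hardware reports the address of the instruction after the one that caused the event. Using the most recent LBR entry, it decodes forward from the last branch target until it reaches the reported address; the previously decoded instruction is the precise one. The guard on line 1462 bails out when no usable LBR entry exists. A sketch of the idea (insn_len() is a hypothetical stand-in for the kernel's instruction decoder):

	/* Hypothetical sketch of the fixup walk; the real code also handles
	 * user/kernel copies, page boundaries, and decode failures. */
	static unsigned long precise_ip(unsigned long last_branch_to,
					unsigned long sampled_ip,
					unsigned long (*insn_len)(unsigned long))
	{
		unsigned long cur = last_branch_to, old_to = 0;

		while (cur < sampled_ip) {
			old_to = cur;
			cur += insn_len(cur);  /* step one decoded instruction */
		}
		return old_to;  /* one instruction before sampled_ip */
	}
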
1620 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1758 perf_sample_save_brstack(data, event, &cpuc->lbr_stack);
1797 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1915 perf_sample_save_brstack(data, event, &cpuc->lbr_stack);
1929 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1943 for (at = base; at < top; at += cpuc->pebs_record_size) {
1955 pebs_status = status & cpuc->pebs_enabled;
2046 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2070 at += cpuc->pebs_record_size;
2096 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2097 struct debug_store *ds = cpuc->ds;
2098 struct perf_event *event = cpuc->events[0]; /* PMC0 only */
2113 if (!test_bit(0, cpuc->active_mask))
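
The Core-era drain routine hardwires cpuc->events[0] because original PEBS supported precise sampling on PMC0 only (hence the "PMC0 only" comment on line 2098); the active_mask test on line 2113 guards against draining a stale DS buffer when no event is actually live on that counter.
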
2132 static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
2144 for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
2145 event = cpuc->events[bit];
2153 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2154 struct debug_store *ds = cpuc->ds;
2178 intel_pmu_pebs_event_update_no_drain(cpuc, size);
2186 pebs_status = p->status & cpuc->pebs_enabled;
2205 if (!pebs_status && cpuc->pebs_enabled &&
2206 !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
2207 pebs_status = p->status = cpuc->pebs_enabled;
2242 event = cpuc->events[bit];
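
Lines 2205-2207 work around records whose status field claims no enabled counter: if exactly one PEBS counter is enabled, the record can only belong to that counter, so the status is reconstructed from cpuc->pebs_enabled. The "exactly one" test is the classic clear-the-lowest-set-bit trick:

	#include <stdbool.h>
	#include <stdint.h>

	/* x & (x - 1) clears the lowest set bit, so the result is zero
	 * iff x has at most one bit set; x != 0 then pins it to exactly
	 * one (the test on lines 2205-2206 above). */
	static bool exactly_one_bit_set(uint64_t x)
	{
		return x && !(x & (x - 1));
	}
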
2268 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2269 int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events);
2270 int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
2271 struct debug_store *ds = cpuc->ds;
2290 intel_pmu_pebs_event_update_no_drain(cpuc, size);
2294 for (at = base; at < top; at += cpuc->pebs_record_size) {
2297 pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
2308 event = cpuc->events[bit];
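
With adaptive PEBS the record layout, and therefore its size, follows PEBS_DATA_CFG, so the Icelake drain routine steps through the DS buffer in cpuc->pebs_record_size increments instead of a fixed sizeof(struct pebs_record_*). The walk, as a stand-alone sketch (handle_record() is a hypothetical stand-in for the per-record processing):

	#include <stdint.h>

	/* Sketch of the variable-stride walk on lines 2294-2308. */
	static void drain_records(void *base, void *top, uint64_t record_size,
				  void (*handle_record)(void *rec))
	{
		for (char *at = base; at < (char *)top; at += record_size)
			handle_record(at);
	}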