Lines Matching defs:events
26 /* This is enough to filter the vast majority of currently defined events. */
130 * Ignore overflow events for counters that are scheduled to be
195 * For most PEBS hardware events, the difference in the software
196 * precision levels of guest and host PEBS events will not affect
288 * 'excludes' list separately rather than on the 'events' list (which
296 static int find_filter_index(u64 *events, u64 nevents, u64 key)
298 u64 *fe = bsearch(&key, events, nevents, sizeof(events[0]),
304 return fe - events;
319 static bool filter_contains_match(u64 *events, u64 nevents, u64 eventsel)
325 index = find_filter_index(events, nevents, event_select);
334 if (filter_event_cmp(&events[i], &event_select))
337 if (is_filter_entry_match(events[i], umask))
342 if (filter_event_cmp(&events[i], &event_select))
345 if (is_filter_entry_match(events[i], umask))
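
The lookup above (file lines 296-345) operates on an array sorted by event select: find_filter_index() lets bsearch() land on any entry with the target event select, and filter_contains_match() then walks forward and backward across that run of equal event selects checking unit masks. A minimal userspace sketch of that strategy, assuming a made-up 8-bit event-select / 8-bit umask layout rather than KVM's real encodings:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define EVENT_SELECT(e) ((e) & 0xffull)         /* assumed layout */
#define UMASK(e)        (((e) >> 8) & 0xffull)  /* assumed layout */

/* Compare by event select only, so all umask variants of an event are adjacent. */
static int event_cmp(const void *pa, const void *pb)
{
        uint64_t a = EVENT_SELECT(*(const uint64_t *)pa);
        uint64_t b = EVENT_SELECT(*(const uint64_t *)pb);

        return (a > b) - (a < b);
}

static bool contains_match(uint64_t *events, size_t nevents, uint64_t eventsel)
{
        uint64_t key = EVENT_SELECT(eventsel);
        uint64_t *fe = bsearch(&key, events, nevents, sizeof(*events), event_cmp);
        size_t index, i;

        if (!fe)
                return false;
        index = fe - events;

        /* bsearch() may land anywhere in the run of equal event selects... */
        for (i = index; i < nevents && EVENT_SELECT(events[i]) == key; i++)
                if (UMASK(events[i]) == UMASK(eventsel))
                        return true;

        /* ...so walk backward over the rest of the run as well. */
        for (i = index; i-- > 0 && EVENT_SELECT(events[i]) == key; )
                if (UMASK(events[i]) == UMASK(eventsel))
                        return true;

        return false;
}

int main(void)
{
        uint64_t filter[] = { 0x01c2, 0x02c2, 0x00c4 };         /* sorted by event select */

        printf("%d\n", contains_match(filter, 3, 0x02c2));      /* 1: event and umask present */
        printf("%d\n", contains_match(filter, 3, 0x03c2));      /* 0: umask not in the filter */
        return 0;
}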
698 * Stop/release all existing counters/events before realizing the new
809 if (filter->events[i] & ~mask)
822 * Skip events that are impossible to match against a guest
827 if (filter->events[i] & ~(kvm_pmu_ops.EVENTSEL_EVENT |
831 * Convert userspace events to a common in-kernel event so
832 * only one code path is needed to support both events. For
833 * the in-kernel events use masked events because they are
835 * events all that's needed is to add an "all ones" umask_mask,
836 * (unmasked filter events don't support EXCLUDE).
838 filter->events[j++] = filter->events[i] |
861 sort(&filter->events, filter->nevents, sizeof(filter->events[0]),
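
The setup side (file lines 809-861) validates each entry against the architectural event-select/umask bits, converts unmasked userspace entries to masked form by ORing in an "all ones" umask_mask, compacts the array in place, and sorts it. A sketch of that sanitize-and-convert step, with illustrative bit positions standing in for the real masked-entry layout:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ARCH_EVENT_BITS         0xffffull       /* assumed: event select + umask */
#define UMASK_MASK_ALL_ONES     (0xffull << 56) /* assumed umask_mask field */

/* Order entries by their architectural bits so equal event selects are adjacent. */
static int masked_event_cmp(const void *pa, const void *pb)
{
        uint64_t a = *(const uint64_t *)pa & ARCH_EVENT_BITS;
        uint64_t b = *(const uint64_t *)pb & ARCH_EVENT_BITS;

        return (a > b) - (a < b);
}

static size_t convert_to_masked(uint64_t *events, size_t nevents)
{
        size_t i, j;

        for (i = 0, j = 0; i < nevents; i++) {
                /* Skip entries that could never match a guest event. */
                if (events[i] & ~ARCH_EVENT_BITS)
                        continue;

                /* An all-ones umask_mask means "match this umask exactly". */
                events[j++] = events[i] | UMASK_MASK_ALL_ONES;
        }

        qsort(events, j, sizeof(events[0]), masked_event_cmp);
        return j;       /* number of events left after compaction */
}

int main(void)
{
        uint64_t events[] = { 0x01c2, 0x123400000000c2ull, 0x00c4 };
        size_t i, n = convert_to_masked(events, 3);

        for (i = 0; i < n; i++)
                printf("%#llx\n", (unsigned long long)events[i]);
        return 0;
}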
865 /* Find the first EXCLUDE event (only supported for masked events). */
868 if (filter->events[i] & KVM_PMU_MASKED_ENTRY_EXCLUDE)
875 filter->includes = filter->events;
876 filter->excludes = filter->events + filter->nr_includes;
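
The split (file lines 865-876) relies on the sort having pushed EXCLUDE entries to the tail: everything before the first EXCLUDE entry is the include list, the remainder is the exclude list, and both are views into the same sorted buffer, so no second allocation is needed. A sketch of that split, with an illustrative EXCLUDE flag bit:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRY_EXCLUDE   (1ull << 55)    /* assumed flag bit */

struct filter_view {
        uint64_t *includes, *excludes;
        size_t nr_includes, nr_excludes;
};

static void split_filter(uint64_t *events, size_t nevents, struct filter_view *v)
{
        size_t i;

        /* Find the first EXCLUDE entry; everything before it is an include. */
        for (i = 0; i < nevents; i++)
                if (events[i] & ENTRY_EXCLUDE)
                        break;

        v->includes = events;
        v->nr_includes = i;
        v->excludes = events + i;
        v->nr_excludes = nevents - i;
}

int main(void)
{
        uint64_t events[] = { 0x00c2, 0x00c4, 0x00c6 | ENTRY_EXCLUDE };
        struct filter_view v;

        split_filter(events, 3, &v);
        printf("includes=%zu excludes=%zu\n", v.nr_includes, v.nr_excludes);
        return 0;
}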
904 size = struct_size(filter, events, tmp.nevents);
915 if (copy_from_user(filter->events, user_filter->events,
916 sizeof(filter->events[0]) * filter->nevents))
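
The copy-in path (file lines 904-916) reads the user-supplied header first to learn nevents, sizes the allocation with struct_size() so the trailing events[] flexible array is included, then copies the array body in a second step. A userspace sketch of the same sizing-and-copy pattern, with malloc()/memcpy() standing in for the kernel helpers and an illustrative struct layout:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct event_filter {
        uint32_t action;
        uint32_t nevents;
        uint64_t events[];      /* flexible array member */
};

static struct event_filter *copy_filter(const struct event_filter *user)
{
        struct event_filter *filter;
        size_t size;

        /* Stand-in for struct_size(filter, events, nevents); the kernel
         * helper additionally guards against arithmetic overflow. */
        size = sizeof(*filter) + (size_t)user->nevents * sizeof(filter->events[0]);

        filter = malloc(size);
        if (!filter)
                return NULL;

        memcpy(filter, user, sizeof(*filter));          /* fixed-size header */
        memcpy(filter->events, user->events,            /* trailing array */
               sizeof(filter->events[0]) * filter->nevents);
        return filter;
}

int main(void)
{
        struct event_filter *user, *copy;

        /* Build a stand-in "userspace" filter with two events. */
        user = malloc(sizeof(*user) + 2 * sizeof(user->events[0]));
        if (!user)
                return 1;
        user->action = 0;
        user->nevents = 2;
        user->events[0] = 0x00c2;
        user->events[1] = 0x00c4;

        copy = copy_filter(user);
        if (copy)
                printf("copied %u events\n", copy->nevents);

        free(copy);
        free(user);
        return 0;
}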