Lines matching refs:event (drivers/perf/xgene_pmu.c)
276 * sysfs event attributes
887 static int xgene_perf_event_init(struct perf_event *event)
889 struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
890 struct hw_perf_event *hw = &event->hw;
893 /* Test the event attr type for PMU enumeration */
894 if (event->attr.type != event->pmu->type)
900 * Also, it does not support event sampling mode.
902 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
905 if (event->cpu < 0)
912 * event could be theoretically assigned to a different CPU. To
916 event->cpu = cpumask_first(&pmu_dev->parent->cpu);
918 hw->config = event->attr.config;
921 * request of the event comes. The event is counted only if it's caused
923 * By default, the event is counted for all agents.
925 hw->config_base = event->attr.config1;
931 if (event->group_leader->pmu != event->pmu &&
932 !is_software_event(event->group_leader))
935 for_each_sibling_event(sibling, event->group_leader) {
936 if (sibling->pmu != event->pmu &&
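
Taken together, the checks above (lines 887-936) define what a valid event looks like for this uncore PMU: the attr type must match the PMU's dynamically assigned type, per-task and sampling modes are rejected, the event is forced onto the CPU advertised in the PMU's cpumask, and config/config1 are copied into hw->config and hw->config_base. As a rough user-space illustration (standard perf_event_open(2) usage, not code from this driver), opening one such counter could look like this:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>

/*
 * Hypothetical helper: open one counting (non-sampling) event on an uncore
 * PMU.  pmu_type comes from /sys/bus/event_source/devices/<pmu>/type, config
 * selects the event ID and config1 carries the agent mask described around
 * line 921.
 */
static int open_uncore_counter(uint32_t pmu_type, uint64_t config,
                               uint64_t config1, int cpu)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = pmu_type;      /* must equal event->pmu->type (line 894) */
        attr.config = config;      /* becomes hw->config (line 918) */
        attr.config1 = config1;    /* becomes hw->config_base (line 925) */
        /* no sample_period: sampling mode is rejected (line 902) */

        /* pid = -1: system-wide only; per-task attach is also rejected */
        return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}
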
944 static void xgene_perf_enable_event(struct perf_event *event)
946 struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
949 xgene_pmu->ops->write_evttype(pmu_dev, GET_CNTR(event),
950 GET_EVENTID(event));
951 xgene_pmu->ops->write_agentmsk(pmu_dev, ~((u32)GET_AGENTID(event)));
954 ~((u32)GET_AGENT1ID(event)));
956 xgene_pmu->ops->enable_counter(pmu_dev, GET_CNTR(event));
957 xgene_pmu->ops->enable_counter_int(pmu_dev, GET_CNTR(event));
960 static void xgene_perf_disable_event(struct perf_event *event)
962 struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
965 xgene_pmu->ops->disable_counter(pmu_dev, GET_CNTR(event));
966 xgene_pmu->ops->disable_counter_int(pmu_dev, GET_CNTR(event));
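
The enable path (lines 944-957) programs the event ID, writes the agent mask registers, and only then turns on both the counter and its interrupt; disable (lines 960-966) undoes the last two steps. Note the bitwise complement at lines 951 and 954: per the config1 comment above, a cleared config1 bit means "count requests from this agent", while the hardware mask registers use the opposite polarity, so the driver writes ~config1 split across two 32-bit registers. A small stand-alone sketch of that split (the exact low/high layout behind GET_AGENTID()/GET_AGENT1ID() is an assumption):

#include <stdint.h>

/* Assumed layout: low 32 bits of config1 -> agentmsk, high 32 bits -> agent1msk */
static inline uint32_t agent_msk(uint64_t config1)
{
        return ~(uint32_t)(config1 & 0xffffffffULL);
}

static inline uint32_t agent1_msk(uint64_t config1)
{
        return ~(uint32_t)(config1 >> 32);
}

With the default config1 of 0, both helpers return 0xffffffff, matching the "counted for all agents" default described at line 923.
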
969 static void xgene_perf_event_set_period(struct perf_event *event)
971 struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
973 struct hw_perf_event *hw = &event->hw;
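
xgene_perf_event_set_period() (lines 969-973) primes hw->prev_count and the hardware counter with the same start value, so the next overflow interrupt arrives long before the counter can lap its previous value. A kernel-side sketch of the idea, assuming a 32-bit counter armed at half its range (the actual value is not visible in this listing) and a hypothetical write_hw_counter() standing in for the driver's ops->write_counter():

#include <linux/perf_event.h>

/* Hypothetical stand-in for the driver's ops->write_counter() callback */
void write_hw_counter(struct perf_event *event, u64 val);

static void sketch_event_set_period(struct perf_event *event)
{
        struct hw_perf_event *hw = &event->hw;
        u64 val = 1ULL << 31;   /* assumption: half of a 32-bit counter's range */

        /* Remember the start value so the next update can compute a delta */
        local64_set(&hw->prev_count, val);
        write_hw_counter(event, val);
}
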
987 static void xgene_perf_event_update(struct perf_event *event)
989 struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
991 struct hw_perf_event *hw = &event->hw;
996 new_raw_count = xgene_pmu->ops->read_counter(pmu_dev, GET_CNTR(event));
1004 local64_add(delta, &event->count);
1007 static void xgene_perf_read(struct perf_event *event)
1009 xgene_perf_event_update(event);
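
The update path (lines 987-1004) is the usual free-running-counter bookkeeping: read the hardware counter, advance hw->prev_count with a cmpxchg so a racing update (for example from the overflow interrupt at line 1221) is never folded in twice, then add the wrapped difference to event->count. xgene_perf_read() (lines 1007-1009) is just a thin wrapper around it. A sketch of the pattern, with read_hw_counter() and a 32-bit mask as stand-ins for the driver's ops->read_counter() and its real counter width:

#include <linux/bits.h>
#include <linux/perf_event.h>

/* Hypothetical stand-in for the driver's ops->read_counter() callback */
u64 read_hw_counter(struct perf_event *event);

static void sketch_event_update(struct perf_event *event)
{
        struct hw_perf_event *hw = &event->hw;
        u64 prev, now, delta;

        do {
                prev = local64_read(&hw->prev_count);
                now = read_hw_counter(event);
        } while (local64_cmpxchg(&hw->prev_count, prev, now) != prev);

        /* Masked subtraction keeps the delta correct across a counter wrap */
        delta = (now - prev) & GENMASK_ULL(31, 0);  /* assumed 32-bit counter */
        local64_add(delta, &event->count);
}
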
1012 static void xgene_perf_start(struct perf_event *event, int flags)
1014 struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
1016 struct hw_perf_event *hw = &event->hw;
1024 xgene_perf_event_set_period(event);
1029 xgene_pmu->ops->write_counter(pmu_dev, GET_CNTR(event),
1033 xgene_perf_enable_event(event);
1034 perf_event_update_userpage(event);
1037 static void xgene_perf_stop(struct perf_event *event, int flags)
1039 struct hw_perf_event *hw = &event->hw;
1044 xgene_perf_disable_event(event);
1051 xgene_perf_read(event);
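
start and stop (lines 1012-1051) implement the standard perf hw->state handshake: start clears the state, re-arms the counter, rewrites the saved hardware value when PERF_EF_RELOAD is set, enables the event and refreshes the mmap'd userpage; stop disables the event, marks it PERF_HES_STOPPED and folds in the final value with a read. A condensed sketch built on the helpers sketched above, with enable_hw()/disable_hw() as placeholders for the enable/disable routines at lines 944-966:

void enable_hw(struct perf_event *event);       /* placeholder */
void disable_hw(struct perf_event *event);      /* placeholder */

static void sketch_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hw = &event->hw;

        if (WARN_ON_ONCE(!(hw->state & PERF_HES_STOPPED)))
                return;

        hw->state = 0;
        sketch_event_set_period(event);         /* arm the counter */
        if (flags & PERF_EF_RELOAD)             /* re-write the saved value */
                write_hw_counter(event, local64_read(&hw->prev_count));
        enable_hw(event);
        perf_event_update_userpage(event);
}

static void sketch_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hw = &event->hw;

        if (hw->state & PERF_HES_UPTODATE)
                return;

        disable_hw(event);
        hw->state |= PERF_HES_STOPPED;

        if (flags & PERF_EF_UPDATE) {
                sketch_event_update(event);     /* fold in the final count */
                hw->state |= PERF_HES_UPTODATE;
        }
}
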
1055 static int xgene_perf_add(struct perf_event *event, int flags)
1057 struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
1058 struct hw_perf_event *hw = &event->hw;
1062 /* Allocate an event counter */
1067 /* Update the counter's event pointer for the interrupt handler */
1068 pmu_dev->pmu_counter_event[hw->idx] = event;
1071 xgene_perf_start(event, PERF_EF_RELOAD);
1076 static void xgene_perf_del(struct perf_event *event, int flags)
1078 struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
1079 struct hw_perf_event *hw = &event->hw;
1081 xgene_perf_stop(event, PERF_EF_UPDATE);
1084 clear_avail_cntr(pmu_dev, GET_CNTR(event));
1086 perf_event_update_userpage(event);
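
add and del (lines 1055-1086) tie an event to a hardware counter for as long as it is scheduled on the PMU: add allocates a free counter, records the event in pmu_counter_event[] so the interrupt handler can map the counter back to it (line 1068), and starts it when PERF_EF_START is set; del stops the event with a final update, releases the counter (line 1084) and refreshes the userpage. A condensed sketch, with alloc_counter()/free_counter() as placeholders for the driver's counter bookkeeping:

int alloc_counter(struct xgene_pmu_dev *pmu_dev);               /* placeholder */
void free_counter(struct xgene_pmu_dev *pmu_dev, int idx);      /* placeholder */

static int sketch_add(struct perf_event *event, int flags)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
        struct hw_perf_event *hw = &event->hw;

        hw->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        hw->idx = alloc_counter(pmu_dev);
        if (hw->idx < 0)
                return -EAGAIN;

        /* Let the overflow interrupt find the event from the counter index */
        pmu_dev->pmu_counter_event[hw->idx] = event;

        if (flags & PERF_EF_START)
                sketch_start(event, PERF_EF_RELOAD);

        return 0;
}

static void sketch_del(struct perf_event *event, int flags)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
        struct hw_perf_event *hw = &event->hw;

        sketch_stop(event, PERF_EF_UPDATE);     /* final count update */
        free_counter(pmu_dev, hw->idx);
        perf_event_update_userpage(event);
}
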
1098 /* The first-version PMU supports only a single event counter */
1215 struct perf_event *event = pmu_dev->pmu_counter_event[idx];
1218 /* Ignore if we don't have an event. */
1219 if (!event || !overflowed)
1221 xgene_perf_event_update(event);
1222 xgene_perf_event_set_period(event);
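
Finally, the interrupt handler (lines 1215-1222) looks up the owning event for each counter through pmu_counter_event[], and for each counter that actually overflowed folds the wrapped value into event->count and re-arms the counter so counting continues seamlessly. A sketch of that per-counter step, building on the earlier sketches, with counter_overflowed() as a placeholder for however the hardware reports overflow status:

bool counter_overflowed(struct xgene_pmu_dev *pmu_dev, int idx);  /* placeholder */

static void sketch_handle_overflow(struct xgene_pmu_dev *pmu_dev, int idx)
{
        struct perf_event *event = pmu_dev->pmu_counter_event[idx];

        /* The counter may be unused, or its overflow bit may not be set */
        if (!event || !counter_overflowed(pmu_dev, idx))
                return;

        sketch_event_update(event);      /* accumulate into event->count */
        sketch_event_set_period(event);  /* re-arm for the next overflow */
}
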