Lines Matching refs:event
10 * based on the sparc64 perf event code and the x86 code. Performance
37 * is used for an event.
96 #define M_PERFCTL_EVENT(event) (((event) << MIPS_PERFCTRL_EVENT_S) & \
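The macro at line 96 packs an event number into the event field of the perf control register by shifting it to the field's offset and masking it to the field's width. A minimal illustration of that shift-and-mask pattern; the shift of 5 and 10-bit width below are assumed example values, the authoritative definitions of MIPS_PERFCTRL_EVENT_S and MIPS_PERFCTRL_EVENT live in mipsregs.h:

        /* Illustration only: assumes a 10-bit event field starting at bit 5. */
        #define EX_EVENT_S      5
        #define EX_EVENT        (0x3ff << EX_EVENT_S)
        #define EX_PERFCTL_EVENT(ev) (((ev) << EX_EVENT_S) & EX_EVENT)
        /* EX_PERFCTL_EVENT(15) == 15 << 5 == 0x1e0: event 15 in the event field */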
334 * when the former kind of event takes the counter the
335 * latter kind of event wants to use, then the "counter
336 * allocation" for the latter event will fail. In fact if
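The comment fragments at lines 334-336 explain why counter allocation can fail: an event may only be schedulable on a subset of the hardware counters, and if another event already occupies every counter in that subset, the allocation has to give up. A rough sketch of that idea, not the driver's exact code; cntr_mask, used_mask and num_counters stand in for the per-event allowed-counter mask, the per-CPU in-use bitmap and the counter count:

        /* Sketch: claim the first free counter the event is allowed to use. */
        static int alloc_counter_sketch(unsigned long *used_mask,
                                        unsigned long cntr_mask,
                                        int num_counters)
        {
                int i;

                for (i = num_counters - 1; i >= 0; i--) {
                        if (test_bit(i, &cntr_mask) &&
                            !test_and_set_bit(i, used_mask))
                                return i;       /* claimed counter i */
                }
                return -EAGAIN;                 /* every allowed counter is taken */
        }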
350 struct perf_event *event = container_of(evt, struct perf_event, hw);
379 * Set up the counter for a particular CPU when event->cpu is
383 cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
409 static int mipspmu_event_set_period(struct perf_event *event,
444 perf_event_update_userpage(event);
449 static void mipspmu_event_update(struct perf_event *event,
466 local64_add(delta, &event->count);
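mipspmu_event_set_period() (lines 409-444) arms the hardware counter so that it overflows after the remaining sample period elapses, and mipspmu_event_update() (lines 449-466) folds the counter's progress back into the 64-bit software count, ending in the local64_add() quoted at line 466. The usual pattern reads the counter, atomically swaps the saved previous value, and adds the difference; a hedged sketch, with read_counter() standing in for the PMU's counter-read hook:

        static void event_update_sketch(struct perf_event *event,
                                        struct hw_perf_event *hwc, int idx)
        {
                u64 prev, now, delta;

                do {
                        prev = local64_read(&hwc->prev_count);
                        now  = read_counter(idx);       /* hardware value */
                } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

                delta = now - prev;                     /* ticks since the last update */
                local64_add(delta, &event->count);      /* the line quoted at 466 */
        }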
470 static void mipspmu_start(struct perf_event *event, int flags)
472 struct hw_perf_event *hwc = &event->hw;
479 /* Set the period for the event. */
480 mipspmu_event_set_period(event, hwc, hwc->idx);
482 /* Enable the event. */
486 static void mipspmu_stop(struct perf_event *event, int flags)
488 struct hw_perf_event *hwc = &event->hw;
491 /* We are working on a local event. */
494 mipspmu_event_update(event, hwc, hwc->idx);
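mipspmu_start() (lines 470-482) re-arms the period and enables the counter; mipspmu_stop() (lines 486-494) disables it and folds the final value back in via mipspmu_event_update(). A condensed, hedged pairing of those steps, with enable_event()/disable_event() as stand-ins for the driver's per-counter enable and disable hooks:

        /* start: arm the counter with a fresh period, then turn it on */
        mipspmu_event_set_period(event, hwc, hwc->idx);   /* line 480 */
        enable_event(hwc, hwc->idx);                      /* "Enable the event." */

        /* stop: turn the counter off, then account what it counted */
        disable_event(hwc->idx);
        mipspmu_event_update(event, hwc, hwc->idx);       /* line 494 */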
499 static int mipspmu_add(struct perf_event *event, int flags)
502 struct hw_perf_event *hwc = &event->hw;
506 perf_pmu_disable(event->pmu);
508 /* Look for a free counter for this event. */
516 * If there is an event in the counter we are going to use then
519 event->hw.idx = idx;
521 cpuc->events[idx] = event;
525 mipspmu_start(event, PERF_EF_RELOAD);
528 perf_event_update_userpage(event);
531 perf_pmu_enable(event->pmu);
535 static void mipspmu_del(struct perf_event *event, int flags)
538 struct hw_perf_event *hwc = &event->hw;
543 mipspmu_stop(event, PERF_EF_UPDATE);
547 perf_event_update_userpage(event);
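mipspmu_add() (lines 499-531) and mipspmu_del() (lines 535-547) are the pmu->add/del callbacks. The ordering visible in the fragments is: quiesce the PMU, claim a counter, record the event in the per-CPU slot, optionally start it, refresh the mmap'd user page, then re-enable the PMU; deletion reverses this. A hedged outline, with alloc_counter() as a stand-in for the driver's counter allocator:

        /* add */
        perf_pmu_disable(event->pmu);                   /* line 506 */
        idx = alloc_counter(cpuc, hwc);                 /* line 508 */
        event->hw.idx = idx;                            /* line 519 */
        cpuc->events[idx] = event;                      /* line 521 */
        if (flags & PERF_EF_START)
                mipspmu_start(event, PERF_EF_RELOAD);   /* line 525 */
        perf_event_update_userpage(event);              /* line 528 */
        perf_pmu_enable(event->pmu);                    /* line 531 */

        /* del */
        mipspmu_stop(event, PERF_EF_UPDATE);            /* line 543 */
        cpuc->events[hwc->idx] = NULL;                  /* release the slot */
        perf_event_update_userpage(event);              /* line 547 */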
550 static void mipspmu_read(struct perf_event *event)
552 struct hw_perf_event *hwc = &event->hw;
558 mipspmu_event_update(event, hwc, hwc->idx);
635 static int __hw_perf_event_init(struct perf_event *event);
637 static void hw_perf_event_destroy(struct perf_event *event)
652 static int mipspmu_event_init(struct perf_event *event)
657 if (has_branch_stack(event))
660 switch (event->attr.type) {
670 if (event->cpu >= 0 && !cpu_online(event->cpu))
686 return __hw_perf_event_init(event);
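mipspmu_event_init() (lines 652-686) is the pmu->event_init hook: it rejects branch-stack sampling (line 657), filters on event->attr.type (line 660), refuses events bound to offline CPUs (line 670), and finally defers to __hw_perf_event_init() (line 686). A hedged outline of those checks; the exact error codes are as I recall them, not quoted from the file:

        if (has_branch_stack(event))
                return -EOPNOTSUPP;             /* no branch-stack support */

        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
        case PERF_TYPE_HW_CACHE:
        case PERF_TYPE_RAW:
                break;                          /* handled below */
        default:
                return -ENOENT;                 /* not one of ours */
        }

        if (event->cpu >= 0 && !cpu_online(event->cpu))
                return -ENODEV;

        return __hw_perf_event_init(event);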
760 static int validate_group(struct perf_event *event)
762 struct perf_event *sibling, *leader = event->group_leader;
775 if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
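validate_group() (lines 760-775) checks schedulability before accepting a grouped event: it runs the counter allocator against a throw-away cpu_hw_events for the group leader, each sibling, and the new event itself, and fails if any of them cannot get a counter. A hedged sketch of that shape:

        static int validate_group_sketch(struct perf_event *event)
        {
                struct perf_event *sibling, *leader = event->group_leader;
                struct cpu_hw_events fake_cpuc;

                memset(&fake_cpuc, 0, sizeof(fake_cpuc));

                if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
                        return -EINVAL;

                for_each_sibling_event(sibling, leader)
                        if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
                                return -EINVAL;

                if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
                        return -EINVAL;         /* line 775 */

                return 0;
        }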
786 struct perf_event *event = cpuc->events[idx];
787 struct hw_perf_event *hwc = &event->hw;
789 mipspmu_event_update(event, hwc, idx);
790 data->period = event->hw.last_period;
791 if (!mipspmu_event_set_period(event, hwc, idx))
794 if (perf_event_overflow(event, data, regs))
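Lines 786-794 come from the counter-overflow path: the event's count is brought up to date, a new sampling period is programmed, and perf_event_overflow() decides whether the counter must be silenced (for example when the event is throttled). A hedged sketch of that sequence, with disable_event() standing in for the per-counter disable hook:

        mipspmu_event_update(event, hwc, idx);          /* line 789 */
        data->period = event->hw.last_period;           /* line 790 */

        if (!mipspmu_event_set_period(event, hwc, idx))
                return;                 /* no new period: leave it stopped */

        if (perf_event_overflow(event, data, regs))     /* line 794 */
                disable_event(idx);     /* throttled: silence the counter */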
936 /* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
945 /* 74K/proAptiv cores have a different branch event code. */
1014 /* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
1095 /* 74K/proAptiv cores have a completely different cache event map. */
1463 * Only general DTLB misses are counted; use the same event for
1512 * Only general DTLB misses are counted; use the same event for
1537 static int __hw_perf_event_init(struct perf_event *event)
1539 struct perf_event_attr *attr = &event->attr;
1540 struct hw_perf_event *hwc = &event->hw;
1544 /* Return the MIPS event descriptor for a generic perf event. */
1545 if (PERF_TYPE_HARDWARE == event->attr.type) {
1546 if (event->attr.config >= PERF_COUNT_HW_MAX)
1548 pev = mipspmu_map_general_event(event->attr.config);
1549 } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
1550 pev = mipspmu_map_cache_event(event->attr.config);
1551 } else if (PERF_TYPE_RAW == event->attr.type) {
1552 /* We are working on the global raw event. */
1554 pev = mipspmu.map_raw_event(event->attr.config);
1556 /* The event type is not (yet) supported. */
1561 if (PERF_TYPE_RAW == event->attr.type)
1573 if (PERF_TYPE_RAW == event->attr.type)
1588 * The event can belong to another cpu. We do not assign a local
1601 if (event->group_leader != event)
1602 err = validate_group(event);
1604 event->destroy = hw_perf_event_destroy;
1607 event->destroy(event);
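__hw_perf_event_init() (lines 1537-1607) translates the generic attr into a MIPS event descriptor: PERF_TYPE_HARDWARE configs are bounds-checked and mapped through the general event table (lines 1545-1548), PERF_TYPE_HW_CACHE goes through the cache map (lines 1549-1550), and PERF_TYPE_RAW through the CPU-specific raw mapper (lines 1551-1554), which on some systems is done under a lock (lines 1561, 1573); anything else is rejected, group members are validated (lines 1601-1602), and a destructor is installed (line 1604). A hedged outline of the type dispatch, with pev as the mapped descriptor:

        const struct mips_perf_event *pev;

        if (event->attr.type == PERF_TYPE_HARDWARE) {
                if (event->attr.config >= PERF_COUNT_HW_MAX)
                        return -EINVAL;
                pev = mipspmu_map_general_event(event->attr.config);
        } else if (event->attr.type == PERF_TYPE_HW_CACHE) {
                pev = mipspmu_map_cache_event(event->attr.config);
        } else if (event->attr.type == PERF_TYPE_RAW) {
                pev = mipspmu.map_raw_event(event->attr.config);
        } else {
                return -EOPNOTSUPP;     /* event type not (yet) supported */
        }

        if (IS_ERR(pev))
                return PTR_ERR(pev);    /* mapping failed */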
1763 * 128 needs to be added to 15 as the input for the event config, i.e., 143 (0x8F)
1772 /* currently most cores have 7-bit event numbers */
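Line 1763 spells out the raw-event convention for the cores with 7-bit event numbers: the low 7 bits carry the event number and bit 7 selects the other counter of the even/odd pair (the odd counters, as I read the driver), so asking for event 15 on those counters means passing 128 + 15 = 143 (0x8F) as the raw config. A hedged sketch of that decoding, not the driver's exact raw mapper:

        unsigned int raw_id  = config & 0xff;   /* e.g. 0x8f */
        unsigned int base_id = raw_id & 0x7f;   /* 0x0f: event number 15 */
        bool         odd     = raw_id > 0x7f;   /* bit 7 set: the odd counters */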
1830 /* 8-bit event numbers */
1844 /* 8-bit event numbers */