/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 *
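 * As a rough sketch of typical usage (the property and flag names below come
 * from the i915 uAPI headers; metrics_set_id and oa_exponent stand in for
 * caller-chosen values and error handling is omitted), a stream might be
 * opened with something like::
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, oa_exponent,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * The returned descriptor can then be read() or poll()'d for sample records.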
 */

/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration.  For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's currently cpu centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say; we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of a kind.
 *
 *   Given the way we periodically forwarded data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 *   - As a side note on perf's grouping feature; there was also some concern
 *     that using PERF_FORMAT_GROUP as a way to pack together counter values
 *     would quite drastically inflate our sample sizes, which would likely
 *     lower the effective sampling resolutions we could use when the available
 *     memory bandwidth is limited.
 *
 *     With the OA unit's report formats, counters are packed together as 32
 *     or 40bit values, with the largest report size being 256 bytes.
 *
 *     PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *     documented ordering to the values, implying PERF_FORMAT_ID must also be
 *     used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time.  The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither.  Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and seems to make
 *   our locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_perf.h"

/* HW requires this to be a power of two, between 128k and 16M, though driver
 * is currently generally designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
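
/*
 * A quick, purely illustrative check of the wraparound arithmetic above:
 * with OA_BUFFER_SIZE = 16M the mask is 0xffffff, so for example
 * OA_TAKEN(0x40, 0xffffc0) = (0x40 - 0xffffc0) & 0xffffff = 0x80, i.e.
 * 128 bytes of data wrapping from the end of the buffer back to the start.
 */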

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of the oa_buffer_check_unlocked to avoid lots of
 * redundant read() attempts.
 *
 * We workaround this issue in oa_buffer_check_unlocked() by reading the reports
 * in the OA buffer, starting from the tail reported by the HW until we find a
 * report with its first 2 dwords not 0 meaning its previous report is
 * completely in memory and ready to be read. Those dwords are also set to 0
 * once read and the whole buffer is cleared upon OA buffer initialization. The
 * first dword is the reason for this report while the second is the timestamp,
 * making the chances of having those 2 fields at 0 fairly unlikely. A more
 * detailed explanation is available in oa_buffer_check_unlocked().
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports()
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* The default frequency for checking whether the OA unit has written new
 * reports to the circular OA buffer...
 */
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
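/*
 * As a rough worked example (assuming Haswell's 12.5MHz timestamp frequency;
 * other platforms differ): the sampling period for a given exponent is
 * approximately 2^(exponent + 1) / timestamp_frequency, so exponent 0 gives
 * ~160ns, the limit of 31 below gives ~343 seconds, and the hardware maximum
 * of 63 would give the roughly 47 thousand year period mentioned above.
 */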
#define OA_EXPONENT_MAX 31

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_MASK_EXTENDED  0x7f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: E.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
};

static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
};

static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
};

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @hold_preemption: Whether the preemption is disabled for the filtered
 *                   context
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 * @engine: The engine (typically rcs0) being monitored by the OA unit
 * @has_sseu: Whether @sseu was specified by userspace
 * @sseu: internal SSEU configuration computed either from the userspace
 *        specified configuration in the opening parameters or a default value
 *        (see get_default_sseu_config())
 * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
 * data availability
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 hold_preemption:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;

	struct intel_engine_cs *engine;

	bool has_sseu;
	struct intel_sseu sseu;

	u64 poll_oa_period;
};

struct i915_oa_config_bo {
	struct llist_node node;

	struct i915_oa_config *oa_config;
	struct i915_vma *vma;
};

static struct ctl_table_header *sysctl_header;

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);

void i915_oa_config_release(struct kref *ref)
{
	struct i915_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(oa_config->flex_regs);
	kfree(oa_config->b_counter_regs);
	kfree(oa_config->mux_regs);

	kfree_rcu(oa_config, rcu);
}

struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}

static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
	i915_oa_config_put(oa_bo->oa_config);
	i915_vma_put(oa_bo->vma);
	kfree(oa_bo);
}

static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
	       GEN12_OAG_OATAILPTR_MASK;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and
 * determine if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
 * object.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	int report_size = stream->oa_buffer.format_size;
	unsigned long flags;
	bool pollin;
	u32 hw_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head and
	 * tail state.
	 */
	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	if (hw_tail == stream->oa_buffer.aging_tail &&
	    (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
		/* If the HW tail hasn't moved since the last check and the HW
		 * tail has been aging for long enough, declare it the new
		 * tail.
		 */
		stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
	} else {
		u32 head, tail, aged_tail;

		/* NB: The head we observe here might effectively be a little
		 * out of date. If a read() is in progress, the head could be
		 * anywhere between this head and stream->oa_buffer.tail.
		 */
		head = stream->oa_buffer.head - gtt_offset;
		aged_tail = stream->oa_buffer.tail - gtt_offset;

		hw_tail -= gtt_offset;
		tail = hw_tail;

		/* Walk the stream backward until we find a report with dword 0
		 * & 1 not at 0. Since the circular buffer pointers progress by
		 * increments of 64 bytes and that reports can be up to 256
		 * bytes long, we can't tell whether a report has fully landed
		 * in memory before the first 2 dwords of the following report
		 * have effectively landed.
		 *
		 * This is assuming that the writes of the OA unit land in
		 * memory in the order they were written to.
		 * If not : (╯°□°)╯︵ ┻━┻
		 */
		while (OA_TAKEN(tail, aged_tail) >= report_size) {
			u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);

			if (report32[0] != 0 || report32[1] != 0)
				break;

			tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
		}

		if (OA_TAKEN(hw_tail, tail) > report_size &&
		    __ratelimit(&stream->perf->tail_pointer_race))
			DRM_NOTE("unlanded report(s) head=0x%x "
				 "tail=0x%x hw_tail=0x%x\n",
				 head, tail, hw_tail);

		stream->oa_buffer.tail = gtt_offset + tail;
		stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
		stream->oa_buffer.aging_timestamp = now;
	}

	pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
			  stream->oa_buffer.head - gtt_offset) >= report_size;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return pollin;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format_size;
	struct drm_i915_perf_record_header header;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (copy_to_user(buf, report, report_size))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  (IS_GEN(stream->perf->i915, 12) ?
			   OAREPORT_REASON_MASK_EXTENDED :
			   OAREPORT_REASON_MASK));
		if (reason == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ctx_id = report32[2] & stream->specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note: that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
		    INTEL_GEN(stream->perf->i915) <= 11)
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it
		 * can track switches in between MI_REPORT_PERF_COUNT commands
		 * and can itself subtract/ignore the progress of counters
		 * associated with other contexts. Note that the hardware
		 * automatically triggers reports when switching to a new
		 * context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not-uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!stream->perf->exclusive_stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (stream->perf->exclusive_stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		i915_reg_t oaheadptr;

		oaheadptr = IS_GEN(stream->perf->i915, 12) ?
			    GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;
		intel_uncore_write(uncore, oaheadptr,
				   head & GEN12_OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiate appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus;
	i915_reg_t oastatus_reg;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
		       GEN12_OAG_OASTATUS : GEN8_OASTATUS;

	oastatus = intel_uncore_read(uncore, oastatus_reg);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow does anyway likely indicate
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = intel_uncore_read(uncore, oastatus_reg);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;

		intel_uncore_rmw(uncore, oastatus_reg,
				 GEN8_OASTATUS_COUNTER_OVERFLOW |
				 GEN8_OASTATUS_REPORT_LOST,
				 IS_GEN_RANGE(uncore->i915, 8, 10) ?
				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
9228c2ecf20Sopenharmony_ci * gen7_append_oa_reports - Copies all buffered OA reports into userspace read() buffer.
9238c2ecf20Sopenharmony_ci * @stream: An i915-perf stream opened for OA metrics
9248c2ecf20Sopenharmony_ci * @buf: destination buffer given by userspace
9258c2ecf20Sopenharmony_ci * @count: the number of bytes userspace wants to read
9268c2ecf20Sopenharmony_ci * @offset: (inout): the current position for writing into @buf
9278c2ecf20Sopenharmony_ci *
9288c2ecf20Sopenharmony_ci * Notably any error condition resulting in a short read (-%ENOSPC or
9298c2ecf20Sopenharmony_ci * -%EFAULT) will be returned even though one or more records may
9308c2ecf20Sopenharmony_ci * have been successfully copied. In this case it's up to the caller
9318c2ecf20Sopenharmony_ci * to decide if the error should be squashed before returning to
9328c2ecf20Sopenharmony_ci * userspace.
9338c2ecf20Sopenharmony_ci *
9348c2ecf20Sopenharmony_ci * Note: reports are consumed from the head, and appended to the
9358c2ecf20Sopenharmony_ci * tail, so the tail chases the head?... If you think that's mad
9368c2ecf20Sopenharmony_ci * and back-to-front you're not alone, but this follows the
9378c2ecf20Sopenharmony_ci * Gen PRM naming convention.
9388c2ecf20Sopenharmony_ci *
9398c2ecf20Sopenharmony_ci * Returns: 0 on success, negative error code on failure.
9408c2ecf20Sopenharmony_ci */
9418c2ecf20Sopenharmony_cistatic int gen7_append_oa_reports(struct i915_perf_stream *stream,
9428c2ecf20Sopenharmony_ci				  char __user *buf,
9438c2ecf20Sopenharmony_ci				  size_t count,
9448c2ecf20Sopenharmony_ci				  size_t *offset)
9458c2ecf20Sopenharmony_ci{
9468c2ecf20Sopenharmony_ci	struct intel_uncore *uncore = stream->uncore;
9478c2ecf20Sopenharmony_ci	int report_size = stream->oa_buffer.format_size;
9488c2ecf20Sopenharmony_ci	u8 *oa_buf_base = stream->oa_buffer.vaddr;
9498c2ecf20Sopenharmony_ci	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
9508c2ecf20Sopenharmony_ci	u32 mask = (OA_BUFFER_SIZE - 1);
9518c2ecf20Sopenharmony_ci	size_t start_offset = *offset;
9528c2ecf20Sopenharmony_ci	unsigned long flags;
9538c2ecf20Sopenharmony_ci	u32 head, tail;
9548c2ecf20Sopenharmony_ci	u32 taken;
9558c2ecf20Sopenharmony_ci	int ret = 0;
9568c2ecf20Sopenharmony_ci
9578c2ecf20Sopenharmony_ci	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
9588c2ecf20Sopenharmony_ci		return -EIO;
9598c2ecf20Sopenharmony_ci
9608c2ecf20Sopenharmony_ci	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
9618c2ecf20Sopenharmony_ci
9628c2ecf20Sopenharmony_ci	head = stream->oa_buffer.head;
9638c2ecf20Sopenharmony_ci	tail = stream->oa_buffer.tail;
9648c2ecf20Sopenharmony_ci
9658c2ecf20Sopenharmony_ci	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
9668c2ecf20Sopenharmony_ci
9678c2ecf20Sopenharmony_ci	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
9688c2ecf20Sopenharmony_ci	 * while indexing relative to oa_buf_base.
9698c2ecf20Sopenharmony_ci	 */
9708c2ecf20Sopenharmony_ci	head -= gtt_offset;
9718c2ecf20Sopenharmony_ci	tail -= gtt_offset;
9728c2ecf20Sopenharmony_ci
9738c2ecf20Sopenharmony_ci	/* An out of bounds or misaligned head or tail pointer implies a driver
9748c2ecf20Sopenharmony_ci	 * bug since we validate + align the tail pointers we read from the
9758c2ecf20Sopenharmony_ci	 * hardware and we are in full control of the head pointer which should
9768c2ecf20Sopenharmony_ci	 * only be incremented by multiples of the report size (notably also
9778c2ecf20Sopenharmony_ci	 * all a power of two).
9788c2ecf20Sopenharmony_ci	 */
9798c2ecf20Sopenharmony_ci	if (drm_WARN_ONCE(&uncore->i915->drm,
9808c2ecf20Sopenharmony_ci			  head > OA_BUFFER_SIZE || head % report_size ||
9818c2ecf20Sopenharmony_ci			  tail > OA_BUFFER_SIZE || tail % report_size,
9828c2ecf20Sopenharmony_ci			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
9838c2ecf20Sopenharmony_ci			  head, tail))
9848c2ecf20Sopenharmony_ci		return -EIO;
9858c2ecf20Sopenharmony_ci
9868c2ecf20Sopenharmony_ci
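8c2ecf20Sopenharmony_ci	/* Consume one report per iteration until OA_TAKEN() sees no more
8c2ecf20Sopenharmony_ci	 * data between head and tail, wrapping head with the power-of-two
8c2ecf20Sopenharmony_ci	 * buffer mask as it advances by report_size each time.
8c2ecf20Sopenharmony_ci	 */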
9878c2ecf20Sopenharmony_ci	for (/* none */;
9888c2ecf20Sopenharmony_ci	     (taken = OA_TAKEN(tail, head));
9898c2ecf20Sopenharmony_ci	     head = (head + report_size) & mask) {
9908c2ecf20Sopenharmony_ci		u8 *report = oa_buf_base + head;
9918c2ecf20Sopenharmony_ci		u32 *report32 = (void *)report;
9928c2ecf20Sopenharmony_ci
9938c2ecf20Sopenharmony_ci		/* All the report sizes factor neatly into the buffer
9948c2ecf20Sopenharmony_ci		 * size so we never expect to see a report split
9958c2ecf20Sopenharmony_ci		 * between the beginning and end of the buffer.
9968c2ecf20Sopenharmony_ci		 *
9978c2ecf20Sopenharmony_ci		 * Given the initial alignment check a misalignment
9988c2ecf20Sopenharmony_ci		 * here would imply a driver bug that would result
9998c2ecf20Sopenharmony_ci		 * in an overrun.
10008c2ecf20Sopenharmony_ci		 */
10018c2ecf20Sopenharmony_ci		if (drm_WARN_ON(&uncore->i915->drm,
10028c2ecf20Sopenharmony_ci				(OA_BUFFER_SIZE - head) < report_size)) {
10038c2ecf20Sopenharmony_ci			drm_err(&uncore->i915->drm,
10048c2ecf20Sopenharmony_ci				"Spurious OA head ptr: non-integral report offset\n");
10058c2ecf20Sopenharmony_ci			break;
10068c2ecf20Sopenharmony_ci		}
10078c2ecf20Sopenharmony_ci
10088c2ecf20Sopenharmony_ci		/* The report-ID field for periodic samples includes
10098c2ecf20Sopenharmony_ci		 * some undocumented flags related to what triggered
10108c2ecf20Sopenharmony_ci		 * the report and is never expected to be zero so we
10118c2ecf20Sopenharmony_ci		 * can check that the report isn't invalid before
10128c2ecf20Sopenharmony_ci		 * copying it to userspace...
10138c2ecf20Sopenharmony_ci		 */
10148c2ecf20Sopenharmony_ci		if (report32[0] == 0) {
10158c2ecf20Sopenharmony_ci			if (__ratelimit(&stream->perf->spurious_report_rs))
10168c2ecf20Sopenharmony_ci				DRM_NOTE("Skipping spurious, invalid OA report\n");
10178c2ecf20Sopenharmony_ci			continue;
10188c2ecf20Sopenharmony_ci		}
10198c2ecf20Sopenharmony_ci
10208c2ecf20Sopenharmony_ci		ret = append_oa_sample(stream, buf, count, offset, report);
10218c2ecf20Sopenharmony_ci		if (ret)
10228c2ecf20Sopenharmony_ci			break;
10238c2ecf20Sopenharmony_ci
10248c2ecf20Sopenharmony_ci		/* Clear out the first 2 dwords as a means of detecting unlanded
10258c2ecf20Sopenharmony_ci		 * reports.
10268c2ecf20Sopenharmony_ci		 */
10278c2ecf20Sopenharmony_ci		report32[0] = 0;
10288c2ecf20Sopenharmony_ci		report32[1] = 0;
10298c2ecf20Sopenharmony_ci	}
10308c2ecf20Sopenharmony_ci
10318c2ecf20Sopenharmony_ci	if (start_offset != *offset) {
10328c2ecf20Sopenharmony_ci		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
10338c2ecf20Sopenharmony_ci
10348c2ecf20Sopenharmony_ci		/* We removed the gtt_offset for the copy loop above, indexing
10358c2ecf20Sopenharmony_ci		 * relative to oa_buf_base so put back here...
10368c2ecf20Sopenharmony_ci		 */
10378c2ecf20Sopenharmony_ci		head += gtt_offset;
10388c2ecf20Sopenharmony_ci
10398c2ecf20Sopenharmony_ci		intel_uncore_write(uncore, GEN7_OASTATUS2,
10408c2ecf20Sopenharmony_ci				   (head & GEN7_OASTATUS2_HEAD_MASK) |
10418c2ecf20Sopenharmony_ci				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
10428c2ecf20Sopenharmony_ci		stream->oa_buffer.head = head;
10438c2ecf20Sopenharmony_ci
10448c2ecf20Sopenharmony_ci		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
10458c2ecf20Sopenharmony_ci	}
10468c2ecf20Sopenharmony_ci
10478c2ecf20Sopenharmony_ci	return ret;
10488c2ecf20Sopenharmony_ci}
10498c2ecf20Sopenharmony_ci
10508c2ecf20Sopenharmony_ci/**
10518c2ecf20Sopenharmony_ci * gen7_oa_read - copy status records then buffered OA reports
10528c2ecf20Sopenharmony_ci * @stream: An i915-perf stream opened for OA metrics
10538c2ecf20Sopenharmony_ci * @buf: destination buffer given by userspace
10548c2ecf20Sopenharmony_ci * @count: the number of bytes userspace wants to read
10558c2ecf20Sopenharmony_ci * @offset: (inout): the current position for writing into @buf
10568c2ecf20Sopenharmony_ci *
10578c2ecf20Sopenharmony_ci * Checks Gen 7 specific OA unit status registers and if necessary appends
10588c2ecf20Sopenharmony_ci * corresponding status records for userspace (such as for a buffer full
10598c2ecf20Sopenharmony_ci * condition) and then initiates appending any buffered OA reports.
10608c2ecf20Sopenharmony_ci *
10618c2ecf20Sopenharmony_ci * Updates @offset according to the number of bytes successfully copied into
10628c2ecf20Sopenharmony_ci * the userspace buffer.
10638c2ecf20Sopenharmony_ci *
10648c2ecf20Sopenharmony_ci * Returns: zero on success or a negative error code
10658c2ecf20Sopenharmony_ci */
10668c2ecf20Sopenharmony_cistatic int gen7_oa_read(struct i915_perf_stream *stream,
10678c2ecf20Sopenharmony_ci			char __user *buf,
10688c2ecf20Sopenharmony_ci			size_t count,
10698c2ecf20Sopenharmony_ci			size_t *offset)
10708c2ecf20Sopenharmony_ci{
10718c2ecf20Sopenharmony_ci	struct intel_uncore *uncore = stream->uncore;
10728c2ecf20Sopenharmony_ci	u32 oastatus1;
10738c2ecf20Sopenharmony_ci	int ret;
10748c2ecf20Sopenharmony_ci
10758c2ecf20Sopenharmony_ci	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
10768c2ecf20Sopenharmony_ci		return -EIO;
10778c2ecf20Sopenharmony_ci
10788c2ecf20Sopenharmony_ci	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
10798c2ecf20Sopenharmony_ci
10808c2ecf20Sopenharmony_ci	/* XXX: On Haswell we don't have a safe way to clear oastatus1
10818c2ecf20Sopenharmony_ci	 * bits while the OA unit is enabled (while the tail pointer
10828c2ecf20Sopenharmony_ci	 * may be updated asynchronously) so we ignore status bits
10838c2ecf20Sopenharmony_ci	 * that have already been reported to userspace.
10848c2ecf20Sopenharmony_ci	 */
10858c2ecf20Sopenharmony_ci	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;
10868c2ecf20Sopenharmony_ci
10878c2ecf20Sopenharmony_ci	/* We treat OABUFFER_OVERFLOW as a significant error:
10888c2ecf20Sopenharmony_ci	 *
10898c2ecf20Sopenharmony_ci	 * - The status can be interpreted to mean that the buffer is
10908c2ecf20Sopenharmony_ci	 *   currently full (with a higher precedence than OA_TAKEN()
10918c2ecf20Sopenharmony_ci	 *   which will start to report a near-empty buffer after an
10928c2ecf20Sopenharmony_ci	 *   overflow) but it's awkward that we can't clear the status
10938c2ecf20Sopenharmony_ci	 *   on Haswell, so without a reset we won't be able to catch
10948c2ecf20Sopenharmony_ci	 *   the state again.
10958c2ecf20Sopenharmony_ci	 *
10968c2ecf20Sopenharmony_ci	 * - Since it also implies the HW has started overwriting old
10978c2ecf20Sopenharmony_ci	 *   reports it may also affect our sanity checks for invalid
10988c2ecf20Sopenharmony_ci	 *   reports when copying to userspace that assume new reports
10998c2ecf20Sopenharmony_ci	 *   are being written to cleared memory.
11008c2ecf20Sopenharmony_ci	 *
11018c2ecf20Sopenharmony_ci	 * - In the future we may want to introduce a flight recorder
11028c2ecf20Sopenharmony_ci	 *   mode where the driver will automatically maintain a safe
11038c2ecf20Sopenharmony_ci	 *   guard band between head/tail, avoiding this overflow
11048c2ecf20Sopenharmony_ci	 *   condition, but we avoid the added driver complexity for
11058c2ecf20Sopenharmony_ci	 *   now.
11068c2ecf20Sopenharmony_ci	 */
11078c2ecf20Sopenharmony_ci	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
11088c2ecf20Sopenharmony_ci		ret = append_oa_status(stream, buf, count, offset,
11098c2ecf20Sopenharmony_ci				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
11108c2ecf20Sopenharmony_ci		if (ret)
11118c2ecf20Sopenharmony_ci			return ret;
11128c2ecf20Sopenharmony_ci
11138c2ecf20Sopenharmony_ci		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
11148c2ecf20Sopenharmony_ci			  stream->period_exponent);
11158c2ecf20Sopenharmony_ci
11168c2ecf20Sopenharmony_ci		stream->perf->ops.oa_disable(stream);
11178c2ecf20Sopenharmony_ci		stream->perf->ops.oa_enable(stream);
11188c2ecf20Sopenharmony_ci
11198c2ecf20Sopenharmony_ci		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
11208c2ecf20Sopenharmony_ci	}
11218c2ecf20Sopenharmony_ci
11228c2ecf20Sopenharmony_ci	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
11238c2ecf20Sopenharmony_ci		ret = append_oa_status(stream, buf, count, offset,
11248c2ecf20Sopenharmony_ci				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
11258c2ecf20Sopenharmony_ci		if (ret)
11268c2ecf20Sopenharmony_ci			return ret;
11278c2ecf20Sopenharmony_ci		stream->perf->gen7_latched_oastatus1 |=
11288c2ecf20Sopenharmony_ci			GEN7_OASTATUS1_REPORT_LOST;
11298c2ecf20Sopenharmony_ci	}
11308c2ecf20Sopenharmony_ci
11318c2ecf20Sopenharmony_ci	return gen7_append_oa_reports(stream, buf, count, offset);
11328c2ecf20Sopenharmony_ci}
11338c2ecf20Sopenharmony_ci
11348c2ecf20Sopenharmony_ci/**
11358c2ecf20Sopenharmony_ci * i915_oa_wait_unlocked - handles blocking IO until OA data available
11368c2ecf20Sopenharmony_ci * @stream: An i915-perf stream opened for OA metrics
11378c2ecf20Sopenharmony_ci *
11388c2ecf20Sopenharmony_ci * Called when userspace tries to read() from a blocking stream FD opened
11398c2ecf20Sopenharmony_ci * for OA metrics. It waits until the hrtimer callback finds a non-empty
11408c2ecf20Sopenharmony_ci * OA buffer and wakes us.
11418c2ecf20Sopenharmony_ci *
11428c2ecf20Sopenharmony_ci * Note: it's acceptable to have this return with some false positives
11438c2ecf20Sopenharmony_ci * since any subsequent read handling will return -EAGAIN if there isn't
11448c2ecf20Sopenharmony_ci * really data ready for userspace yet.
11458c2ecf20Sopenharmony_ci *
11468c2ecf20Sopenharmony_ci * Returns: zero on success or a negative error code
11478c2ecf20Sopenharmony_ci */
11488c2ecf20Sopenharmony_cistatic int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
11498c2ecf20Sopenharmony_ci{
11508c2ecf20Sopenharmony_ci	/* We would wait indefinitely if periodic sampling is not enabled */
11518c2ecf20Sopenharmony_ci	if (!stream->periodic)
11528c2ecf20Sopenharmony_ci		return -EIO;
11538c2ecf20Sopenharmony_ci
11548c2ecf20Sopenharmony_ci	return wait_event_interruptible(stream->poll_wq,
11558c2ecf20Sopenharmony_ci					oa_buffer_check_unlocked(stream));
11568c2ecf20Sopenharmony_ci}
11578c2ecf20Sopenharmony_ci
11588c2ecf20Sopenharmony_ci/**
11598c2ecf20Sopenharmony_ci * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
11608c2ecf20Sopenharmony_ci * @stream: An i915-perf stream opened for OA metrics
11618c2ecf20Sopenharmony_ci * @file: An i915 perf stream file
11628c2ecf20Sopenharmony_ci * @wait: poll() state table
11638c2ecf20Sopenharmony_ci *
11648c2ecf20Sopenharmony_ci * For handling userspace polling on an i915 perf stream opened for OA metrics,
11658c2ecf20Sopenharmony_ci * this starts a poll_wait with the wait queue that our hrtimer callback wakes
11668c2ecf20Sopenharmony_ci * when it sees data ready to read in the circular OA buffer.
11678c2ecf20Sopenharmony_ci */
11688c2ecf20Sopenharmony_cistatic void i915_oa_poll_wait(struct i915_perf_stream *stream,
11698c2ecf20Sopenharmony_ci			      struct file *file,
11708c2ecf20Sopenharmony_ci			      poll_table *wait)
11718c2ecf20Sopenharmony_ci{
11728c2ecf20Sopenharmony_ci	poll_wait(file, &stream->poll_wq, wait);
11738c2ecf20Sopenharmony_ci}
11748c2ecf20Sopenharmony_ci
11758c2ecf20Sopenharmony_ci/**
11768c2ecf20Sopenharmony_ci * i915_oa_read - just calls through to &i915_oa_ops->read
11778c2ecf20Sopenharmony_ci * @stream: An i915-perf stream opened for OA metrics
11788c2ecf20Sopenharmony_ci * @buf: destination buffer given by userspace
11798c2ecf20Sopenharmony_ci * @count: the number of bytes userspace wants to read
11808c2ecf20Sopenharmony_ci * @offset: (inout): the current position for writing into @buf
11818c2ecf20Sopenharmony_ci *
11828c2ecf20Sopenharmony_ci * Updates @offset according to the number of bytes successfully copied into
11838c2ecf20Sopenharmony_ci * the userspace buffer.
11848c2ecf20Sopenharmony_ci *
11858c2ecf20Sopenharmony_ci * Returns: zero on success or a negative error code
11868c2ecf20Sopenharmony_ci */
11878c2ecf20Sopenharmony_cistatic int i915_oa_read(struct i915_perf_stream *stream,
11888c2ecf20Sopenharmony_ci			char __user *buf,
11898c2ecf20Sopenharmony_ci			size_t count,
11908c2ecf20Sopenharmony_ci			size_t *offset)
11918c2ecf20Sopenharmony_ci{
11928c2ecf20Sopenharmony_ci	return stream->perf->ops.read(stream, buf, count, offset);
11938c2ecf20Sopenharmony_ci}
11948c2ecf20Sopenharmony_ci
11958c2ecf20Sopenharmony_cistatic struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
11968c2ecf20Sopenharmony_ci{
11978c2ecf20Sopenharmony_ci	struct i915_gem_engines_iter it;
11988c2ecf20Sopenharmony_ci	struct i915_gem_context *ctx = stream->ctx;
11998c2ecf20Sopenharmony_ci	struct intel_context *ce;
12008c2ecf20Sopenharmony_ci	struct i915_gem_ww_ctx ww;
12018c2ecf20Sopenharmony_ci	int err = -ENODEV;
12028c2ecf20Sopenharmony_ci
12038c2ecf20Sopenharmony_ci	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
12048c2ecf20Sopenharmony_ci		if (ce->engine != stream->engine) /* first match! */
12058c2ecf20Sopenharmony_ci			continue;
12068c2ecf20Sopenharmony_ci
12078c2ecf20Sopenharmony_ci		err = 0;
12088c2ecf20Sopenharmony_ci		break;
12098c2ecf20Sopenharmony_ci	}
12108c2ecf20Sopenharmony_ci	i915_gem_context_unlock_engines(ctx);
12118c2ecf20Sopenharmony_ci
12128c2ecf20Sopenharmony_ci	if (err)
12138c2ecf20Sopenharmony_ci		return ERR_PTR(err);
12148c2ecf20Sopenharmony_ci
12158c2ecf20Sopenharmony_ci	i915_gem_ww_ctx_init(&ww, true);
12168c2ecf20Sopenharmony_ciretry:
12178c2ecf20Sopenharmony_ci	/*
12188c2ecf20Sopenharmony_ci	 * As the ID is the gtt offset of the context's vma we
12198c2ecf20Sopenharmony_ci	 * pin the vma to ensure the ID remains fixed.
12208c2ecf20Sopenharmony_ci	 */
12218c2ecf20Sopenharmony_ci	err = intel_context_pin_ww(ce, &ww);
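8c2ecf20Sopenharmony_ci	/* On ww contention, back off and retry the pin under the same
8c2ecf20Sopenharmony_ci	 * ww context.
8c2ecf20Sopenharmony_ci	 */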
12228c2ecf20Sopenharmony_ci	if (err == -EDEADLK) {
12238c2ecf20Sopenharmony_ci		err = i915_gem_ww_ctx_backoff(&ww);
12248c2ecf20Sopenharmony_ci		if (!err)
12258c2ecf20Sopenharmony_ci			goto retry;
12268c2ecf20Sopenharmony_ci	}
12278c2ecf20Sopenharmony_ci	i915_gem_ww_ctx_fini(&ww);
12288c2ecf20Sopenharmony_ci
12298c2ecf20Sopenharmony_ci	if (err)
12308c2ecf20Sopenharmony_ci		return ERR_PTR(err);
12318c2ecf20Sopenharmony_ci
12328c2ecf20Sopenharmony_ci	stream->pinned_ctx = ce;
12338c2ecf20Sopenharmony_ci	return stream->pinned_ctx;
12348c2ecf20Sopenharmony_ci}
12358c2ecf20Sopenharmony_ci
12368c2ecf20Sopenharmony_ci/**
12378c2ecf20Sopenharmony_ci * oa_get_render_ctx_id - determine and hold ctx hw id
12388c2ecf20Sopenharmony_ci * @stream: An i915-perf stream opened for OA metrics
12398c2ecf20Sopenharmony_ci *
12408c2ecf20Sopenharmony_ci * Determine the render context hw id, and ensure it remains fixed for the
12418c2ecf20Sopenharmony_ci * lifetime of the stream. This ensures that we don't have to worry about
12428c2ecf20Sopenharmony_ci * updating the context ID in OACONTROL on the fly.
12438c2ecf20Sopenharmony_ci *
12448c2ecf20Sopenharmony_ci * Returns: zero on success or a negative error code
12458c2ecf20Sopenharmony_ci */
12468c2ecf20Sopenharmony_cistatic int oa_get_render_ctx_id(struct i915_perf_stream *stream)
12478c2ecf20Sopenharmony_ci{
12488c2ecf20Sopenharmony_ci	struct intel_context *ce;
12498c2ecf20Sopenharmony_ci
12508c2ecf20Sopenharmony_ci	ce = oa_pin_context(stream);
12518c2ecf20Sopenharmony_ci	if (IS_ERR(ce))
12528c2ecf20Sopenharmony_ci		return PTR_ERR(ce);
12538c2ecf20Sopenharmony_ci
12548c2ecf20Sopenharmony_ci	switch (INTEL_GEN(ce->engine->i915)) {
12558c2ecf20Sopenharmony_ci	case 7: {
12568c2ecf20Sopenharmony_ci		/*
12578c2ecf20Sopenharmony_ci		 * On Haswell we don't do any post processing of the reports
12588c2ecf20Sopenharmony_ci		 * and don't need to use the mask.
12598c2ecf20Sopenharmony_ci		 */
12608c2ecf20Sopenharmony_ci		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
12618c2ecf20Sopenharmony_ci		stream->specific_ctx_id_mask = 0;
12628c2ecf20Sopenharmony_ci		break;
12638c2ecf20Sopenharmony_ci	}
12648c2ecf20Sopenharmony_ci
12658c2ecf20Sopenharmony_ci	case 8:
12668c2ecf20Sopenharmony_ci	case 9:
12678c2ecf20Sopenharmony_ci	case 10:
12688c2ecf20Sopenharmony_ci		if (intel_engine_in_execlists_submission_mode(ce->engine)) {
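8c2ecf20Sopenharmony_ci			/*
8c2ecf20Sopenharmony_ci			 * In execlists mode i915 picks the context tag itself
8c2ecf20Sopenharmony_ci			 * (ce->tag is assigned below), so use the full GEN8
8c2ecf20Sopenharmony_ci			 * context-ID width as the mask and the all-ones value
8c2ecf20Sopenharmony_ci			 * as our filter id.
8c2ecf20Sopenharmony_ci			 */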
12698c2ecf20Sopenharmony_ci			stream->specific_ctx_id_mask =
12708c2ecf20Sopenharmony_ci				(1U << GEN8_CTX_ID_WIDTH) - 1;
12718c2ecf20Sopenharmony_ci			stream->specific_ctx_id = stream->specific_ctx_id_mask;
12728c2ecf20Sopenharmony_ci		} else {
12738c2ecf20Sopenharmony_ci			/*
12748c2ecf20Sopenharmony_ci			 * When using GuC, the context descriptor we write in
12758c2ecf20Sopenharmony_ci			 * i915 is read by GuC and rewritten before it's
12768c2ecf20Sopenharmony_ci			 * actually written into the hardware. The LRCA is
12778c2ecf20Sopenharmony_ci			 * what is put into the context id field of the
12788c2ecf20Sopenharmony_ci			 * context descriptor by GuC. Because it's aligned to
12798c2ecf20Sopenharmony_ci			 * a page, the lower 12bits are always at 0 and
12808c2ecf20Sopenharmony_ci			 * dropped by GuC. They won't be part of the context
12818c2ecf20Sopenharmony_ci			 * ID in the OA reports, so squash those lower bits.
12828c2ecf20Sopenharmony_ci			 */
12838c2ecf20Sopenharmony_ci			stream->specific_ctx_id = ce->lrc.lrca >> 12;
12848c2ecf20Sopenharmony_ci
12858c2ecf20Sopenharmony_ci			/*
12868c2ecf20Sopenharmony_ci			 * GuC uses the top bit to signal proxy submission, so
12878c2ecf20Sopenharmony_ci			 * ignore that bit.
12888c2ecf20Sopenharmony_ci			 */
12898c2ecf20Sopenharmony_ci			stream->specific_ctx_id_mask =
12908c2ecf20Sopenharmony_ci				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
12918c2ecf20Sopenharmony_ci		}
12928c2ecf20Sopenharmony_ci		break;
12938c2ecf20Sopenharmony_ci
12948c2ecf20Sopenharmony_ci	case 11:
12958c2ecf20Sopenharmony_ci	case 12: {
12968c2ecf20Sopenharmony_ci		stream->specific_ctx_id_mask =
12978c2ecf20Sopenharmony_ci			((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
12988c2ecf20Sopenharmony_ci		/*
12998c2ecf20Sopenharmony_ci		 * Pick an unused context id:
13008c2ecf20Sopenharmony_ci		 * - ids 0 to BITS_PER_LONG are used by other contexts
13018c2ecf20Sopenharmony_ci		 * - GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by the idle context
13028c2ecf20Sopenharmony_ci		 */
13038c2ecf20Sopenharmony_ci		stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
13048c2ecf20Sopenharmony_ci		break;
13058c2ecf20Sopenharmony_ci	}
13068c2ecf20Sopenharmony_ci
13078c2ecf20Sopenharmony_ci	default:
13088c2ecf20Sopenharmony_ci		MISSING_CASE(INTEL_GEN(ce->engine->i915));
13098c2ecf20Sopenharmony_ci	}
13108c2ecf20Sopenharmony_ci
13118c2ecf20Sopenharmony_ci	ce->tag = stream->specific_ctx_id;
13128c2ecf20Sopenharmony_ci
13138c2ecf20Sopenharmony_ci	drm_dbg(&stream->perf->i915->drm,
13148c2ecf20Sopenharmony_ci		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
13158c2ecf20Sopenharmony_ci		stream->specific_ctx_id,
13168c2ecf20Sopenharmony_ci		stream->specific_ctx_id_mask);
13178c2ecf20Sopenharmony_ci
13188c2ecf20Sopenharmony_ci	return 0;
13198c2ecf20Sopenharmony_ci}
13208c2ecf20Sopenharmony_ci
13218c2ecf20Sopenharmony_ci/**
13228c2ecf20Sopenharmony_ci * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
13238c2ecf20Sopenharmony_ci * @stream: An i915-perf stream opened for OA metrics
13248c2ecf20Sopenharmony_ci *
13258c2ecf20Sopenharmony_ci * In case anything needed doing to ensure the context HW ID would remain valid
13268c2ecf20Sopenharmony_ci * for the lifetime of the stream, then that can be undone here.
13278c2ecf20Sopenharmony_ci */
13288c2ecf20Sopenharmony_cistatic void oa_put_render_ctx_id(struct i915_perf_stream *stream)
13298c2ecf20Sopenharmony_ci{
13308c2ecf20Sopenharmony_ci	struct intel_context *ce;
13318c2ecf20Sopenharmony_ci
13328c2ecf20Sopenharmony_ci	ce = fetch_and_zero(&stream->pinned_ctx);
13338c2ecf20Sopenharmony_ci	if (ce) {
13348c2ecf20Sopenharmony_ci		ce->tag = 0; /* recomputed on next submission after parking */
13358c2ecf20Sopenharmony_ci		intel_context_unpin(ce);
13368c2ecf20Sopenharmony_ci	}
13378c2ecf20Sopenharmony_ci
13388c2ecf20Sopenharmony_ci	stream->specific_ctx_id = INVALID_CTX_ID;
13398c2ecf20Sopenharmony_ci	stream->specific_ctx_id_mask = 0;
13408c2ecf20Sopenharmony_ci}
13418c2ecf20Sopenharmony_ci
13428c2ecf20Sopenharmony_cistatic void
13438c2ecf20Sopenharmony_cifree_oa_buffer(struct i915_perf_stream *stream)
13448c2ecf20Sopenharmony_ci{
13458c2ecf20Sopenharmony_ci	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
13468c2ecf20Sopenharmony_ci				   I915_VMA_RELEASE_MAP);
13478c2ecf20Sopenharmony_ci
13488c2ecf20Sopenharmony_ci	stream->oa_buffer.vaddr = NULL;
13498c2ecf20Sopenharmony_ci}
13508c2ecf20Sopenharmony_ci
13518c2ecf20Sopenharmony_cistatic void
13528c2ecf20Sopenharmony_cifree_oa_configs(struct i915_perf_stream *stream)
13538c2ecf20Sopenharmony_ci{
13548c2ecf20Sopenharmony_ci	struct i915_oa_config_bo *oa_bo, *tmp;
13558c2ecf20Sopenharmony_ci
13568c2ecf20Sopenharmony_ci	i915_oa_config_put(stream->oa_config);
13578c2ecf20Sopenharmony_ci	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
13588c2ecf20Sopenharmony_ci		free_oa_config_bo(oa_bo);
13598c2ecf20Sopenharmony_ci}
13608c2ecf20Sopenharmony_ci
13618c2ecf20Sopenharmony_cistatic void
13628c2ecf20Sopenharmony_cifree_noa_wait(struct i915_perf_stream *stream)
13638c2ecf20Sopenharmony_ci{
13648c2ecf20Sopenharmony_ci	i915_vma_unpin_and_release(&stream->noa_wait, 0);
13658c2ecf20Sopenharmony_ci}
13668c2ecf20Sopenharmony_ci
13678c2ecf20Sopenharmony_cistatic void i915_oa_stream_destroy(struct i915_perf_stream *stream)
13688c2ecf20Sopenharmony_ci{
13698c2ecf20Sopenharmony_ci	struct i915_perf *perf = stream->perf;
13708c2ecf20Sopenharmony_ci
13718c2ecf20Sopenharmony_ci	BUG_ON(stream != perf->exclusive_stream);
13728c2ecf20Sopenharmony_ci
13738c2ecf20Sopenharmony_ci	/*
13748c2ecf20Sopenharmony_ci	 * Unset exclusive_stream first, it will be checked while disabling
13758c2ecf20Sopenharmony_ci	 * the metric set on gen8+.
13768c2ecf20Sopenharmony_ci	 *
13778c2ecf20Sopenharmony_ci	 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
13788c2ecf20Sopenharmony_ci	 */
13798c2ecf20Sopenharmony_ci	WRITE_ONCE(perf->exclusive_stream, NULL);
13808c2ecf20Sopenharmony_ci	perf->ops.disable_metric_set(stream);
13818c2ecf20Sopenharmony_ci
13828c2ecf20Sopenharmony_ci	free_oa_buffer(stream);
13838c2ecf20Sopenharmony_ci
13848c2ecf20Sopenharmony_ci	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
13858c2ecf20Sopenharmony_ci	intel_engine_pm_put(stream->engine);
13868c2ecf20Sopenharmony_ci
13878c2ecf20Sopenharmony_ci	if (stream->ctx)
13888c2ecf20Sopenharmony_ci		oa_put_render_ctx_id(stream);
13898c2ecf20Sopenharmony_ci
13908c2ecf20Sopenharmony_ci	free_oa_configs(stream);
13918c2ecf20Sopenharmony_ci	free_noa_wait(stream);
13928c2ecf20Sopenharmony_ci
13938c2ecf20Sopenharmony_ci	if (perf->spurious_report_rs.missed) {
13948c2ecf20Sopenharmony_ci		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
13958c2ecf20Sopenharmony_ci			 perf->spurious_report_rs.missed);
13968c2ecf20Sopenharmony_ci	}
13978c2ecf20Sopenharmony_ci}
13988c2ecf20Sopenharmony_ci
13998c2ecf20Sopenharmony_cistatic void gen7_init_oa_buffer(struct i915_perf_stream *stream)
14008c2ecf20Sopenharmony_ci{
14018c2ecf20Sopenharmony_ci	struct intel_uncore *uncore = stream->uncore;
14028c2ecf20Sopenharmony_ci	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
14038c2ecf20Sopenharmony_ci	unsigned long flags;
14048c2ecf20Sopenharmony_ci
14058c2ecf20Sopenharmony_ci	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
14068c2ecf20Sopenharmony_ci
14078c2ecf20Sopenharmony_ci	/* Pre-DevBDW: OABUFFER must be set with counters off,
14088c2ecf20Sopenharmony_ci	 * before OASTATUS1, but after OASTATUS2
14098c2ecf20Sopenharmony_ci	 */
14108c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
14118c2ecf20Sopenharmony_ci			   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
14128c2ecf20Sopenharmony_ci	stream->oa_buffer.head = gtt_offset;
14138c2ecf20Sopenharmony_ci
14148c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);
14158c2ecf20Sopenharmony_ci
14168c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
14178c2ecf20Sopenharmony_ci			   gtt_offset | OABUFFER_SIZE_16M);
14188c2ecf20Sopenharmony_ci
14198c2ecf20Sopenharmony_ci	/* Mark that we need updated tail pointers to read from... */
14208c2ecf20Sopenharmony_ci	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
14218c2ecf20Sopenharmony_ci	stream->oa_buffer.tail = gtt_offset;
14228c2ecf20Sopenharmony_ci
14238c2ecf20Sopenharmony_ci	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
14248c2ecf20Sopenharmony_ci
14258c2ecf20Sopenharmony_ci	/* On Haswell we have to track which OASTATUS1 flags we've
14268c2ecf20Sopenharmony_ci	 * already seen since they can't be cleared while periodic
14278c2ecf20Sopenharmony_ci	 * sampling is enabled.
14288c2ecf20Sopenharmony_ci	 */
14298c2ecf20Sopenharmony_ci	stream->perf->gen7_latched_oastatus1 = 0;
14308c2ecf20Sopenharmony_ci
14318c2ecf20Sopenharmony_ci	/* NB: although the OA buffer will initially be allocated
14328c2ecf20Sopenharmony_ci	 * zeroed via shmfs (and so this memset is redundant when
14338c2ecf20Sopenharmony_ci	 * first allocating), we may re-init the OA buffer, either
14348c2ecf20Sopenharmony_ci	 * when re-enabling a stream or in error/reset paths.
14358c2ecf20Sopenharmony_ci	 *
14368c2ecf20Sopenharmony_ci	 * The reason we clear the buffer for each re-init is for the
14378c2ecf20Sopenharmony_ci	 * sanity check in gen7_append_oa_reports() that looks at the
14388c2ecf20Sopenharmony_ci	 * report-id field to make sure it's non-zero which relies on
14398c2ecf20Sopenharmony_ci	 * the assumption that new reports are being written to zeroed
14408c2ecf20Sopenharmony_ci	 * memory...
14418c2ecf20Sopenharmony_ci	 */
14428c2ecf20Sopenharmony_ci	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
14438c2ecf20Sopenharmony_ci}
14448c2ecf20Sopenharmony_ci
14458c2ecf20Sopenharmony_cistatic void gen8_init_oa_buffer(struct i915_perf_stream *stream)
14468c2ecf20Sopenharmony_ci{
14478c2ecf20Sopenharmony_ci	struct intel_uncore *uncore = stream->uncore;
14488c2ecf20Sopenharmony_ci	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
14498c2ecf20Sopenharmony_ci	unsigned long flags;
14508c2ecf20Sopenharmony_ci
14518c2ecf20Sopenharmony_ci	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
14528c2ecf20Sopenharmony_ci
14538c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN8_OASTATUS, 0);
14548c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
14558c2ecf20Sopenharmony_ci	stream->oa_buffer.head = gtt_offset;
14568c2ecf20Sopenharmony_ci
14578c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);
14588c2ecf20Sopenharmony_ci
14598c2ecf20Sopenharmony_ci	/*
14608c2ecf20Sopenharmony_ci	 * PRM says:
14618c2ecf20Sopenharmony_ci	 *
14628c2ecf20Sopenharmony_ci	 *  "This MMIO must be set before the OATAILPTR
14638c2ecf20Sopenharmony_ci	 *  register and after the OAHEADPTR register. This is
14648c2ecf20Sopenharmony_ci	 *  to enable proper functionality of the overflow
14658c2ecf20Sopenharmony_ci	 *  bit."
14668c2ecf20Sopenharmony_ci	 */
14678c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
14688c2ecf20Sopenharmony_ci		   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
14698c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
14708c2ecf20Sopenharmony_ci
14718c2ecf20Sopenharmony_ci	/* Mark that we need updated tail pointers to read from... */
14728c2ecf20Sopenharmony_ci	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
14738c2ecf20Sopenharmony_ci	stream->oa_buffer.tail = gtt_offset;
14748c2ecf20Sopenharmony_ci
14758c2ecf20Sopenharmony_ci	/*
14768c2ecf20Sopenharmony_ci	 * Reset state used to recognise context switches, affecting which
14778c2ecf20Sopenharmony_ci	 * reports we will forward to userspace while filtering for a single
14788c2ecf20Sopenharmony_ci	 * context.
14798c2ecf20Sopenharmony_ci	 */
14808c2ecf20Sopenharmony_ci	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
14818c2ecf20Sopenharmony_ci
14828c2ecf20Sopenharmony_ci	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
14838c2ecf20Sopenharmony_ci
14848c2ecf20Sopenharmony_ci	/*
14858c2ecf20Sopenharmony_ci	 * NB: although the OA buffer will initially be allocated
14868c2ecf20Sopenharmony_ci	 * zeroed via shmfs (and so this memset is redundant when
14878c2ecf20Sopenharmony_ci	 * first allocating), we may re-init the OA buffer, either
14888c2ecf20Sopenharmony_ci	 * when re-enabling a stream or in error/reset paths.
14898c2ecf20Sopenharmony_ci	 *
14908c2ecf20Sopenharmony_ci	 * The reason we clear the buffer for each re-init is for the
14918c2ecf20Sopenharmony_ci	 * sanity check in gen8_append_oa_reports() that looks at the
14928c2ecf20Sopenharmony_ci	 * reason field to make sure it's non-zero which relies on
14938c2ecf20Sopenharmony_ci	 * the assumption that new reports are being written to zeroed
14948c2ecf20Sopenharmony_ci	 * memory...
14958c2ecf20Sopenharmony_ci	 */
14968c2ecf20Sopenharmony_ci	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
14978c2ecf20Sopenharmony_ci}
14988c2ecf20Sopenharmony_ci
14998c2ecf20Sopenharmony_cistatic void gen12_init_oa_buffer(struct i915_perf_stream *stream)
15008c2ecf20Sopenharmony_ci{
15018c2ecf20Sopenharmony_ci	struct intel_uncore *uncore = stream->uncore;
15028c2ecf20Sopenharmony_ci	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
15038c2ecf20Sopenharmony_ci	unsigned long flags;
15048c2ecf20Sopenharmony_ci
15058c2ecf20Sopenharmony_ci	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
15068c2ecf20Sopenharmony_ci
15078c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0);
15088c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR,
15098c2ecf20Sopenharmony_ci			   gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
15108c2ecf20Sopenharmony_ci	stream->oa_buffer.head = gtt_offset;
15118c2ecf20Sopenharmony_ci
15128c2ecf20Sopenharmony_ci	/*
15138c2ecf20Sopenharmony_ci	 * PRM says:
15148c2ecf20Sopenharmony_ci	 *
15158c2ecf20Sopenharmony_ci	 *  "This MMIO must be set before the OATAILPTR
15168c2ecf20Sopenharmony_ci	 *  register and after the OAHEADPTR register. This is
15178c2ecf20Sopenharmony_ci	 *  to enable proper functionality of the overflow
15188c2ecf20Sopenharmony_ci	 *  bit."
15198c2ecf20Sopenharmony_ci	 */
15208c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
15218c2ecf20Sopenharmony_ci			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
15228c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
15238c2ecf20Sopenharmony_ci			   gtt_offset & GEN12_OAG_OATAILPTR_MASK);
15248c2ecf20Sopenharmony_ci
15258c2ecf20Sopenharmony_ci	/* Mark that we need updated tail pointers to read from... */
15268c2ecf20Sopenharmony_ci	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
15278c2ecf20Sopenharmony_ci	stream->oa_buffer.tail = gtt_offset;
15288c2ecf20Sopenharmony_ci
15298c2ecf20Sopenharmony_ci	/*
15308c2ecf20Sopenharmony_ci	 * Reset state used to recognise context switches, affecting which
15318c2ecf20Sopenharmony_ci	 * reports we will forward to userspace while filtering for a single
15328c2ecf20Sopenharmony_ci	 * context.
15338c2ecf20Sopenharmony_ci	 */
15348c2ecf20Sopenharmony_ci	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
15358c2ecf20Sopenharmony_ci
15368c2ecf20Sopenharmony_ci	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
15378c2ecf20Sopenharmony_ci
15388c2ecf20Sopenharmony_ci	/*
15398c2ecf20Sopenharmony_ci	 * NB: although the OA buffer will initially be allocated
15408c2ecf20Sopenharmony_ci	 * zeroed via shmfs (and so this memset is redundant when
15418c2ecf20Sopenharmony_ci	 * first allocating), we may re-init the OA buffer, either
15428c2ecf20Sopenharmony_ci	 * when re-enabling a stream or in error/reset paths.
15438c2ecf20Sopenharmony_ci	 *
15448c2ecf20Sopenharmony_ci	 * The reason we clear the buffer for each re-init is for the
15458c2ecf20Sopenharmony_ci	 * sanity check in gen8_append_oa_reports() that looks at the
15468c2ecf20Sopenharmony_ci	 * reason field to make sure it's non-zero which relies on
15478c2ecf20Sopenharmony_ci	 * the assumption that new reports are being written to zeroed
15488c2ecf20Sopenharmony_ci	 * memory...
15498c2ecf20Sopenharmony_ci	 */
15508c2ecf20Sopenharmony_ci	memset(stream->oa_buffer.vaddr, 0,
15518c2ecf20Sopenharmony_ci	       stream->oa_buffer.vma->size);
15528c2ecf20Sopenharmony_ci}
15538c2ecf20Sopenharmony_ci
15548c2ecf20Sopenharmony_cistatic int alloc_oa_buffer(struct i915_perf_stream *stream)
15558c2ecf20Sopenharmony_ci{
15568c2ecf20Sopenharmony_ci	struct drm_i915_private *i915 = stream->perf->i915;
15578c2ecf20Sopenharmony_ci	struct drm_i915_gem_object *bo;
15588c2ecf20Sopenharmony_ci	struct i915_vma *vma;
15598c2ecf20Sopenharmony_ci	int ret;
15608c2ecf20Sopenharmony_ci
15618c2ecf20Sopenharmony_ci	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
15628c2ecf20Sopenharmony_ci		return -ENODEV;
15638c2ecf20Sopenharmony_ci
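8c2ecf20Sopenharmony_ci	/* OA_BUFFER_SIZE must be a power of two between 128K and 16M. */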
15648c2ecf20Sopenharmony_ci	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
15658c2ecf20Sopenharmony_ci	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
15668c2ecf20Sopenharmony_ci
15678c2ecf20Sopenharmony_ci	bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
15688c2ecf20Sopenharmony_ci	if (IS_ERR(bo)) {
15698c2ecf20Sopenharmony_ci		drm_err(&i915->drm, "Failed to allocate OA buffer\n");
15708c2ecf20Sopenharmony_ci		return PTR_ERR(bo);
15718c2ecf20Sopenharmony_ci	}
15728c2ecf20Sopenharmony_ci
15738c2ecf20Sopenharmony_ci	i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
15748c2ecf20Sopenharmony_ci
15758c2ecf20Sopenharmony_ci	/* PreHSW required 512K alignment, HSW requires 16M */
15768c2ecf20Sopenharmony_ci	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
15778c2ecf20Sopenharmony_ci	if (IS_ERR(vma)) {
15788c2ecf20Sopenharmony_ci		ret = PTR_ERR(vma);
15798c2ecf20Sopenharmony_ci		goto err_unref;
15808c2ecf20Sopenharmony_ci	}
15818c2ecf20Sopenharmony_ci	stream->oa_buffer.vma = vma;
15828c2ecf20Sopenharmony_ci
15838c2ecf20Sopenharmony_ci	stream->oa_buffer.vaddr =
15848c2ecf20Sopenharmony_ci		i915_gem_object_pin_map(bo, I915_MAP_WB);
15858c2ecf20Sopenharmony_ci	if (IS_ERR(stream->oa_buffer.vaddr)) {
15868c2ecf20Sopenharmony_ci		ret = PTR_ERR(stream->oa_buffer.vaddr);
15878c2ecf20Sopenharmony_ci		goto err_unpin;
15888c2ecf20Sopenharmony_ci	}
15898c2ecf20Sopenharmony_ci
15908c2ecf20Sopenharmony_ci	return 0;
15918c2ecf20Sopenharmony_ci
15928c2ecf20Sopenharmony_cierr_unpin:
15938c2ecf20Sopenharmony_ci	__i915_vma_unpin(vma);
15948c2ecf20Sopenharmony_ci
15958c2ecf20Sopenharmony_cierr_unref:
15968c2ecf20Sopenharmony_ci	i915_gem_object_put(bo);
15978c2ecf20Sopenharmony_ci
15988c2ecf20Sopenharmony_ci	stream->oa_buffer.vaddr = NULL;
15998c2ecf20Sopenharmony_ci	stream->oa_buffer.vma = NULL;
16008c2ecf20Sopenharmony_ci
16018c2ecf20Sopenharmony_ci	return ret;
16028c2ecf20Sopenharmony_ci}
16038c2ecf20Sopenharmony_ci
16048c2ecf20Sopenharmony_cistatic u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
16058c2ecf20Sopenharmony_ci				  bool save, i915_reg_t reg, u32 offset,
16068c2ecf20Sopenharmony_ci				  u32 dword_count)
16078c2ecf20Sopenharmony_ci{
16088c2ecf20Sopenharmony_ci	u32 cmd;
16098c2ecf20Sopenharmony_ci	u32 d;
16108c2ecf20Sopenharmony_ci
16118c2ecf20Sopenharmony_ci	cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
16128c2ecf20Sopenharmony_ci	cmd |= MI_SRM_LRM_GLOBAL_GTT;
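8c2ecf20Sopenharmony_ci	/*
8c2ecf20Sopenharmony_ci	 * On Gen8+ the SRM/LRM address is two dwords wide, so bump the
8c2ecf20Sopenharmony_ci	 * command length field to cover the extra (upper) address dword
8c2ecf20Sopenharmony_ci	 * emitted below.
8c2ecf20Sopenharmony_ci	 */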
16138c2ecf20Sopenharmony_ci	if (INTEL_GEN(stream->perf->i915) >= 8)
16148c2ecf20Sopenharmony_ci		cmd++;
16158c2ecf20Sopenharmony_ci
16168c2ecf20Sopenharmony_ci	for (d = 0; d < dword_count; d++) {
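8c2ecf20Sopenharmony_ci		/* command, MMIO offset, scratch GGTT address, upper address dword */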
16178c2ecf20Sopenharmony_ci		*cs++ = cmd;
16188c2ecf20Sopenharmony_ci		*cs++ = i915_mmio_reg_offset(reg) + 4 * d;
16198c2ecf20Sopenharmony_ci		*cs++ = intel_gt_scratch_offset(stream->engine->gt,
16208c2ecf20Sopenharmony_ci						offset) + 4 * d;
16218c2ecf20Sopenharmony_ci		*cs++ = 0;
16228c2ecf20Sopenharmony_ci	}
16238c2ecf20Sopenharmony_ci
16248c2ecf20Sopenharmony_ci	return cs;
16258c2ecf20Sopenharmony_ci}
16268c2ecf20Sopenharmony_ci
16278c2ecf20Sopenharmony_cistatic int alloc_noa_wait(struct i915_perf_stream *stream)
16288c2ecf20Sopenharmony_ci{
16298c2ecf20Sopenharmony_ci	struct drm_i915_private *i915 = stream->perf->i915;
16308c2ecf20Sopenharmony_ci	struct drm_i915_gem_object *bo;
16318c2ecf20Sopenharmony_ci	struct i915_vma *vma;
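8c2ecf20Sopenharmony_ci	/*
8c2ecf20Sopenharmony_ci	 * Program the delay as (2^64 - 1) - delay-in-ticks so that adding
8c2ecf20Sopenharmony_ci	 * the measured elapsed ticks sets the ALU carry flag once the delay
8c2ecf20Sopenharmony_ci	 * has expired (see the MI_MATH_ADD below).
8c2ecf20Sopenharmony_ci	 */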
16328c2ecf20Sopenharmony_ci	const u64 delay_ticks = 0xffffffffffffffff -
16338c2ecf20Sopenharmony_ci		i915_cs_timestamp_ns_to_ticks(i915, atomic64_read(&stream->perf->noa_programming_delay));
16348c2ecf20Sopenharmony_ci	const u32 base = stream->engine->mmio_base;
16358c2ecf20Sopenharmony_ci#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
16368c2ecf20Sopenharmony_ci	u32 *batch, *ts0, *cs, *jump;
16378c2ecf20Sopenharmony_ci	int ret, i;
16388c2ecf20Sopenharmony_ci	enum {
16398c2ecf20Sopenharmony_ci		START_TS,
16408c2ecf20Sopenharmony_ci		NOW_TS,
16418c2ecf20Sopenharmony_ci		DELTA_TS,
16428c2ecf20Sopenharmony_ci		JUMP_PREDICATE,
16438c2ecf20Sopenharmony_ci		DELTA_TARGET,
16448c2ecf20Sopenharmony_ci		N_CS_GPR
16458c2ecf20Sopenharmony_ci	};
16468c2ecf20Sopenharmony_ci
16478c2ecf20Sopenharmony_ci	bo = i915_gem_object_create_internal(i915, 4096);
16488c2ecf20Sopenharmony_ci	if (IS_ERR(bo)) {
16498c2ecf20Sopenharmony_ci		drm_err(&i915->drm,
16508c2ecf20Sopenharmony_ci			"Failed to allocate NOA wait batchbuffer\n");
16518c2ecf20Sopenharmony_ci		return PTR_ERR(bo);
16528c2ecf20Sopenharmony_ci	}
16538c2ecf20Sopenharmony_ci
16548c2ecf20Sopenharmony_ci	/*
16558c2ecf20Sopenharmony_ci	 * We pin this buffer in the GGTT because we jump into it from the
16568c2ecf20Sopenharmony_ci	 * OA config BOs: multiple config BOs embed a jump to this address,
16578c2ecf20Sopenharmony_ci	 * so it needs to remain fixed for the lifetime of the i915/perf stream.
16588c2ecf20Sopenharmony_ci	 */
16598c2ecf20Sopenharmony_ci	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, 0, PIN_HIGH);
16608c2ecf20Sopenharmony_ci	if (IS_ERR(vma)) {
16618c2ecf20Sopenharmony_ci		ret = PTR_ERR(vma);
16628c2ecf20Sopenharmony_ci		goto err_unref;
16638c2ecf20Sopenharmony_ci	}
16648c2ecf20Sopenharmony_ci
16658c2ecf20Sopenharmony_ci	batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
16668c2ecf20Sopenharmony_ci	if (IS_ERR(batch)) {
16678c2ecf20Sopenharmony_ci		ret = PTR_ERR(batch);
16688c2ecf20Sopenharmony_ci		goto err_unpin;
16698c2ecf20Sopenharmony_ci	}
16708c2ecf20Sopenharmony_ci
16718c2ecf20Sopenharmony_ci	/* Save registers. */
16728c2ecf20Sopenharmony_ci	for (i = 0; i < N_CS_GPR; i++)
16738c2ecf20Sopenharmony_ci		cs = save_restore_register(
16748c2ecf20Sopenharmony_ci			stream, cs, true /* save */, CS_GPR(i),
16758c2ecf20Sopenharmony_ci			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
16768c2ecf20Sopenharmony_ci	cs = save_restore_register(
16778c2ecf20Sopenharmony_ci		stream, cs, true /* save */, MI_PREDICATE_RESULT_1,
16788c2ecf20Sopenharmony_ci		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
16798c2ecf20Sopenharmony_ci
16808c2ecf20Sopenharmony_ci	/* First timestamp snapshot location. */
16818c2ecf20Sopenharmony_ci	ts0 = cs;
16828c2ecf20Sopenharmony_ci
16838c2ecf20Sopenharmony_ci	/*
16848c2ecf20Sopenharmony_ci	 * Initial snapshot of the timestamp register to implement the wait.
16858c2ecf20Sopenharmony_ci	 * We work with 32-bit values, so clear out the top 32 bits of the
16868c2ecf20Sopenharmony_ci	 * register since the ALU operates on 64-bit values.
16878c2ecf20Sopenharmony_ci	 */
16888c2ecf20Sopenharmony_ci	*cs++ = MI_LOAD_REGISTER_IMM(1);
16898c2ecf20Sopenharmony_ci	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
16908c2ecf20Sopenharmony_ci	*cs++ = 0;
16918c2ecf20Sopenharmony_ci	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
16928c2ecf20Sopenharmony_ci	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
16938c2ecf20Sopenharmony_ci	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
16948c2ecf20Sopenharmony_ci
16958c2ecf20Sopenharmony_ci	/*
16968c2ecf20Sopenharmony_ci	 * This is the location we're going to jump back into until the
16978c2ecf20Sopenharmony_ci	 * required amount of time has passed.
16988c2ecf20Sopenharmony_ci	 */
16998c2ecf20Sopenharmony_ci	jump = cs;
17008c2ecf20Sopenharmony_ci
17018c2ecf20Sopenharmony_ci	/*
17028c2ecf20Sopenharmony_ci	 * Take another snapshot of the timestamp register. Take care to
17038c2ecf20Sopenharmony_ci	 * clear the top 32 bits of CS_GPR(NOW_TS) as we're using it for
17048c2ecf20Sopenharmony_ci	 * other operations below.
17058c2ecf20Sopenharmony_ci	 */
17068c2ecf20Sopenharmony_ci	*cs++ = MI_LOAD_REGISTER_IMM(1);
17078c2ecf20Sopenharmony_ci	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
17088c2ecf20Sopenharmony_ci	*cs++ = 0;
17098c2ecf20Sopenharmony_ci	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
17108c2ecf20Sopenharmony_ci	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
17118c2ecf20Sopenharmony_ci	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
17128c2ecf20Sopenharmony_ci
17138c2ecf20Sopenharmony_ci	/*
17148c2ecf20Sopenharmony_ci	 * Do a diff between the 2 timestamps and store the result back into
17158c2ecf20Sopenharmony_ci	 * CS_GPR(1).
17168c2ecf20Sopenharmony_ci	 */
17178c2ecf20Sopenharmony_ci	*cs++ = MI_MATH(5);
17188c2ecf20Sopenharmony_ci	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
17198c2ecf20Sopenharmony_ci	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
17208c2ecf20Sopenharmony_ci	*cs++ = MI_MATH_SUB;
17218c2ecf20Sopenharmony_ci	*cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
17228c2ecf20Sopenharmony_ci	*cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
17238c2ecf20Sopenharmony_ci
17248c2ecf20Sopenharmony_ci	/*
17258c2ecf20Sopenharmony_ci	 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
17268c2ecf20Sopenharmony_ci	 * timestamp has rolled over its 32 bits) into the predicate register
17278c2ecf20Sopenharmony_ci	 * to be used for the predicated jump.
17288c2ecf20Sopenharmony_ci	 */
17298c2ecf20Sopenharmony_ci	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
17308c2ecf20Sopenharmony_ci	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
17318c2ecf20Sopenharmony_ci	*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
17328c2ecf20Sopenharmony_ci
17338c2ecf20Sopenharmony_ci	/* Restart from the beginning if we had timestamps roll over. */
17348c2ecf20Sopenharmony_ci	*cs++ = (INTEL_GEN(i915) < 8 ?
17358c2ecf20Sopenharmony_ci		 MI_BATCH_BUFFER_START :
17368c2ecf20Sopenharmony_ci		 MI_BATCH_BUFFER_START_GEN8) |
17378c2ecf20Sopenharmony_ci		MI_BATCH_PREDICATE;
17388c2ecf20Sopenharmony_ci	*cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
17398c2ecf20Sopenharmony_ci	*cs++ = 0;
17408c2ecf20Sopenharmony_ci
17418c2ecf20Sopenharmony_ci	/*
17428c2ecf20Sopenharmony_ci	 * Now take the diff between the two previous timestamps and add it to:
17438c2ecf20Sopenharmony_ci	 *      ((1 << 64) - 1) - delay (in CS timestamp ticks)
17448c2ecf20Sopenharmony_ci	 *
17458c2ecf20Sopenharmony_ci	 * When the Carry Flag contains 1 this means the elapsed time is
17468c2ecf20Sopenharmony_ci	 * longer than the expected delay, and we can exit the wait loop.
17478c2ecf20Sopenharmony_ci	 */
17488c2ecf20Sopenharmony_ci	*cs++ = MI_LOAD_REGISTER_IMM(2);
17498c2ecf20Sopenharmony_ci	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
17508c2ecf20Sopenharmony_ci	*cs++ = lower_32_bits(delay_ticks);
17518c2ecf20Sopenharmony_ci	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
17528c2ecf20Sopenharmony_ci	*cs++ = upper_32_bits(delay_ticks);
17538c2ecf20Sopenharmony_ci
17548c2ecf20Sopenharmony_ci	*cs++ = MI_MATH(4);
17558c2ecf20Sopenharmony_ci	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
17568c2ecf20Sopenharmony_ci	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
17578c2ecf20Sopenharmony_ci	*cs++ = MI_MATH_ADD;
17588c2ecf20Sopenharmony_ci	*cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
17598c2ecf20Sopenharmony_ci
17608c2ecf20Sopenharmony_ci	*cs++ = MI_ARB_CHECK;
17618c2ecf20Sopenharmony_ci
17628c2ecf20Sopenharmony_ci	/*
17638c2ecf20Sopenharmony_ci	 * Transfer the result into the predicate register to be used for the
17648c2ecf20Sopenharmony_ci	 * predicated jump.
17658c2ecf20Sopenharmony_ci	 */
17668c2ecf20Sopenharmony_ci	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
17678c2ecf20Sopenharmony_ci	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
17688c2ecf20Sopenharmony_ci	*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
17698c2ecf20Sopenharmony_ci
17708c2ecf20Sopenharmony_ci	/* Predicate the jump.  */
17718c2ecf20Sopenharmony_ci	*cs++ = (INTEL_GEN(i915) < 8 ?
17728c2ecf20Sopenharmony_ci		 MI_BATCH_BUFFER_START :
17738c2ecf20Sopenharmony_ci		 MI_BATCH_BUFFER_START_GEN8) |
17748c2ecf20Sopenharmony_ci		MI_BATCH_PREDICATE;
17758c2ecf20Sopenharmony_ci	*cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
17768c2ecf20Sopenharmony_ci	*cs++ = 0;
17778c2ecf20Sopenharmony_ci
17788c2ecf20Sopenharmony_ci	/* Restore registers. */
17798c2ecf20Sopenharmony_ci	for (i = 0; i < N_CS_GPR; i++)
17808c2ecf20Sopenharmony_ci		cs = save_restore_register(
17818c2ecf20Sopenharmony_ci			stream, cs, false /* restore */, CS_GPR(i),
17828c2ecf20Sopenharmony_ci			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
17838c2ecf20Sopenharmony_ci	cs = save_restore_register(
17848c2ecf20Sopenharmony_ci		stream, cs, false /* restore */, MI_PREDICATE_RESULT_1,
17858c2ecf20Sopenharmony_ci		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
17868c2ecf20Sopenharmony_ci
17878c2ecf20Sopenharmony_ci	/* And return to the ring. */
17888c2ecf20Sopenharmony_ci	*cs++ = MI_BATCH_BUFFER_END;
17898c2ecf20Sopenharmony_ci
17908c2ecf20Sopenharmony_ci	GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
17918c2ecf20Sopenharmony_ci
17928c2ecf20Sopenharmony_ci	i915_gem_object_flush_map(bo);
17938c2ecf20Sopenharmony_ci	__i915_gem_object_release_map(bo);
17948c2ecf20Sopenharmony_ci
17958c2ecf20Sopenharmony_ci	stream->noa_wait = vma;
17968c2ecf20Sopenharmony_ci	return 0;
17978c2ecf20Sopenharmony_ci
17988c2ecf20Sopenharmony_cierr_unpin:
17998c2ecf20Sopenharmony_ci	i915_vma_unpin_and_release(&vma, 0);
18008c2ecf20Sopenharmony_cierr_unref:
18018c2ecf20Sopenharmony_ci	i915_gem_object_put(bo);
18028c2ecf20Sopenharmony_ci	return ret;
18038c2ecf20Sopenharmony_ci}
18048c2ecf20Sopenharmony_ci
18058c2ecf20Sopenharmony_cistatic u32 *write_cs_mi_lri(u32 *cs,
18068c2ecf20Sopenharmony_ci			    const struct i915_oa_reg *reg_data,
18078c2ecf20Sopenharmony_ci			    u32 n_regs)
18088c2ecf20Sopenharmony_ci{
18098c2ecf20Sopenharmony_ci	u32 i;
18108c2ecf20Sopenharmony_ci
18118c2ecf20Sopenharmony_ci	for (i = 0; i < n_regs; i++) {
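8c2ecf20Sopenharmony_ci		/*
8c2ecf20Sopenharmony_ci		 * Start a new MI_LOAD_REGISTER_IMM packet every
8c2ecf20Sopenharmony_ci		 * MI_LOAD_REGISTER_IMM_MAX_REGS registers.
8c2ecf20Sopenharmony_ci		 */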
18128c2ecf20Sopenharmony_ci		if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
18138c2ecf20Sopenharmony_ci			u32 n_lri = min_t(u32,
18148c2ecf20Sopenharmony_ci					  n_regs - i,
18158c2ecf20Sopenharmony_ci					  MI_LOAD_REGISTER_IMM_MAX_REGS);
18168c2ecf20Sopenharmony_ci
18178c2ecf20Sopenharmony_ci			*cs++ = MI_LOAD_REGISTER_IMM(n_lri);
18188c2ecf20Sopenharmony_ci		}
18198c2ecf20Sopenharmony_ci		*cs++ = i915_mmio_reg_offset(reg_data[i].addr);
18208c2ecf20Sopenharmony_ci		*cs++ = reg_data[i].value;
18218c2ecf20Sopenharmony_ci	}
18228c2ecf20Sopenharmony_ci
18238c2ecf20Sopenharmony_ci	return cs;
18248c2ecf20Sopenharmony_ci}
18258c2ecf20Sopenharmony_ci
18268c2ecf20Sopenharmony_cistatic int num_lri_dwords(int num_regs)
18278c2ecf20Sopenharmony_ci{
18288c2ecf20Sopenharmony_ci	int count = 0;
18298c2ecf20Sopenharmony_ci
18308c2ecf20Sopenharmony_ci	if (num_regs > 0) {
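8c2ecf20Sopenharmony_ci		/*
8c2ecf20Sopenharmony_ci		 * One MI_LOAD_REGISTER_IMM header per group of up to
8c2ecf20Sopenharmony_ci		 * MI_LOAD_REGISTER_IMM_MAX_REGS registers, plus an
8c2ecf20Sopenharmony_ci		 * (offset, value) dword pair per register.
8c2ecf20Sopenharmony_ci		 */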
18318c2ecf20Sopenharmony_ci		count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
18328c2ecf20Sopenharmony_ci		count += num_regs * 2;
18338c2ecf20Sopenharmony_ci	}
18348c2ecf20Sopenharmony_ci
18358c2ecf20Sopenharmony_ci	return count;
18368c2ecf20Sopenharmony_ci}
18378c2ecf20Sopenharmony_ci
18388c2ecf20Sopenharmony_cistatic struct i915_oa_config_bo *
18398c2ecf20Sopenharmony_cialloc_oa_config_buffer(struct i915_perf_stream *stream,
18408c2ecf20Sopenharmony_ci		       struct i915_oa_config *oa_config)
18418c2ecf20Sopenharmony_ci{
18428c2ecf20Sopenharmony_ci	struct drm_i915_gem_object *obj;
18438c2ecf20Sopenharmony_ci	struct i915_oa_config_bo *oa_bo;
18448c2ecf20Sopenharmony_ci	size_t config_length = 0;
18458c2ecf20Sopenharmony_ci	u32 *cs;
18468c2ecf20Sopenharmony_ci	int err;
18478c2ecf20Sopenharmony_ci
18488c2ecf20Sopenharmony_ci	oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
18498c2ecf20Sopenharmony_ci	if (!oa_bo)
18508c2ecf20Sopenharmony_ci		return ERR_PTR(-ENOMEM);
18518c2ecf20Sopenharmony_ci
18528c2ecf20Sopenharmony_ci	config_length += num_lri_dwords(oa_config->mux_regs_len);
18538c2ecf20Sopenharmony_ci	config_length += num_lri_dwords(oa_config->b_counter_regs_len);
18548c2ecf20Sopenharmony_ci	config_length += num_lri_dwords(oa_config->flex_regs_len);
18558c2ecf20Sopenharmony_ci	config_length += 3; /* MI_BATCH_BUFFER_START */
18568c2ecf20Sopenharmony_ci	config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
18578c2ecf20Sopenharmony_ci
18588c2ecf20Sopenharmony_ci	obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
18598c2ecf20Sopenharmony_ci	if (IS_ERR(obj)) {
18608c2ecf20Sopenharmony_ci		err = PTR_ERR(obj);
18618c2ecf20Sopenharmony_ci		goto err_free;
18628c2ecf20Sopenharmony_ci	}
18638c2ecf20Sopenharmony_ci
18648c2ecf20Sopenharmony_ci	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
18658c2ecf20Sopenharmony_ci	if (IS_ERR(cs)) {
18668c2ecf20Sopenharmony_ci		err = PTR_ERR(cs);
18678c2ecf20Sopenharmony_ci		goto err_oa_bo;
18688c2ecf20Sopenharmony_ci	}
18698c2ecf20Sopenharmony_ci
18708c2ecf20Sopenharmony_ci	cs = write_cs_mi_lri(cs,
18718c2ecf20Sopenharmony_ci			     oa_config->mux_regs,
18728c2ecf20Sopenharmony_ci			     oa_config->mux_regs_len);
18738c2ecf20Sopenharmony_ci	cs = write_cs_mi_lri(cs,
18748c2ecf20Sopenharmony_ci			     oa_config->b_counter_regs,
18758c2ecf20Sopenharmony_ci			     oa_config->b_counter_regs_len);
18768c2ecf20Sopenharmony_ci	cs = write_cs_mi_lri(cs,
18778c2ecf20Sopenharmony_ci			     oa_config->flex_regs,
18788c2ecf20Sopenharmony_ci			     oa_config->flex_regs_len);
18798c2ecf20Sopenharmony_ci
18808c2ecf20Sopenharmony_ci	/* Jump into the active wait. */
18818c2ecf20Sopenharmony_ci	*cs++ = (INTEL_GEN(stream->perf->i915) < 8 ?
18828c2ecf20Sopenharmony_ci		 MI_BATCH_BUFFER_START :
18838c2ecf20Sopenharmony_ci		 MI_BATCH_BUFFER_START_GEN8);
18848c2ecf20Sopenharmony_ci	*cs++ = i915_ggtt_offset(stream->noa_wait);
18858c2ecf20Sopenharmony_ci	*cs++ = 0;
18868c2ecf20Sopenharmony_ci
18878c2ecf20Sopenharmony_ci	i915_gem_object_flush_map(obj);
18888c2ecf20Sopenharmony_ci	__i915_gem_object_release_map(obj);
18898c2ecf20Sopenharmony_ci
18908c2ecf20Sopenharmony_ci	oa_bo->vma = i915_vma_instance(obj,
18918c2ecf20Sopenharmony_ci				       &stream->engine->gt->ggtt->vm,
18928c2ecf20Sopenharmony_ci				       NULL);
18938c2ecf20Sopenharmony_ci	if (IS_ERR(oa_bo->vma)) {
18948c2ecf20Sopenharmony_ci		err = PTR_ERR(oa_bo->vma);
18958c2ecf20Sopenharmony_ci		goto err_oa_bo;
18968c2ecf20Sopenharmony_ci	}
18978c2ecf20Sopenharmony_ci
18988c2ecf20Sopenharmony_ci	oa_bo->oa_config = i915_oa_config_get(oa_config);
18998c2ecf20Sopenharmony_ci	llist_add(&oa_bo->node, &stream->oa_config_bos);
19008c2ecf20Sopenharmony_ci
19018c2ecf20Sopenharmony_ci	return oa_bo;
19028c2ecf20Sopenharmony_ci
19038c2ecf20Sopenharmony_cierr_oa_bo:
19048c2ecf20Sopenharmony_ci	i915_gem_object_put(obj);
19058c2ecf20Sopenharmony_cierr_free:
19068c2ecf20Sopenharmony_ci	kfree(oa_bo);
19078c2ecf20Sopenharmony_ci	return ERR_PTR(err);
19088c2ecf20Sopenharmony_ci}
19098c2ecf20Sopenharmony_ci
19108c2ecf20Sopenharmony_cistatic struct i915_vma *
19118c2ecf20Sopenharmony_ciget_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
19128c2ecf20Sopenharmony_ci{
19138c2ecf20Sopenharmony_ci	struct i915_oa_config_bo *oa_bo;
19148c2ecf20Sopenharmony_ci
19158c2ecf20Sopenharmony_ci	/*
19168c2ecf20Sopenharmony_ci	 * Look for the buffer in the already allocated BOs attached
19178c2ecf20Sopenharmony_ci	 * to the stream.
19188c2ecf20Sopenharmony_ci	 */
19198c2ecf20Sopenharmony_ci	llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
19208c2ecf20Sopenharmony_ci		if (oa_bo->oa_config == oa_config &&
19218c2ecf20Sopenharmony_ci		    memcmp(oa_bo->oa_config->uuid,
19228c2ecf20Sopenharmony_ci			   oa_config->uuid,
19238c2ecf20Sopenharmony_ci			   sizeof(oa_config->uuid)) == 0)
19248c2ecf20Sopenharmony_ci			goto out;
19258c2ecf20Sopenharmony_ci	}
19268c2ecf20Sopenharmony_ci
19278c2ecf20Sopenharmony_ci	oa_bo = alloc_oa_config_buffer(stream, oa_config);
19288c2ecf20Sopenharmony_ci	if (IS_ERR(oa_bo))
19298c2ecf20Sopenharmony_ci		return ERR_CAST(oa_bo);
19308c2ecf20Sopenharmony_ci
19318c2ecf20Sopenharmony_ciout:
19328c2ecf20Sopenharmony_ci	return i915_vma_get(oa_bo->vma);
19338c2ecf20Sopenharmony_ci}
19348c2ecf20Sopenharmony_ci
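/*
 * Submit a request on @ce that executes the OA config batch for @oa_config
 * (built by alloc_oa_config_buffer() and chained into the NOA wait). When
 * @active is provided, the request is ordered after any pending context
 * modifications and tracked so callers can wait for the configuration to
 * land on the hardware.
 */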
19358c2ecf20Sopenharmony_cistatic int
19368c2ecf20Sopenharmony_ciemit_oa_config(struct i915_perf_stream *stream,
19378c2ecf20Sopenharmony_ci	       struct i915_oa_config *oa_config,
19388c2ecf20Sopenharmony_ci	       struct intel_context *ce,
19398c2ecf20Sopenharmony_ci	       struct i915_active *active)
19408c2ecf20Sopenharmony_ci{
19418c2ecf20Sopenharmony_ci	struct i915_request *rq;
19428c2ecf20Sopenharmony_ci	struct i915_vma *vma;
19438c2ecf20Sopenharmony_ci	struct i915_gem_ww_ctx ww;
19448c2ecf20Sopenharmony_ci	int err;
19458c2ecf20Sopenharmony_ci
19468c2ecf20Sopenharmony_ci	vma = get_oa_vma(stream, oa_config);
19478c2ecf20Sopenharmony_ci	if (IS_ERR(vma))
19488c2ecf20Sopenharmony_ci		return PTR_ERR(vma);
19498c2ecf20Sopenharmony_ci
19508c2ecf20Sopenharmony_ci	i915_gem_ww_ctx_init(&ww, true);
19518c2ecf20Sopenharmony_ciretry:
19528c2ecf20Sopenharmony_ci	err = i915_gem_object_lock(vma->obj, &ww);
19538c2ecf20Sopenharmony_ci	if (err)
19548c2ecf20Sopenharmony_ci		goto err;
19558c2ecf20Sopenharmony_ci
19568c2ecf20Sopenharmony_ci	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
19578c2ecf20Sopenharmony_ci	if (err)
19588c2ecf20Sopenharmony_ci		goto err;
19598c2ecf20Sopenharmony_ci
19608c2ecf20Sopenharmony_ci	intel_engine_pm_get(ce->engine);
19618c2ecf20Sopenharmony_ci	rq = i915_request_create(ce);
19628c2ecf20Sopenharmony_ci	intel_engine_pm_put(ce->engine);
19638c2ecf20Sopenharmony_ci	if (IS_ERR(rq)) {
19648c2ecf20Sopenharmony_ci		err = PTR_ERR(rq);
19658c2ecf20Sopenharmony_ci		goto err_vma_unpin;
19668c2ecf20Sopenharmony_ci	}
19678c2ecf20Sopenharmony_ci
19688c2ecf20Sopenharmony_ci	if (!IS_ERR_OR_NULL(active)) {
19698c2ecf20Sopenharmony_ci		/* After all individual context modifications */
19708c2ecf20Sopenharmony_ci		err = i915_request_await_active(rq, active,
19718c2ecf20Sopenharmony_ci						I915_ACTIVE_AWAIT_ACTIVE);
19728c2ecf20Sopenharmony_ci		if (err)
19738c2ecf20Sopenharmony_ci			goto err_add_request;
19748c2ecf20Sopenharmony_ci
19758c2ecf20Sopenharmony_ci		err = i915_active_add_request(active, rq);
19768c2ecf20Sopenharmony_ci		if (err)
19778c2ecf20Sopenharmony_ci			goto err_add_request;
19788c2ecf20Sopenharmony_ci	}
19798c2ecf20Sopenharmony_ci
19808c2ecf20Sopenharmony_ci	err = i915_request_await_object(rq, vma->obj, 0);
19818c2ecf20Sopenharmony_ci	if (!err)
19828c2ecf20Sopenharmony_ci		err = i915_vma_move_to_active(vma, rq, 0);
19838c2ecf20Sopenharmony_ci	if (err)
19848c2ecf20Sopenharmony_ci		goto err_add_request;
19858c2ecf20Sopenharmony_ci
19868c2ecf20Sopenharmony_ci	err = rq->engine->emit_bb_start(rq,
19878c2ecf20Sopenharmony_ci					vma->node.start, 0,
19888c2ecf20Sopenharmony_ci					I915_DISPATCH_SECURE);
19898c2ecf20Sopenharmony_ci	if (err)
19908c2ecf20Sopenharmony_ci		goto err_add_request;
19918c2ecf20Sopenharmony_ci
19928c2ecf20Sopenharmony_cierr_add_request:
19938c2ecf20Sopenharmony_ci	i915_request_add(rq);
19948c2ecf20Sopenharmony_cierr_vma_unpin:
19958c2ecf20Sopenharmony_ci	i915_vma_unpin(vma);
19968c2ecf20Sopenharmony_cierr:
19978c2ecf20Sopenharmony_ci	if (err == -EDEADLK) {
19988c2ecf20Sopenharmony_ci		err = i915_gem_ww_ctx_backoff(&ww);
19998c2ecf20Sopenharmony_ci		if (!err)
20008c2ecf20Sopenharmony_ci			goto retry;
20018c2ecf20Sopenharmony_ci	}
20028c2ecf20Sopenharmony_ci
20038c2ecf20Sopenharmony_ci	i915_gem_ww_ctx_fini(&ww);
20048c2ecf20Sopenharmony_ci	i915_vma_put(vma);
20058c2ecf20Sopenharmony_ci	return err;
20068c2ecf20Sopenharmony_ci}
20078c2ecf20Sopenharmony_ci
20088c2ecf20Sopenharmony_cistatic struct intel_context *oa_context(struct i915_perf_stream *stream)
20098c2ecf20Sopenharmony_ci{
20108c2ecf20Sopenharmony_ci	return stream->pinned_ctx ?: stream->engine->kernel_context;
20118c2ecf20Sopenharmony_ci}
20128c2ecf20Sopenharmony_ci
20138c2ecf20Sopenharmony_cistatic int
20148c2ecf20Sopenharmony_cihsw_enable_metric_set(struct i915_perf_stream *stream,
20158c2ecf20Sopenharmony_ci		      struct i915_active *active)
20168c2ecf20Sopenharmony_ci{
20178c2ecf20Sopenharmony_ci	struct intel_uncore *uncore = stream->uncore;
20188c2ecf20Sopenharmony_ci
20198c2ecf20Sopenharmony_ci	/*
20208c2ecf20Sopenharmony_ci	 * PRM:
20218c2ecf20Sopenharmony_ci	 *
20228c2ecf20Sopenharmony_ci	 * OA unit is using “crclk” for its functionality. When trunk
20238c2ecf20Sopenharmony_ci	 * level clock gating takes place, OA clock would be gated,
20248c2ecf20Sopenharmony_ci	 * unable to count the events from non-render clock domain.
20258c2ecf20Sopenharmony_ci	 * Render clock gating must be disabled when OA is enabled to
20268c2ecf20Sopenharmony_ci	 * count the events from non-render domain. Unit level clock
20278c2ecf20Sopenharmony_ci	 * gating for RCS should also be disabled.
20288c2ecf20Sopenharmony_ci	 */
20298c2ecf20Sopenharmony_ci	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
20308c2ecf20Sopenharmony_ci			 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
20318c2ecf20Sopenharmony_ci	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
20328c2ecf20Sopenharmony_ci			 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
20338c2ecf20Sopenharmony_ci
20348c2ecf20Sopenharmony_ci	return emit_oa_config(stream,
20358c2ecf20Sopenharmony_ci			      stream->oa_config, oa_context(stream),
20368c2ecf20Sopenharmony_ci			      active);
20378c2ecf20Sopenharmony_ci}
20388c2ecf20Sopenharmony_ci
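/* Undo hsw_enable_metric_set(): restore clock gating and disable NOA to save power. */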
20398c2ecf20Sopenharmony_cistatic void hsw_disable_metric_set(struct i915_perf_stream *stream)
20408c2ecf20Sopenharmony_ci{
20418c2ecf20Sopenharmony_ci	struct intel_uncore *uncore = stream->uncore;
20428c2ecf20Sopenharmony_ci
20438c2ecf20Sopenharmony_ci	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
20448c2ecf20Sopenharmony_ci			 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
20458c2ecf20Sopenharmony_ci	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
20468c2ecf20Sopenharmony_ci			 0, GEN7_DOP_CLOCK_GATE_ENABLE);
20478c2ecf20Sopenharmony_ci
20488c2ecf20Sopenharmony_ci	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
20498c2ecf20Sopenharmony_ci}
20508c2ecf20Sopenharmony_ci
20518c2ecf20Sopenharmony_cistatic u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
20528c2ecf20Sopenharmony_ci			      i915_reg_t reg)
20538c2ecf20Sopenharmony_ci{
20548c2ecf20Sopenharmony_ci	u32 mmio = i915_mmio_reg_offset(reg);
20558c2ecf20Sopenharmony_ci	int i;
20568c2ecf20Sopenharmony_ci
20578c2ecf20Sopenharmony_ci	/*
20588c2ecf20Sopenharmony_ci	 * This arbitrary default will select the 'EU FPU0 Pipeline
20598c2ecf20Sopenharmony_ci	 * Active' event. In the future it's anticipated that there
20608c2ecf20Sopenharmony_ci	 * will be an explicit 'No Event' we can select, but not yet...
20618c2ecf20Sopenharmony_ci	 */
20628c2ecf20Sopenharmony_ci	if (!oa_config)
20638c2ecf20Sopenharmony_ci		return 0;
20648c2ecf20Sopenharmony_ci
20658c2ecf20Sopenharmony_ci	for (i = 0; i < oa_config->flex_regs_len; i++) {
20668c2ecf20Sopenharmony_ci		if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
20678c2ecf20Sopenharmony_ci			return oa_config->flex_regs[i].value;
20688c2ecf20Sopenharmony_ci	}
20698c2ecf20Sopenharmony_ci
20708c2ecf20Sopenharmony_ci	return 0;
20718c2ecf20Sopenharmony_ci}

20728c2ecf20Sopenharmony_ci/*
20738c2ecf20Sopenharmony_ci * NB: It must always remain pointer safe to run this even if the OA unit
20748c2ecf20Sopenharmony_ci * has been disabled.
20758c2ecf20Sopenharmony_ci *
20768c2ecf20Sopenharmony_ci * It's fine to put out-of-date values into these per-context registers
20778c2ecf20Sopenharmony_ci * in the case that the OA unit has been disabled.
20788c2ecf20Sopenharmony_ci */
20798c2ecf20Sopenharmony_cistatic void
20808c2ecf20Sopenharmony_cigen8_update_reg_state_unlocked(const struct intel_context *ce,
20818c2ecf20Sopenharmony_ci			       const struct i915_perf_stream *stream)
20828c2ecf20Sopenharmony_ci{
20838c2ecf20Sopenharmony_ci	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
20848c2ecf20Sopenharmony_ci	u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
20858c2ecf20Sopenharmony_ci	/* The MMIO offsets for Flex EU registers aren't contiguous */
20868c2ecf20Sopenharmony_ci	i915_reg_t flex_regs[] = {
20878c2ecf20Sopenharmony_ci		EU_PERF_CNTL0,
20888c2ecf20Sopenharmony_ci		EU_PERF_CNTL1,
20898c2ecf20Sopenharmony_ci		EU_PERF_CNTL2,
20908c2ecf20Sopenharmony_ci		EU_PERF_CNTL3,
20918c2ecf20Sopenharmony_ci		EU_PERF_CNTL4,
20928c2ecf20Sopenharmony_ci		EU_PERF_CNTL5,
20938c2ecf20Sopenharmony_ci		EU_PERF_CNTL6,
20948c2ecf20Sopenharmony_ci	};
20958c2ecf20Sopenharmony_ci	u32 *reg_state = ce->lrc_reg_state;
20968c2ecf20Sopenharmony_ci	int i;
20978c2ecf20Sopenharmony_ci
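	/*
	 * The register state image holds (offset, value) pairs, so the +1
	 * below indexes the value dword of each register.
	 */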
20988c2ecf20Sopenharmony_ci	reg_state[ctx_oactxctrl + 1] =
20998c2ecf20Sopenharmony_ci		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
21008c2ecf20Sopenharmony_ci		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
21018c2ecf20Sopenharmony_ci		GEN8_OA_COUNTER_RESUME;
21028c2ecf20Sopenharmony_ci
21038c2ecf20Sopenharmony_ci	for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
21048c2ecf20Sopenharmony_ci		reg_state[ctx_flexeu0 + i * 2 + 1] =
21058c2ecf20Sopenharmony_ci			oa_config_flex_reg(stream->oa_config, flex_regs[i]);
21068c2ecf20Sopenharmony_ci}
21078c2ecf20Sopenharmony_ci
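/*
 * A single register update: @reg is the MMIO register, @offset the dword
 * offset of its value within the context image, and @value what to program.
 * gen8_store_flex() consumes @offset, gen8_load_flex() consumes @reg.
 */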
21088c2ecf20Sopenharmony_cistruct flex {
21098c2ecf20Sopenharmony_ci	i915_reg_t reg;
21108c2ecf20Sopenharmony_ci	u32 offset;
21118c2ecf20Sopenharmony_ci	u32 value;
21128c2ecf20Sopenharmony_ci};
21138c2ecf20Sopenharmony_ci
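/*
 * Poke new values directly into a context's saved register state image via
 * GGTT-addressed MI_STORE_DWORD_IMM, four dwords per register, so they take
 * effect when the context image is next loaded.
 */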
21148c2ecf20Sopenharmony_cistatic int
21158c2ecf20Sopenharmony_cigen8_store_flex(struct i915_request *rq,
21168c2ecf20Sopenharmony_ci		struct intel_context *ce,
21178c2ecf20Sopenharmony_ci		const struct flex *flex, unsigned int count)
21188c2ecf20Sopenharmony_ci{
21198c2ecf20Sopenharmony_ci	u32 offset;
21208c2ecf20Sopenharmony_ci	u32 *cs;
21218c2ecf20Sopenharmony_ci
21228c2ecf20Sopenharmony_ci	cs = intel_ring_begin(rq, 4 * count);
21238c2ecf20Sopenharmony_ci	if (IS_ERR(cs))
21248c2ecf20Sopenharmony_ci		return PTR_ERR(cs);
21258c2ecf20Sopenharmony_ci
21268c2ecf20Sopenharmony_ci	offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
21278c2ecf20Sopenharmony_ci	do {
21288c2ecf20Sopenharmony_ci		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
21298c2ecf20Sopenharmony_ci		*cs++ = offset + flex->offset * sizeof(u32);
21308c2ecf20Sopenharmony_ci		*cs++ = 0;
21318c2ecf20Sopenharmony_ci		*cs++ = flex->value;
21328c2ecf20Sopenharmony_ci	} while (flex++, --count);
21338c2ecf20Sopenharmony_ci
21348c2ecf20Sopenharmony_ci	intel_ring_advance(rq, cs);
21358c2ecf20Sopenharmony_ci
21368c2ecf20Sopenharmony_ci	return 0;
21378c2ecf20Sopenharmony_ci}
21388c2ecf20Sopenharmony_ci
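/*
 * Emit an MI_LOAD_REGISTER_IMM from the target context itself so the new
 * values land in the live registers immediately, not just in the saved
 * context image.
 */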
21398c2ecf20Sopenharmony_cistatic int
21408c2ecf20Sopenharmony_cigen8_load_flex(struct i915_request *rq,
21418c2ecf20Sopenharmony_ci	       struct intel_context *ce,
21428c2ecf20Sopenharmony_ci	       const struct flex *flex, unsigned int count)
21438c2ecf20Sopenharmony_ci{
21448c2ecf20Sopenharmony_ci	u32 *cs;
21458c2ecf20Sopenharmony_ci
21468c2ecf20Sopenharmony_ci	GEM_BUG_ON(!count || count > 63);
21478c2ecf20Sopenharmony_ci
21488c2ecf20Sopenharmony_ci	cs = intel_ring_begin(rq, 2 * count + 2);
21498c2ecf20Sopenharmony_ci	if (IS_ERR(cs))
21508c2ecf20Sopenharmony_ci		return PTR_ERR(cs);
21518c2ecf20Sopenharmony_ci
21528c2ecf20Sopenharmony_ci	*cs++ = MI_LOAD_REGISTER_IMM(count);
21538c2ecf20Sopenharmony_ci	do {
21548c2ecf20Sopenharmony_ci		*cs++ = i915_mmio_reg_offset(flex->reg);
21558c2ecf20Sopenharmony_ci		*cs++ = flex->value;
21568c2ecf20Sopenharmony_ci	} while (flex++, --count);
21578c2ecf20Sopenharmony_ci	*cs++ = MI_NOOP;
21588c2ecf20Sopenharmony_ci
21598c2ecf20Sopenharmony_ci	intel_ring_advance(rq, cs);
21608c2ecf20Sopenharmony_ci
21618c2ecf20Sopenharmony_ci	return 0;
21628c2ecf20Sopenharmony_ci}
21638c2ecf20Sopenharmony_ci
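/*
 * Rewrite another context's saved register state from a kernel context
 * request, serialised against the target context so the stores cannot race
 * with it being scheduled in.
 */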
21648c2ecf20Sopenharmony_cistatic int gen8_modify_context(struct intel_context *ce,
21658c2ecf20Sopenharmony_ci			       const struct flex *flex, unsigned int count)
21668c2ecf20Sopenharmony_ci{
21678c2ecf20Sopenharmony_ci	struct i915_request *rq;
21688c2ecf20Sopenharmony_ci	int err;
21698c2ecf20Sopenharmony_ci
21708c2ecf20Sopenharmony_ci	rq = intel_engine_create_kernel_request(ce->engine);
21718c2ecf20Sopenharmony_ci	if (IS_ERR(rq))
21728c2ecf20Sopenharmony_ci		return PTR_ERR(rq);
21738c2ecf20Sopenharmony_ci
21748c2ecf20Sopenharmony_ci	/* Serialise with the remote context */
21758c2ecf20Sopenharmony_ci	err = intel_context_prepare_remote_request(ce, rq);
21768c2ecf20Sopenharmony_ci	if (err == 0)
21778c2ecf20Sopenharmony_ci		err = gen8_store_flex(rq, ce, flex, count);
21788c2ecf20Sopenharmony_ci
21798c2ecf20Sopenharmony_ci	i915_request_add(rq);
21808c2ecf20Sopenharmony_ci	return err;
21818c2ecf20Sopenharmony_ci}
21828c2ecf20Sopenharmony_ci
21838c2ecf20Sopenharmony_cistatic int
21848c2ecf20Sopenharmony_cigen8_modify_self(struct intel_context *ce,
21858c2ecf20Sopenharmony_ci		 const struct flex *flex, unsigned int count,
21868c2ecf20Sopenharmony_ci		 struct i915_active *active)
21878c2ecf20Sopenharmony_ci{
21888c2ecf20Sopenharmony_ci	struct i915_request *rq;
21898c2ecf20Sopenharmony_ci	int err;
21908c2ecf20Sopenharmony_ci
21918c2ecf20Sopenharmony_ci	intel_engine_pm_get(ce->engine);
21928c2ecf20Sopenharmony_ci	rq = i915_request_create(ce);
21938c2ecf20Sopenharmony_ci	intel_engine_pm_put(ce->engine);
21948c2ecf20Sopenharmony_ci	if (IS_ERR(rq))
21958c2ecf20Sopenharmony_ci		return PTR_ERR(rq);
21968c2ecf20Sopenharmony_ci
21978c2ecf20Sopenharmony_ci	if (!IS_ERR_OR_NULL(active)) {
21988c2ecf20Sopenharmony_ci		err = i915_active_add_request(active, rq);
21998c2ecf20Sopenharmony_ci		if (err)
22008c2ecf20Sopenharmony_ci			goto err_add_request;
22018c2ecf20Sopenharmony_ci	}
22028c2ecf20Sopenharmony_ci
22038c2ecf20Sopenharmony_ci	err = gen8_load_flex(rq, ce, flex, count);
22048c2ecf20Sopenharmony_ci	if (err)
22058c2ecf20Sopenharmony_ci		goto err_add_request;
22068c2ecf20Sopenharmony_ci
22078c2ecf20Sopenharmony_cierr_add_request:
22088c2ecf20Sopenharmony_ci	i915_request_add(rq);
22098c2ecf20Sopenharmony_ci	return err;
22108c2ecf20Sopenharmony_ci}
22118c2ecf20Sopenharmony_ci
22128c2ecf20Sopenharmony_cistatic int gen8_configure_context(struct i915_gem_context *ctx,
22138c2ecf20Sopenharmony_ci				  struct flex *flex, unsigned int count)
22148c2ecf20Sopenharmony_ci{
22158c2ecf20Sopenharmony_ci	struct i915_gem_engines_iter it;
22168c2ecf20Sopenharmony_ci	struct intel_context *ce;
22178c2ecf20Sopenharmony_ci	int err = 0;
22188c2ecf20Sopenharmony_ci
22198c2ecf20Sopenharmony_ci	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
22208c2ecf20Sopenharmony_ci		GEM_BUG_ON(ce == ce->engine->kernel_context);
22218c2ecf20Sopenharmony_ci
22228c2ecf20Sopenharmony_ci		if (ce->engine->class != RENDER_CLASS)
22238c2ecf20Sopenharmony_ci			continue;
22248c2ecf20Sopenharmony_ci
22258c2ecf20Sopenharmony_ci		/* Otherwise OA settings will be set upon first use */
22268c2ecf20Sopenharmony_ci		if (!intel_context_pin_if_active(ce))
22278c2ecf20Sopenharmony_ci			continue;
22288c2ecf20Sopenharmony_ci
22298c2ecf20Sopenharmony_ci		flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
22308c2ecf20Sopenharmony_ci		err = gen8_modify_context(ce, flex, count);
22318c2ecf20Sopenharmony_ci
22328c2ecf20Sopenharmony_ci		intel_context_unpin(ce);
22338c2ecf20Sopenharmony_ci		if (err)
22348c2ecf20Sopenharmony_ci			break;
22358c2ecf20Sopenharmony_ci	}
22368c2ecf20Sopenharmony_ci	i915_gem_context_unlock_engines(ctx);
22378c2ecf20Sopenharmony_ci
22388c2ecf20Sopenharmony_ci	return err;
22398c2ecf20Sopenharmony_ci}
22408c2ecf20Sopenharmony_ci
22418c2ecf20Sopenharmony_cistatic int gen12_configure_oar_context(struct i915_perf_stream *stream,
22428c2ecf20Sopenharmony_ci				       struct i915_active *active)
22438c2ecf20Sopenharmony_ci{
22448c2ecf20Sopenharmony_ci	int err;
22458c2ecf20Sopenharmony_ci	struct intel_context *ce = stream->pinned_ctx;
22468c2ecf20Sopenharmony_ci	u32 format = stream->oa_buffer.format;
22478c2ecf20Sopenharmony_ci	struct flex regs_context[] = {
22488c2ecf20Sopenharmony_ci		{
22498c2ecf20Sopenharmony_ci			GEN8_OACTXCONTROL,
22508c2ecf20Sopenharmony_ci			stream->perf->ctx_oactxctrl_offset + 1,
22518c2ecf20Sopenharmony_ci			active ? GEN8_OA_COUNTER_RESUME : 0,
22528c2ecf20Sopenharmony_ci		},
22538c2ecf20Sopenharmony_ci	};
22548c2ecf20Sopenharmony_ci	/* Offsets in regs_lri are not used since this configuration is only
22558c2ecf20Sopenharmony_ci	 * applied using LRI. Initialize the correct offsets for posterity.
22568c2ecf20Sopenharmony_ci	 */
22578c2ecf20Sopenharmony_ci#define GEN12_OAR_OACONTROL_OFFSET 0x5B0
22588c2ecf20Sopenharmony_ci	struct flex regs_lri[] = {
22598c2ecf20Sopenharmony_ci		{
22608c2ecf20Sopenharmony_ci			GEN12_OAR_OACONTROL,
22618c2ecf20Sopenharmony_ci			GEN12_OAR_OACONTROL_OFFSET + 1,
22628c2ecf20Sopenharmony_ci			(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
22638c2ecf20Sopenharmony_ci			(active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
22648c2ecf20Sopenharmony_ci		},
22658c2ecf20Sopenharmony_ci		{
22668c2ecf20Sopenharmony_ci			RING_CONTEXT_CONTROL(ce->engine->mmio_base),
22678c2ecf20Sopenharmony_ci			CTX_CONTEXT_CONTROL,
22688c2ecf20Sopenharmony_ci			_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
22698c2ecf20Sopenharmony_ci				      active ?
22708c2ecf20Sopenharmony_ci				      GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
22718c2ecf20Sopenharmony_ci				      0)
22728c2ecf20Sopenharmony_ci		},
22738c2ecf20Sopenharmony_ci	};
22748c2ecf20Sopenharmony_ci
22758c2ecf20Sopenharmony_ci	/* Modify the context image of the pinned context with regs_context. */
22768c2ecf20Sopenharmony_ci	err = intel_context_lock_pinned(ce);
22778c2ecf20Sopenharmony_ci	if (err)
22788c2ecf20Sopenharmony_ci		return err;
22798c2ecf20Sopenharmony_ci
22808c2ecf20Sopenharmony_ci	err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context));
22818c2ecf20Sopenharmony_ci	intel_context_unlock_pinned(ce);
22828c2ecf20Sopenharmony_ci	if (err)
22838c2ecf20Sopenharmony_ci		return err;
22848c2ecf20Sopenharmony_ci
22858c2ecf20Sopenharmony_ci	/* Apply regs_lri using LRI with pinned context */
22868c2ecf20Sopenharmony_ci	return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
22878c2ecf20Sopenharmony_ci}
22888c2ecf20Sopenharmony_ci
22898c2ecf20Sopenharmony_ci/*
22908c2ecf20Sopenharmony_ci * Manages updating the per-context aspects of the OA stream
22918c2ecf20Sopenharmony_ci * configuration across all contexts.
22928c2ecf20Sopenharmony_ci *
22938c2ecf20Sopenharmony_ci * The awkward consideration here is that OACTXCONTROL controls the
22948c2ecf20Sopenharmony_ci * exponent for periodic sampling which is primarily used for system
22958c2ecf20Sopenharmony_ci * wide profiling where we'd like a consistent sampling period even in
22968c2ecf20Sopenharmony_ci * the face of context switches.
22978c2ecf20Sopenharmony_ci *
22988c2ecf20Sopenharmony_ci * Our approach of updating the register state context (as opposed to
22998c2ecf20Sopenharmony_ci * say using a workaround batch buffer) ensures that the hardware
23008c2ecf20Sopenharmony_ci * won't automatically reload an out-of-date timer exponent even
23018c2ecf20Sopenharmony_ci * transiently before a WA BB could be parsed.
23028c2ecf20Sopenharmony_ci *
23038c2ecf20Sopenharmony_ci * This function needs to:
23048c2ecf20Sopenharmony_ci * - Ensure the currently running context's per-context OA state is
23058c2ecf20Sopenharmony_ci *   updated
23068c2ecf20Sopenharmony_ci * - Ensure that all existing contexts will have the correct per-context
23078c2ecf20Sopenharmony_ci *   OA state if they are scheduled for use.
23088c2ecf20Sopenharmony_ci * - Ensure any new contexts will be initialized with the correct
23098c2ecf20Sopenharmony_ci *   per-context OA state.
23108c2ecf20Sopenharmony_ci *
23118c2ecf20Sopenharmony_ci * Note: it's only the RCS/Render context that has any OA state.
23128c2ecf20Sopenharmony_ci * Note: the first flex register passed must always be R_PWR_CLK_STATE
23138c2ecf20Sopenharmony_ci */
23148c2ecf20Sopenharmony_cistatic int
23158c2ecf20Sopenharmony_cioa_configure_all_contexts(struct i915_perf_stream *stream,
23168c2ecf20Sopenharmony_ci			  struct flex *regs,
23178c2ecf20Sopenharmony_ci			  size_t num_regs,
23188c2ecf20Sopenharmony_ci			  struct i915_active *active)
23198c2ecf20Sopenharmony_ci{
23208c2ecf20Sopenharmony_ci	struct drm_i915_private *i915 = stream->perf->i915;
23218c2ecf20Sopenharmony_ci	struct intel_engine_cs *engine;
23228c2ecf20Sopenharmony_ci	struct i915_gem_context *ctx, *cn;
23238c2ecf20Sopenharmony_ci	int err;
23248c2ecf20Sopenharmony_ci
23258c2ecf20Sopenharmony_ci	lockdep_assert_held(&stream->perf->lock);
23268c2ecf20Sopenharmony_ci
23278c2ecf20Sopenharmony_ci	/*
23288c2ecf20Sopenharmony_ci	 * The OA register config is set up through the context image. This image
23298c2ecf20Sopenharmony_ci	 * might be written to by the GPU on context switch (in particular on
23308c2ecf20Sopenharmony_ci	 * lite-restore). This means we can't safely update a context's image,
23318c2ecf20Sopenharmony_ci	 * if this context is scheduled/submitted to run on the GPU.
23328c2ecf20Sopenharmony_ci	 *
23338c2ecf20Sopenharmony_ci	 * We could emit the OA register config through the batch buffer but
23348c2ecf20Sopenharmony_ci	 * this might leave a small interval of time where the OA unit is
23358c2ecf20Sopenharmony_ci	 * configured at an invalid sampling period.
23368c2ecf20Sopenharmony_ci	 *
23378c2ecf20Sopenharmony_ci	 * Note that since we emit all requests from a single ring, there
23388c2ecf20Sopenharmony_ci	 * is still an implicit global barrier here that may cause a high
23398c2ecf20Sopenharmony_ci	 * priority context to wait for an otherwise independent low priority
23408c2ecf20Sopenharmony_ci	 * context. Contexts idle at the time of reconfiguration are not
23418c2ecf20Sopenharmony_ci	 * trapped behind the barrier.
23428c2ecf20Sopenharmony_ci	 */
23438c2ecf20Sopenharmony_ci	spin_lock(&i915->gem.contexts.lock);
23448c2ecf20Sopenharmony_ci	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
23458c2ecf20Sopenharmony_ci		if (!kref_get_unless_zero(&ctx->ref))
23468c2ecf20Sopenharmony_ci			continue;
23478c2ecf20Sopenharmony_ci
23488c2ecf20Sopenharmony_ci		spin_unlock(&i915->gem.contexts.lock);
23498c2ecf20Sopenharmony_ci
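		/*
		 * The contexts lock is dropped while reconfiguring this
		 * context (which may submit requests); the reference taken
		 * above keeps ctx alive and list_safe_reset_next() revalidates
		 * the cursor once the lock is retaken.
		 */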
23508c2ecf20Sopenharmony_ci		err = gen8_configure_context(ctx, regs, num_regs);
23518c2ecf20Sopenharmony_ci		if (err) {
23528c2ecf20Sopenharmony_ci			i915_gem_context_put(ctx);
23538c2ecf20Sopenharmony_ci			return err;
23548c2ecf20Sopenharmony_ci		}
23558c2ecf20Sopenharmony_ci
23568c2ecf20Sopenharmony_ci		spin_lock(&i915->gem.contexts.lock);
23578c2ecf20Sopenharmony_ci		list_safe_reset_next(ctx, cn, link);
23588c2ecf20Sopenharmony_ci		i915_gem_context_put(ctx);
23598c2ecf20Sopenharmony_ci	}
23608c2ecf20Sopenharmony_ci	spin_unlock(&i915->gem.contexts.lock);
23618c2ecf20Sopenharmony_ci
23628c2ecf20Sopenharmony_ci	/*
23638c2ecf20Sopenharmony_ci	 * After updating all other contexts, we need to modify ourselves.
23648c2ecf20Sopenharmony_ci	 * If we don't modify the kernel_context, we do not get events while
23658c2ecf20Sopenharmony_ci	 * idle.
23668c2ecf20Sopenharmony_ci	 */
23678c2ecf20Sopenharmony_ci	for_each_uabi_engine(engine, i915) {
23688c2ecf20Sopenharmony_ci		struct intel_context *ce = engine->kernel_context;
23698c2ecf20Sopenharmony_ci
23708c2ecf20Sopenharmony_ci		if (engine->class != RENDER_CLASS)
23718c2ecf20Sopenharmony_ci			continue;
23728c2ecf20Sopenharmony_ci
23738c2ecf20Sopenharmony_ci		regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);
23748c2ecf20Sopenharmony_ci
23758c2ecf20Sopenharmony_ci		err = gen8_modify_self(ce, regs, num_regs, active);
23768c2ecf20Sopenharmony_ci		if (err)
23778c2ecf20Sopenharmony_ci			return err;
23788c2ecf20Sopenharmony_ci	}
23798c2ecf20Sopenharmony_ci
23808c2ecf20Sopenharmony_ci	return 0;
23818c2ecf20Sopenharmony_ci}
23828c2ecf20Sopenharmony_ci
23838c2ecf20Sopenharmony_cistatic int
23848c2ecf20Sopenharmony_cigen12_configure_all_contexts(struct i915_perf_stream *stream,
23858c2ecf20Sopenharmony_ci			     const struct i915_oa_config *oa_config,
23868c2ecf20Sopenharmony_ci			     struct i915_active *active)
23878c2ecf20Sopenharmony_ci{
23888c2ecf20Sopenharmony_ci	struct flex regs[] = {
23898c2ecf20Sopenharmony_ci		{
23908c2ecf20Sopenharmony_ci			GEN8_R_PWR_CLK_STATE,
23918c2ecf20Sopenharmony_ci			CTX_R_PWR_CLK_STATE,
23928c2ecf20Sopenharmony_ci		},
23938c2ecf20Sopenharmony_ci	};
23948c2ecf20Sopenharmony_ci
23958c2ecf20Sopenharmony_ci	return oa_configure_all_contexts(stream,
23968c2ecf20Sopenharmony_ci					 regs, ARRAY_SIZE(regs),
23978c2ecf20Sopenharmony_ci					 active);
23988c2ecf20Sopenharmony_ci}
23998c2ecf20Sopenharmony_ci
24008c2ecf20Sopenharmony_cistatic int
24018c2ecf20Sopenharmony_cilrc_configure_all_contexts(struct i915_perf_stream *stream,
24028c2ecf20Sopenharmony_ci			   const struct i915_oa_config *oa_config,
24038c2ecf20Sopenharmony_ci			   struct i915_active *active)
24048c2ecf20Sopenharmony_ci{
24058c2ecf20Sopenharmony_ci	/* The MMIO offsets for Flex EU registers aren't contiguous */
24068c2ecf20Sopenharmony_ci	const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
24078c2ecf20Sopenharmony_ci#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
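	/*
	 * Each flex EU register occupies an (offset, value) pair in the
	 * context image, so register N's value dword lives at
	 * ctx_flexeu0 + 2 * N + 1.
	 */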
24088c2ecf20Sopenharmony_ci	struct flex regs[] = {
24098c2ecf20Sopenharmony_ci		{
24108c2ecf20Sopenharmony_ci			GEN8_R_PWR_CLK_STATE,
24118c2ecf20Sopenharmony_ci			CTX_R_PWR_CLK_STATE,
24128c2ecf20Sopenharmony_ci		},
24138c2ecf20Sopenharmony_ci		{
24148c2ecf20Sopenharmony_ci			GEN8_OACTXCONTROL,
24158c2ecf20Sopenharmony_ci			stream->perf->ctx_oactxctrl_offset + 1,
24168c2ecf20Sopenharmony_ci		},
24178c2ecf20Sopenharmony_ci		{ EU_PERF_CNTL0, ctx_flexeuN(0) },
24188c2ecf20Sopenharmony_ci		{ EU_PERF_CNTL1, ctx_flexeuN(1) },
24198c2ecf20Sopenharmony_ci		{ EU_PERF_CNTL2, ctx_flexeuN(2) },
24208c2ecf20Sopenharmony_ci		{ EU_PERF_CNTL3, ctx_flexeuN(3) },
24218c2ecf20Sopenharmony_ci		{ EU_PERF_CNTL4, ctx_flexeuN(4) },
24228c2ecf20Sopenharmony_ci		{ EU_PERF_CNTL5, ctx_flexeuN(5) },
24238c2ecf20Sopenharmony_ci		{ EU_PERF_CNTL6, ctx_flexeuN(6) },
24248c2ecf20Sopenharmony_ci	};
24258c2ecf20Sopenharmony_ci#undef ctx_flexeuN
24268c2ecf20Sopenharmony_ci	int i;
24278c2ecf20Sopenharmony_ci
24288c2ecf20Sopenharmony_ci	regs[1].value =
24298c2ecf20Sopenharmony_ci		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
24308c2ecf20Sopenharmony_ci		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
24318c2ecf20Sopenharmony_ci		GEN8_OA_COUNTER_RESUME;
24328c2ecf20Sopenharmony_ci
24338c2ecf20Sopenharmony_ci	for (i = 2; i < ARRAY_SIZE(regs); i++)
24348c2ecf20Sopenharmony_ci		regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
24358c2ecf20Sopenharmony_ci
24368c2ecf20Sopenharmony_ci	return oa_configure_all_contexts(stream,
24378c2ecf20Sopenharmony_ci					 regs, ARRAY_SIZE(regs),
24388c2ecf20Sopenharmony_ci					 active);
24398c2ecf20Sopenharmony_ci}
24408c2ecf20Sopenharmony_ci
24418c2ecf20Sopenharmony_cistatic int
24428c2ecf20Sopenharmony_cigen8_enable_metric_set(struct i915_perf_stream *stream,
24438c2ecf20Sopenharmony_ci		       struct i915_active *active)
24448c2ecf20Sopenharmony_ci{
24458c2ecf20Sopenharmony_ci	struct intel_uncore *uncore = stream->uncore;
24468c2ecf20Sopenharmony_ci	struct i915_oa_config *oa_config = stream->oa_config;
24478c2ecf20Sopenharmony_ci	int ret;
24488c2ecf20Sopenharmony_ci
24498c2ecf20Sopenharmony_ci	/*
24508c2ecf20Sopenharmony_ci	 * We disable slice/unslice clock ratio change reports on SKL since
24518c2ecf20Sopenharmony_ci	 * they are too noisy. The HW generates a lot of redundant reports
24528c2ecf20Sopenharmony_ci	 * where the ratio hasn't really changed, causing a lot of redundant
24538c2ecf20Sopenharmony_ci	 * work for userspace and increasing the chances we'll hit buffer
24548c2ecf20Sopenharmony_ci	 * overruns.
24558c2ecf20Sopenharmony_ci	 *
24568c2ecf20Sopenharmony_ci	 * Although we don't currently use the 'disable overrun' OABUFFER
24578c2ecf20Sopenharmony_ci	 * feature, it's worth noting that clock ratio reports have to be
24588c2ecf20Sopenharmony_ci	 * disabled before considering use of that feature, since the HW doesn't
24598c2ecf20Sopenharmony_ci	 * correctly block these reports.
24608c2ecf20Sopenharmony_ci	 *
24618c2ecf20Sopenharmony_ci	 * Currently none of the high-level metrics we have depend on knowing
24628c2ecf20Sopenharmony_ci	 * this ratio to normalize.
24638c2ecf20Sopenharmony_ci	 *
24648c2ecf20Sopenharmony_ci	 * Note: This register is not power context saved and restored, but
24658c2ecf20Sopenharmony_ci	 * that's OK considering that we disable RC6 while the OA unit is
24668c2ecf20Sopenharmony_ci	 * enabled.
24678c2ecf20Sopenharmony_ci	 *
24688c2ecf20Sopenharmony_ci	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
24698c2ecf20Sopenharmony_ci	 * be read back from automatically triggered reports, as part of the
24708c2ecf20Sopenharmony_ci	 * RPT_ID field.
24718c2ecf20Sopenharmony_ci	 */
24728c2ecf20Sopenharmony_ci	if (IS_GEN_RANGE(stream->perf->i915, 9, 11)) {
24738c2ecf20Sopenharmony_ci		intel_uncore_write(uncore, GEN8_OA_DEBUG,
24748c2ecf20Sopenharmony_ci				   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
24758c2ecf20Sopenharmony_ci						      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
24768c2ecf20Sopenharmony_ci	}
24778c2ecf20Sopenharmony_ci
24788c2ecf20Sopenharmony_ci	/*
24798c2ecf20Sopenharmony_ci	 * Update all contexts prior to writing the mux configurations as we need
24808c2ecf20Sopenharmony_ci	 * to make sure all slices/subslices are ON before writing to NOA
24818c2ecf20Sopenharmony_ci	 * registers.
24828c2ecf20Sopenharmony_ci	 */
24838c2ecf20Sopenharmony_ci	ret = lrc_configure_all_contexts(stream, oa_config, active);
24848c2ecf20Sopenharmony_ci	if (ret)
24858c2ecf20Sopenharmony_ci		return ret;
24868c2ecf20Sopenharmony_ci
24878c2ecf20Sopenharmony_ci	return emit_oa_config(stream,
24888c2ecf20Sopenharmony_ci			      stream->oa_config, oa_context(stream),
24898c2ecf20Sopenharmony_ci			      active);
24908c2ecf20Sopenharmony_ci}
24918c2ecf20Sopenharmony_ci
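/*
 * Only let OAG emit context-switch reports when the user actually asked for
 * OA report samples; otherwise set the disable bit in OA_DEBUG.
 */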
24928c2ecf20Sopenharmony_cistatic u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
24938c2ecf20Sopenharmony_ci{
24948c2ecf20Sopenharmony_ci	return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
24958c2ecf20Sopenharmony_ci			     (stream->sample_flags & SAMPLE_OA_REPORT) ?
24968c2ecf20Sopenharmony_ci			     0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
24978c2ecf20Sopenharmony_ci}
24988c2ecf20Sopenharmony_ci
24998c2ecf20Sopenharmony_cistatic int
25008c2ecf20Sopenharmony_cigen12_enable_metric_set(struct i915_perf_stream *stream,
25018c2ecf20Sopenharmony_ci			struct i915_active *active)
25028c2ecf20Sopenharmony_ci{
25038c2ecf20Sopenharmony_ci	struct intel_uncore *uncore = stream->uncore;
25048c2ecf20Sopenharmony_ci	struct i915_oa_config *oa_config = stream->oa_config;
25058c2ecf20Sopenharmony_ci	bool periodic = stream->periodic;
25068c2ecf20Sopenharmony_ci	u32 period_exponent = stream->period_exponent;
25078c2ecf20Sopenharmony_ci	int ret;
25088c2ecf20Sopenharmony_ci
25098c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
25108c2ecf20Sopenharmony_ci			   /* Disable clk ratio reports, like previous Gens. */
25118c2ecf20Sopenharmony_ci			   _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
25128c2ecf20Sopenharmony_ci					      GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
25138c2ecf20Sopenharmony_ci			   /*
25148c2ecf20Sopenharmony_ci			    * If the user didn't require OA reports, instruct
25158c2ecf20Sopenharmony_ci			    * the hardware not to emit ctx switch reports.
25168c2ecf20Sopenharmony_ci			    */
25178c2ecf20Sopenharmony_ci			   oag_report_ctx_switches(stream));
25188c2ecf20Sopenharmony_ci
25198c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
25208c2ecf20Sopenharmony_ci			   (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
25218c2ecf20Sopenharmony_ci			    GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
25228c2ecf20Sopenharmony_ci			    (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
25238c2ecf20Sopenharmony_ci			    : 0);
25248c2ecf20Sopenharmony_ci
25258c2ecf20Sopenharmony_ci	/*
25268c2ecf20Sopenharmony_ci	 * Update all contexts prior to writing the mux configurations as we need
25278c2ecf20Sopenharmony_ci	 * to make sure all slices/subslices are ON before writing to NOA
25288c2ecf20Sopenharmony_ci	 * registers.
25298c2ecf20Sopenharmony_ci	 */
25308c2ecf20Sopenharmony_ci	ret = gen12_configure_all_contexts(stream, oa_config, active);
25318c2ecf20Sopenharmony_ci	if (ret)
25328c2ecf20Sopenharmony_ci		return ret;
25338c2ecf20Sopenharmony_ci
25348c2ecf20Sopenharmony_ci	/*
25358c2ecf20Sopenharmony_ci	 * For Gen12, performance counters are context
25368c2ecf20Sopenharmony_ci	 * saved/restored. Only enable them for the context that
25378c2ecf20Sopenharmony_ci	 * requested this.
25388c2ecf20Sopenharmony_ci	 */
25398c2ecf20Sopenharmony_ci	if (stream->ctx) {
25408c2ecf20Sopenharmony_ci		ret = gen12_configure_oar_context(stream, active);
25418c2ecf20Sopenharmony_ci		if (ret)
25428c2ecf20Sopenharmony_ci			return ret;
25438c2ecf20Sopenharmony_ci	}
25448c2ecf20Sopenharmony_ci
25458c2ecf20Sopenharmony_ci	return emit_oa_config(stream,
25468c2ecf20Sopenharmony_ci			      stream->oa_config, oa_context(stream),
25478c2ecf20Sopenharmony_ci			      active);
25488c2ecf20Sopenharmony_ci}
25498c2ecf20Sopenharmony_ci
25508c2ecf20Sopenharmony_cistatic void gen8_disable_metric_set(struct i915_perf_stream *stream)
25518c2ecf20Sopenharmony_ci{
25528c2ecf20Sopenharmony_ci	struct intel_uncore *uncore = stream->uncore;
25538c2ecf20Sopenharmony_ci
25548c2ecf20Sopenharmony_ci	/* Reset all contexts' slices/subslices configurations. */
25558c2ecf20Sopenharmony_ci	lrc_configure_all_contexts(stream, NULL, NULL);
25568c2ecf20Sopenharmony_ci
25578c2ecf20Sopenharmony_ci	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
25588c2ecf20Sopenharmony_ci}
25598c2ecf20Sopenharmony_ci
25608c2ecf20Sopenharmony_cistatic void gen10_disable_metric_set(struct i915_perf_stream *stream)
25618c2ecf20Sopenharmony_ci{
25628c2ecf20Sopenharmony_ci	struct intel_uncore *uncore = stream->uncore;
25638c2ecf20Sopenharmony_ci
25648c2ecf20Sopenharmony_ci	/* Reset all contexts' slices/subslices configurations. */
25658c2ecf20Sopenharmony_ci	lrc_configure_all_contexts(stream, NULL, NULL);
25668c2ecf20Sopenharmony_ci
25678c2ecf20Sopenharmony_ci	/* Make sure we disable noa to save power. */
25688c2ecf20Sopenharmony_ci	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
25698c2ecf20Sopenharmony_ci}
25708c2ecf20Sopenharmony_ci
25718c2ecf20Sopenharmony_cistatic void gen12_disable_metric_set(struct i915_perf_stream *stream)
25728c2ecf20Sopenharmony_ci{
25738c2ecf20Sopenharmony_ci	struct intel_uncore *uncore = stream->uncore;
25748c2ecf20Sopenharmony_ci
25758c2ecf20Sopenharmony_ci	/* Reset all contexts' slices/subslices configurations. */
25768c2ecf20Sopenharmony_ci	gen12_configure_all_contexts(stream, NULL, NULL);
25778c2ecf20Sopenharmony_ci
25788c2ecf20Sopenharmony_ci	/* disable the context save/restore or OAR counters */
25798c2ecf20Sopenharmony_ci	if (stream->ctx)
25808c2ecf20Sopenharmony_ci		gen12_configure_oar_context(stream, NULL);
25818c2ecf20Sopenharmony_ci
25828c2ecf20Sopenharmony_ci	/* Make sure we disable noa to save power. */
25838c2ecf20Sopenharmony_ci	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
25848c2ecf20Sopenharmony_ci}
25858c2ecf20Sopenharmony_ci
25868c2ecf20Sopenharmony_cistatic void gen7_oa_enable(struct i915_perf_stream *stream)
25878c2ecf20Sopenharmony_ci{
25888c2ecf20Sopenharmony_ci	struct intel_uncore *uncore = stream->uncore;
25898c2ecf20Sopenharmony_ci	struct i915_gem_context *ctx = stream->ctx;
25908c2ecf20Sopenharmony_ci	u32 ctx_id = stream->specific_ctx_id;
25918c2ecf20Sopenharmony_ci	bool periodic = stream->periodic;
25928c2ecf20Sopenharmony_ci	u32 period_exponent = stream->period_exponent;
25938c2ecf20Sopenharmony_ci	u32 report_format = stream->oa_buffer.format;
25948c2ecf20Sopenharmony_ci
25958c2ecf20Sopenharmony_ci	/*
25968c2ecf20Sopenharmony_ci	 * Reset buf pointers so we don't forward reports from before now.
25978c2ecf20Sopenharmony_ci	 *
25988c2ecf20Sopenharmony_ci	 * Think carefully if considering trying to avoid this, since it
25998c2ecf20Sopenharmony_ci	 * also ensures status flags and the buffer itself are cleared
26008c2ecf20Sopenharmony_ci	 * in error paths, and we have checks for invalid reports based
26018c2ecf20Sopenharmony_ci	 * on the assumption that certain fields are written to zeroed
26028c2ecf20Sopenharmony_ci	 * memory, which this helps maintain.
26038c2ecf20Sopenharmony_ci	 */
26048c2ecf20Sopenharmony_ci	gen7_init_oa_buffer(stream);
26058c2ecf20Sopenharmony_ci
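	/*
	 * Combine the context filter, timer period/enable and report format
	 * into a single OACONTROL write that also enables the OA unit.
	 */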
26068c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN7_OACONTROL,
26078c2ecf20Sopenharmony_ci			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
26088c2ecf20Sopenharmony_ci			   (period_exponent <<
26098c2ecf20Sopenharmony_ci			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
26108c2ecf20Sopenharmony_ci			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
26118c2ecf20Sopenharmony_ci			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
26128c2ecf20Sopenharmony_ci			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
26138c2ecf20Sopenharmony_ci			   GEN7_OACONTROL_ENABLE);
26148c2ecf20Sopenharmony_ci}
26158c2ecf20Sopenharmony_ci
26168c2ecf20Sopenharmony_cistatic void gen8_oa_enable(struct i915_perf_stream *stream)
26178c2ecf20Sopenharmony_ci{
26188c2ecf20Sopenharmony_ci	struct intel_uncore *uncore = stream->uncore;
26198c2ecf20Sopenharmony_ci	u32 report_format = stream->oa_buffer.format;
26208c2ecf20Sopenharmony_ci
26218c2ecf20Sopenharmony_ci	/*
26228c2ecf20Sopenharmony_ci	 * Reset buf pointers so we don't forward reports from before now.
26238c2ecf20Sopenharmony_ci	 *
26248c2ecf20Sopenharmony_ci	 * Think carefully if considering trying to avoid this, since it
26258c2ecf20Sopenharmony_ci	 * also ensures status flags and the buffer itself are cleared
26268c2ecf20Sopenharmony_ci	 * in error paths, and we have checks for invalid reports based
26278c2ecf20Sopenharmony_ci	 * on the assumption that certain fields are written to zeroed
26288c2ecf20Sopenharmony_ci	 * memory, which this helps maintain.
26298c2ecf20Sopenharmony_ci	 */
26308c2ecf20Sopenharmony_ci	gen8_init_oa_buffer(stream);
26318c2ecf20Sopenharmony_ci
26328c2ecf20Sopenharmony_ci	/*
26338c2ecf20Sopenharmony_ci	 * Note: we don't rely on the hardware to perform single context
26348c2ecf20Sopenharmony_ci	 * filtering and instead filter on the CPU based on the context-id
26358c2ecf20Sopenharmony_ci	 * field of reports.
26368c2ecf20Sopenharmony_ci	 */
26378c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN8_OACONTROL,
26388c2ecf20Sopenharmony_ci			   (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
26398c2ecf20Sopenharmony_ci			   GEN8_OA_COUNTER_ENABLE);
26408c2ecf20Sopenharmony_ci}
26418c2ecf20Sopenharmony_ci
26428c2ecf20Sopenharmony_cistatic void gen12_oa_enable(struct i915_perf_stream *stream)
26438c2ecf20Sopenharmony_ci{
26448c2ecf20Sopenharmony_ci	struct intel_uncore *uncore = stream->uncore;
26458c2ecf20Sopenharmony_ci	u32 report_format = stream->oa_buffer.format;
26468c2ecf20Sopenharmony_ci
26478c2ecf20Sopenharmony_ci	/*
26488c2ecf20Sopenharmony_ci	 * If we don't want OA reports from the OA buffer, then we don't even
26498c2ecf20Sopenharmony_ci	 * need to program the OAG unit.
26508c2ecf20Sopenharmony_ci	 */
26518c2ecf20Sopenharmony_ci	if (!(stream->sample_flags & SAMPLE_OA_REPORT))
26528c2ecf20Sopenharmony_ci		return;
26538c2ecf20Sopenharmony_ci
26548c2ecf20Sopenharmony_ci	gen12_init_oa_buffer(stream);
26558c2ecf20Sopenharmony_ci
26568c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN12_OAG_OACONTROL,
26578c2ecf20Sopenharmony_ci			   (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) |
26588c2ecf20Sopenharmony_ci			   GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE);
26598c2ecf20Sopenharmony_ci}
26608c2ecf20Sopenharmony_ci
26618c2ecf20Sopenharmony_ci/**
26628c2ecf20Sopenharmony_ci * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
26638c2ecf20Sopenharmony_ci * @stream: An i915 perf stream opened for OA metrics
26648c2ecf20Sopenharmony_ci *
26658c2ecf20Sopenharmony_ci * [Re]enables hardware periodic sampling according to the period configured
26668c2ecf20Sopenharmony_ci * when opening the stream. This also starts a hrtimer that will periodically
26678c2ecf20Sopenharmony_ci * check for data in the circular OA buffer for notifying userspace (e.g.
26688c2ecf20Sopenharmony_ci * during a read() or poll()).
26698c2ecf20Sopenharmony_ci */
26708c2ecf20Sopenharmony_cistatic void i915_oa_stream_enable(struct i915_perf_stream *stream)
26718c2ecf20Sopenharmony_ci{
26728c2ecf20Sopenharmony_ci	stream->pollin = false;
26738c2ecf20Sopenharmony_ci
26748c2ecf20Sopenharmony_ci	stream->perf->ops.oa_enable(stream);
26758c2ecf20Sopenharmony_ci
26768c2ecf20Sopenharmony_ci	if (stream->sample_flags & SAMPLE_OA_REPORT)
26778c2ecf20Sopenharmony_ci		hrtimer_start(&stream->poll_check_timer,
26788c2ecf20Sopenharmony_ci			      ns_to_ktime(stream->poll_oa_period),
26798c2ecf20Sopenharmony_ci			      HRTIMER_MODE_REL_PINNED);
26808c2ecf20Sopenharmony_ci}
26818c2ecf20Sopenharmony_ci
26828c2ecf20Sopenharmony_cistatic void gen7_oa_disable(struct i915_perf_stream *stream)
26838c2ecf20Sopenharmony_ci{
26848c2ecf20Sopenharmony_ci	struct intel_uncore *uncore = stream->uncore;
26858c2ecf20Sopenharmony_ci
26868c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN7_OACONTROL, 0);
26878c2ecf20Sopenharmony_ci	if (intel_wait_for_register(uncore,
26888c2ecf20Sopenharmony_ci				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
26898c2ecf20Sopenharmony_ci				    50))
26908c2ecf20Sopenharmony_ci		drm_err(&stream->perf->i915->drm,
26918c2ecf20Sopenharmony_ci			"wait for OA to be disabled timed out\n");
26928c2ecf20Sopenharmony_ci}
26938c2ecf20Sopenharmony_ci
26948c2ecf20Sopenharmony_cistatic void gen8_oa_disable(struct i915_perf_stream *stream)
26958c2ecf20Sopenharmony_ci{
26968c2ecf20Sopenharmony_ci	struct intel_uncore *uncore = stream->uncore;
26978c2ecf20Sopenharmony_ci
26988c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN8_OACONTROL, 0);
26998c2ecf20Sopenharmony_ci	if (intel_wait_for_register(uncore,
27008c2ecf20Sopenharmony_ci				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
27018c2ecf20Sopenharmony_ci				    50))
27028c2ecf20Sopenharmony_ci		drm_err(&stream->perf->i915->drm,
27038c2ecf20Sopenharmony_ci			"wait for OA to be disabled timed out\n");
27048c2ecf20Sopenharmony_ci}
27058c2ecf20Sopenharmony_ci
27068c2ecf20Sopenharmony_cistatic void gen12_oa_disable(struct i915_perf_stream *stream)
27078c2ecf20Sopenharmony_ci{
27088c2ecf20Sopenharmony_ci	struct intel_uncore *uncore = stream->uncore;
27098c2ecf20Sopenharmony_ci
27108c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0);
27118c2ecf20Sopenharmony_ci	if (intel_wait_for_register(uncore,
27128c2ecf20Sopenharmony_ci				    GEN12_OAG_OACONTROL,
27138c2ecf20Sopenharmony_ci				    GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
27148c2ecf20Sopenharmony_ci				    50))
27158c2ecf20Sopenharmony_ci		drm_err(&stream->perf->i915->drm,
27168c2ecf20Sopenharmony_ci			"wait for OA to be disabled timed out\n");
27178c2ecf20Sopenharmony_ci
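	/*
	 * Also invalidate the OA unit TLB on disable so stale translations
	 * are not reused by a subsequent stream.
	 */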
27188c2ecf20Sopenharmony_ci	intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
27198c2ecf20Sopenharmony_ci	if (intel_wait_for_register(uncore,
27208c2ecf20Sopenharmony_ci				    GEN12_OA_TLB_INV_CR,
27218c2ecf20Sopenharmony_ci				    1, 0,
27228c2ecf20Sopenharmony_ci				    50))
27238c2ecf20Sopenharmony_ci		drm_err(&stream->perf->i915->drm,
27248c2ecf20Sopenharmony_ci			"wait for OA tlb invalidate timed out\n");
27258c2ecf20Sopenharmony_ci}
27268c2ecf20Sopenharmony_ci
27278c2ecf20Sopenharmony_ci/**
27288c2ecf20Sopenharmony_ci * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
27298c2ecf20Sopenharmony_ci * @stream: An i915 perf stream opened for OA metrics
27308c2ecf20Sopenharmony_ci *
27318c2ecf20Sopenharmony_ci * Stops the OA unit from periodically writing counter reports into the
27328c2ecf20Sopenharmony_ci * circular OA buffer. This also stops the hrtimer that periodically checks for
27338c2ecf20Sopenharmony_ci * data in the circular OA buffer, for notifying userspace.
27348c2ecf20Sopenharmony_ci */
27358c2ecf20Sopenharmony_cistatic void i915_oa_stream_disable(struct i915_perf_stream *stream)
27368c2ecf20Sopenharmony_ci{
27378c2ecf20Sopenharmony_ci	stream->perf->ops.oa_disable(stream);
27388c2ecf20Sopenharmony_ci
27398c2ecf20Sopenharmony_ci	if (stream->sample_flags & SAMPLE_OA_REPORT)
27408c2ecf20Sopenharmony_ci		hrtimer_cancel(&stream->poll_check_timer);
27418c2ecf20Sopenharmony_ci}
27428c2ecf20Sopenharmony_ci
27438c2ecf20Sopenharmony_cistatic const struct i915_perf_stream_ops i915_oa_stream_ops = {
27448c2ecf20Sopenharmony_ci	.destroy = i915_oa_stream_destroy,
27458c2ecf20Sopenharmony_ci	.enable = i915_oa_stream_enable,
27468c2ecf20Sopenharmony_ci	.disable = i915_oa_stream_disable,
27478c2ecf20Sopenharmony_ci	.wait_unlocked = i915_oa_wait_unlocked,
27488c2ecf20Sopenharmony_ci	.poll_wait = i915_oa_poll_wait,
27498c2ecf20Sopenharmony_ci	.read = i915_oa_read,
27508c2ecf20Sopenharmony_ci};
27518c2ecf20Sopenharmony_ci
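/*
 * Program the metric set and wait for all the requests used to apply it
 * (tracked via a temporary i915_active) to complete, so the stream is fully
 * configured by the time we return.
 */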
27528c2ecf20Sopenharmony_cistatic int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
27538c2ecf20Sopenharmony_ci{
27548c2ecf20Sopenharmony_ci	struct i915_active *active;
27558c2ecf20Sopenharmony_ci	int err;
27568c2ecf20Sopenharmony_ci
27578c2ecf20Sopenharmony_ci	active = i915_active_create();
27588c2ecf20Sopenharmony_ci	if (!active)
27598c2ecf20Sopenharmony_ci		return -ENOMEM;
27608c2ecf20Sopenharmony_ci
27618c2ecf20Sopenharmony_ci	err = stream->perf->ops.enable_metric_set(stream, active);
27628c2ecf20Sopenharmony_ci	if (err == 0)
27638c2ecf20Sopenharmony_ci		__i915_active_wait(active, TASK_UNINTERRUPTIBLE);
27648c2ecf20Sopenharmony_ci
27658c2ecf20Sopenharmony_ci	i915_active_put(active);
27668c2ecf20Sopenharmony_ci	return err;
27678c2ecf20Sopenharmony_ci}
27688c2ecf20Sopenharmony_ci
27698c2ecf20Sopenharmony_cistatic void
27708c2ecf20Sopenharmony_ciget_default_sseu_config(struct intel_sseu *out_sseu,
27718c2ecf20Sopenharmony_ci			struct intel_engine_cs *engine)
27728c2ecf20Sopenharmony_ci{
27738c2ecf20Sopenharmony_ci	const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;
27748c2ecf20Sopenharmony_ci
27758c2ecf20Sopenharmony_ci	*out_sseu = intel_sseu_from_device_info(devinfo_sseu);
27768c2ecf20Sopenharmony_ci
27778c2ecf20Sopenharmony_ci	if (IS_GEN(engine->i915, 11)) {
27788c2ecf20Sopenharmony_ci		/*
27798c2ecf20Sopenharmony_ci		 * We only need the subslice count, so it doesn't matter which
27808c2ecf20Sopenharmony_ci		 * ones we select - just build a mask enabling half of the
27818c2ecf20Sopenharmony_ci		 * available subslices per slice.
27828c2ecf20Sopenharmony_ci		 */
27838c2ecf20Sopenharmony_ci		out_sseu->subslice_mask =
27848c2ecf20Sopenharmony_ci			~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
27858c2ecf20Sopenharmony_ci		out_sseu->slice_mask = 0x1;
27868c2ecf20Sopenharmony_ci	}
27878c2ecf20Sopenharmony_ci}
27888c2ecf20Sopenharmony_ci
27898c2ecf20Sopenharmony_cistatic int
27908c2ecf20Sopenharmony_ciget_sseu_config(struct intel_sseu *out_sseu,
27918c2ecf20Sopenharmony_ci		struct intel_engine_cs *engine,
27928c2ecf20Sopenharmony_ci		const struct drm_i915_gem_context_param_sseu *drm_sseu)
27938c2ecf20Sopenharmony_ci{
27948c2ecf20Sopenharmony_ci	if (drm_sseu->engine.engine_class != engine->uabi_class ||
27958c2ecf20Sopenharmony_ci	    drm_sseu->engine.engine_instance != engine->uabi_instance)
27968c2ecf20Sopenharmony_ci		return -EINVAL;
27978c2ecf20Sopenharmony_ci
27988c2ecf20Sopenharmony_ci	return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
27998c2ecf20Sopenharmony_ci}
28008c2ecf20Sopenharmony_ci
28018c2ecf20Sopenharmony_ci/**
28028c2ecf20Sopenharmony_ci * i915_oa_stream_init - validate combined props for OA stream and init
28038c2ecf20Sopenharmony_ci * @stream: An i915 perf stream
28048c2ecf20Sopenharmony_ci * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
28058c2ecf20Sopenharmony_ci * @props: The property state that configures stream (individually validated)
28068c2ecf20Sopenharmony_ci *
28078c2ecf20Sopenharmony_ci * While read_properties_unlocked() validates properties in isolation it
28088c2ecf20Sopenharmony_ci * doesn't ensure that the combination necessarily makes sense.
28098c2ecf20Sopenharmony_ci *
28108c2ecf20Sopenharmony_ci * At this point it has been determined that userspace wants a stream of
28118c2ecf20Sopenharmony_ci * OA metrics, but still we need to further validate the combined
28128c2ecf20Sopenharmony_ci * properties are OK.
28138c2ecf20Sopenharmony_ci *
28148c2ecf20Sopenharmony_ci * If the configuration makes sense then we can allocate memory for
28158c2ecf20Sopenharmony_ci * a circular OA buffer and apply the requested metric set configuration.
28168c2ecf20Sopenharmony_ci *
28178c2ecf20Sopenharmony_ci * Returns: zero on success or a negative error code.
28188c2ecf20Sopenharmony_ci */
28198c2ecf20Sopenharmony_cistatic int i915_oa_stream_init(struct i915_perf_stream *stream,
28208c2ecf20Sopenharmony_ci			       struct drm_i915_perf_open_param *param,
28218c2ecf20Sopenharmony_ci			       struct perf_open_properties *props)
28228c2ecf20Sopenharmony_ci{
28238c2ecf20Sopenharmony_ci	struct drm_i915_private *i915 = stream->perf->i915;
28248c2ecf20Sopenharmony_ci	struct i915_perf *perf = stream->perf;
28258c2ecf20Sopenharmony_ci	int format_size;
28268c2ecf20Sopenharmony_ci	int ret;
28278c2ecf20Sopenharmony_ci
28288c2ecf20Sopenharmony_ci	if (!props->engine) {
28298c2ecf20Sopenharmony_ci		DRM_DEBUG("OA engine not specified\n");
28308c2ecf20Sopenharmony_ci		return -EINVAL;
28318c2ecf20Sopenharmony_ci	}
28328c2ecf20Sopenharmony_ci
28338c2ecf20Sopenharmony_ci	/*
28348c2ecf20Sopenharmony_ci	 * If the sysfs metrics/ directory wasn't registered for some
28358c2ecf20Sopenharmony_ci	 * reason then don't let userspace try their luck with config
28368c2ecf20Sopenharmony_ci	 * IDs
28378c2ecf20Sopenharmony_ci	 */
28388c2ecf20Sopenharmony_ci	if (!perf->metrics_kobj) {
28398c2ecf20Sopenharmony_ci		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
28408c2ecf20Sopenharmony_ci		return -EINVAL;
28418c2ecf20Sopenharmony_ci	}
28428c2ecf20Sopenharmony_ci
28438c2ecf20Sopenharmony_ci	if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
28448c2ecf20Sopenharmony_ci	    (INTEL_GEN(perf->i915) < 12 || !stream->ctx)) {
28458c2ecf20Sopenharmony_ci		DRM_DEBUG("Only OA report sampling supported\n");
28468c2ecf20Sopenharmony_ci		return -EINVAL;
28478c2ecf20Sopenharmony_ci	}
28488c2ecf20Sopenharmony_ci
28498c2ecf20Sopenharmony_ci	if (!perf->ops.enable_metric_set) {
28508c2ecf20Sopenharmony_ci		DRM_DEBUG("OA unit not supported\n");
28518c2ecf20Sopenharmony_ci		return -ENODEV;
28528c2ecf20Sopenharmony_ci	}
28538c2ecf20Sopenharmony_ci
28548c2ecf20Sopenharmony_ci	/*
28558c2ecf20Sopenharmony_ci	 * To avoid the complexity of having to accurately filter
28568c2ecf20Sopenharmony_ci	 * counter reports and marshal to the appropriate client,
28578c2ecf20Sopenharmony_ci	 * we currently only allow exclusive access.
28588c2ecf20Sopenharmony_ci	 */
28598c2ecf20Sopenharmony_ci	if (perf->exclusive_stream) {
28608c2ecf20Sopenharmony_ci		DRM_DEBUG("OA unit already in use\n");
28618c2ecf20Sopenharmony_ci		return -EBUSY;
28628c2ecf20Sopenharmony_ci	}
28638c2ecf20Sopenharmony_ci
28648c2ecf20Sopenharmony_ci	if (!props->oa_format) {
28658c2ecf20Sopenharmony_ci		DRM_DEBUG("OA report format not specified\n");
28668c2ecf20Sopenharmony_ci		return -EINVAL;
28678c2ecf20Sopenharmony_ci	}
28688c2ecf20Sopenharmony_ci
28698c2ecf20Sopenharmony_ci	stream->engine = props->engine;
28708c2ecf20Sopenharmony_ci	stream->uncore = stream->engine->gt->uncore;
28718c2ecf20Sopenharmony_ci
28728c2ecf20Sopenharmony_ci	stream->sample_size = sizeof(struct drm_i915_perf_record_header);
28738c2ecf20Sopenharmony_ci
28748c2ecf20Sopenharmony_ci	format_size = perf->oa_formats[props->oa_format].size;
28758c2ecf20Sopenharmony_ci
28768c2ecf20Sopenharmony_ci	stream->sample_flags = props->sample_flags;
28778c2ecf20Sopenharmony_ci	stream->sample_size += format_size;
28788c2ecf20Sopenharmony_ci
28798c2ecf20Sopenharmony_ci	stream->oa_buffer.format_size = format_size;
28808c2ecf20Sopenharmony_ci	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0))
28818c2ecf20Sopenharmony_ci		return -EINVAL;
28828c2ecf20Sopenharmony_ci
28838c2ecf20Sopenharmony_ci	stream->hold_preemption = props->hold_preemption;
28848c2ecf20Sopenharmony_ci
28858c2ecf20Sopenharmony_ci	stream->oa_buffer.format =
28868c2ecf20Sopenharmony_ci		perf->oa_formats[props->oa_format].format;
28878c2ecf20Sopenharmony_ci
28888c2ecf20Sopenharmony_ci	stream->periodic = props->oa_periodic;
28898c2ecf20Sopenharmony_ci	if (stream->periodic)
28908c2ecf20Sopenharmony_ci		stream->period_exponent = props->oa_period_exponent;
28918c2ecf20Sopenharmony_ci
28928c2ecf20Sopenharmony_ci	if (stream->ctx) {
28938c2ecf20Sopenharmony_ci		ret = oa_get_render_ctx_id(stream);
28948c2ecf20Sopenharmony_ci		if (ret) {
28958c2ecf20Sopenharmony_ci			DRM_DEBUG("Invalid context id to filter with\n");
28968c2ecf20Sopenharmony_ci			return ret;
28978c2ecf20Sopenharmony_ci		}
28988c2ecf20Sopenharmony_ci	}
28998c2ecf20Sopenharmony_ci
29008c2ecf20Sopenharmony_ci	ret = alloc_noa_wait(stream);
29018c2ecf20Sopenharmony_ci	if (ret) {
29028c2ecf20Sopenharmony_ci		DRM_DEBUG("Unable to allocate NOA wait batch buffer\n");
29038c2ecf20Sopenharmony_ci		goto err_noa_wait_alloc;
29048c2ecf20Sopenharmony_ci	}
29058c2ecf20Sopenharmony_ci
29068c2ecf20Sopenharmony_ci	stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
29078c2ecf20Sopenharmony_ci	if (!stream->oa_config) {
29088c2ecf20Sopenharmony_ci		DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
29098c2ecf20Sopenharmony_ci		ret = -EINVAL;
29108c2ecf20Sopenharmony_ci		goto err_config;
29118c2ecf20Sopenharmony_ci	}
29128c2ecf20Sopenharmony_ci
29138c2ecf20Sopenharmony_ci	/* PRM - observability performance counters:
29148c2ecf20Sopenharmony_ci	 *
29158c2ecf20Sopenharmony_ci	 *   OACONTROL, performance counter enable, note:
29168c2ecf20Sopenharmony_ci	 *
29178c2ecf20Sopenharmony_ci	 *   "When this bit is set, in order to have coherent counts,
29188c2ecf20Sopenharmony_ci	 *   RC6 power state and trunk clock gating must be disabled.
29198c2ecf20Sopenharmony_ci	 *   This can be achieved by programming MMIO registers as
29208c2ecf20Sopenharmony_ci	 *   0xA094=0 and 0xA090[31]=1"
29218c2ecf20Sopenharmony_ci	 *
29228c2ecf20Sopenharmony_ci	 *   In our case we are expecting that taking pm + FORCEWAKE
29238c2ecf20Sopenharmony_ci	 *   references will effectively disable RC6.
29248c2ecf20Sopenharmony_ci	 */
29258c2ecf20Sopenharmony_ci	intel_engine_pm_get(stream->engine);
29268c2ecf20Sopenharmony_ci	intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
29278c2ecf20Sopenharmony_ci
29288c2ecf20Sopenharmony_ci	ret = alloc_oa_buffer(stream);
29298c2ecf20Sopenharmony_ci	if (ret)
29308c2ecf20Sopenharmony_ci		goto err_oa_buf_alloc;
29318c2ecf20Sopenharmony_ci
29328c2ecf20Sopenharmony_ci	stream->ops = &i915_oa_stream_ops;
29338c2ecf20Sopenharmony_ci
29348c2ecf20Sopenharmony_ci	perf->sseu = props->sseu;
29358c2ecf20Sopenharmony_ci	WRITE_ONCE(perf->exclusive_stream, stream);
29368c2ecf20Sopenharmony_ci
29378c2ecf20Sopenharmony_ci	ret = i915_perf_stream_enable_sync(stream);
29388c2ecf20Sopenharmony_ci	if (ret) {
29398c2ecf20Sopenharmony_ci		DRM_DEBUG("Unable to enable metric set\n");
29408c2ecf20Sopenharmony_ci		goto err_enable;
29418c2ecf20Sopenharmony_ci	}
29428c2ecf20Sopenharmony_ci
29438c2ecf20Sopenharmony_ci	DRM_DEBUG("opening stream oa config uuid=%s\n",
29448c2ecf20Sopenharmony_ci		  stream->oa_config->uuid);
29458c2ecf20Sopenharmony_ci
29468c2ecf20Sopenharmony_ci	hrtimer_init(&stream->poll_check_timer,
29478c2ecf20Sopenharmony_ci		     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
29488c2ecf20Sopenharmony_ci	stream->poll_check_timer.function = oa_poll_check_timer_cb;
29498c2ecf20Sopenharmony_ci	init_waitqueue_head(&stream->poll_wq);
29508c2ecf20Sopenharmony_ci	spin_lock_init(&stream->oa_buffer.ptr_lock);
29518c2ecf20Sopenharmony_ci
29528c2ecf20Sopenharmony_ci	return 0;
29538c2ecf20Sopenharmony_ci
29548c2ecf20Sopenharmony_cierr_enable:
29558c2ecf20Sopenharmony_ci	WRITE_ONCE(perf->exclusive_stream, NULL);
29568c2ecf20Sopenharmony_ci	perf->ops.disable_metric_set(stream);
29578c2ecf20Sopenharmony_ci
29588c2ecf20Sopenharmony_ci	free_oa_buffer(stream);
29598c2ecf20Sopenharmony_ci
29608c2ecf20Sopenharmony_cierr_oa_buf_alloc:
29618c2ecf20Sopenharmony_ci	free_oa_configs(stream);
29628c2ecf20Sopenharmony_ci
29638c2ecf20Sopenharmony_ci	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
29648c2ecf20Sopenharmony_ci	intel_engine_pm_put(stream->engine);
29658c2ecf20Sopenharmony_ci
29668c2ecf20Sopenharmony_cierr_config:
29678c2ecf20Sopenharmony_ci	free_noa_wait(stream);
29688c2ecf20Sopenharmony_ci
29698c2ecf20Sopenharmony_cierr_noa_wait_alloc:
29708c2ecf20Sopenharmony_ci	if (stream->ctx)
29718c2ecf20Sopenharmony_ci		oa_put_render_ctx_id(stream);
29728c2ecf20Sopenharmony_ci
29738c2ecf20Sopenharmony_ci	return ret;
29748c2ecf20Sopenharmony_ci}
29758c2ecf20Sopenharmony_ci
29768c2ecf20Sopenharmony_civoid i915_oa_init_reg_state(const struct intel_context *ce,
29778c2ecf20Sopenharmony_ci			    const struct intel_engine_cs *engine)
29788c2ecf20Sopenharmony_ci{
29798c2ecf20Sopenharmony_ci	struct i915_perf_stream *stream;
29808c2ecf20Sopenharmony_ci
29818c2ecf20Sopenharmony_ci	if (engine->class != RENDER_CLASS)
29828c2ecf20Sopenharmony_ci		return;
29838c2ecf20Sopenharmony_ci
29848c2ecf20Sopenharmony_ci	/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
29858c2ecf20Sopenharmony_ci	stream = READ_ONCE(engine->i915->perf.exclusive_stream);
29868c2ecf20Sopenharmony_ci	if (stream && INTEL_GEN(stream->perf->i915) < 12)
29878c2ecf20Sopenharmony_ci		gen8_update_reg_state_unlocked(ce, stream);
29888c2ecf20Sopenharmony_ci}
29898c2ecf20Sopenharmony_ci
29908c2ecf20Sopenharmony_ci/**
29918c2ecf20Sopenharmony_ci * i915_perf_read - handles read() FOP for i915 perf stream FDs
29928c2ecf20Sopenharmony_ci * @file: An i915 perf stream file
29938c2ecf20Sopenharmony_ci * @buf: destination buffer given by userspace
29948c2ecf20Sopenharmony_ci * @count: the number of bytes userspace wants to read
29958c2ecf20Sopenharmony_ci * @ppos: (inout) file seek position (unused)
29968c2ecf20Sopenharmony_ci *
29978c2ecf20Sopenharmony_ci * The entry point for handling a read() on a stream file descriptor from
29988c2ecf20Sopenharmony_ci * userspace. Most of the work is left to the &i915_perf_stream_ops->read
29998c2ecf20Sopenharmony_ci * vfunc, but to save each stream implementation (of which we might have
30008c2ecf20Sopenharmony_ci * multiple later) from duplicating it, blocking reads are handled here.
30018c2ecf20Sopenharmony_ci *
30028c2ecf20Sopenharmony_ci * We can also consistently treat trying to read from a disabled stream
30038c2ecf20Sopenharmony_ci * as an IO error so implementations can assume the stream is enabled
30048c2ecf20Sopenharmony_ci * while reading.
30058c2ecf20Sopenharmony_ci *
30068c2ecf20Sopenharmony_ci * Returns: The number of bytes copied or a negative error code on failure.
30078c2ecf20Sopenharmony_ci */
30088c2ecf20Sopenharmony_cistatic ssize_t i915_perf_read(struct file *file,
30098c2ecf20Sopenharmony_ci			      char __user *buf,
30108c2ecf20Sopenharmony_ci			      size_t count,
30118c2ecf20Sopenharmony_ci			      loff_t *ppos)
30128c2ecf20Sopenharmony_ci{
30138c2ecf20Sopenharmony_ci	struct i915_perf_stream *stream = file->private_data;
30148c2ecf20Sopenharmony_ci	struct i915_perf *perf = stream->perf;
30158c2ecf20Sopenharmony_ci	size_t offset = 0;
30168c2ecf20Sopenharmony_ci	int ret;
30178c2ecf20Sopenharmony_ci
30188c2ecf20Sopenharmony_ci	/* To ensure it's handled consistently we simply treat all reads of a
30198c2ecf20Sopenharmony_ci	 * disabled stream as an error. In particular it might otherwise lead
30208c2ecf20Sopenharmony_ci	 * to a deadlock for blocking file descriptors...
30218c2ecf20Sopenharmony_ci	 */
30228c2ecf20Sopenharmony_ci	if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT))
30238c2ecf20Sopenharmony_ci		return -EIO;
30248c2ecf20Sopenharmony_ci
30258c2ecf20Sopenharmony_ci	if (!(file->f_flags & O_NONBLOCK)) {
30268c2ecf20Sopenharmony_ci		/* There's the small chance of false positives from
30278c2ecf20Sopenharmony_ci		 * stream->ops->wait_unlocked.
30288c2ecf20Sopenharmony_ci		 *
30298c2ecf20Sopenharmony_ci		 * E.g. with single context filtering, since we only wait until
30308c2ecf20Sopenharmony_ci		 * the OA buffer has >= 1 report, we don't immediately know
30318c2ecf20Sopenharmony_ci		 * whether any reports really belong to the current context.
30328c2ecf20Sopenharmony_ci		 */
30338c2ecf20Sopenharmony_ci		do {
30348c2ecf20Sopenharmony_ci			ret = stream->ops->wait_unlocked(stream);
30358c2ecf20Sopenharmony_ci			if (ret)
30368c2ecf20Sopenharmony_ci				return ret;
30378c2ecf20Sopenharmony_ci
30388c2ecf20Sopenharmony_ci			mutex_lock(&perf->lock);
30398c2ecf20Sopenharmony_ci			ret = stream->ops->read(stream, buf, count, &offset);
30408c2ecf20Sopenharmony_ci			mutex_unlock(&perf->lock);
30418c2ecf20Sopenharmony_ci		} while (!offset && !ret);
30428c2ecf20Sopenharmony_ci	} else {
30438c2ecf20Sopenharmony_ci		mutex_lock(&perf->lock);
30448c2ecf20Sopenharmony_ci		ret = stream->ops->read(stream, buf, count, &offset);
30458c2ecf20Sopenharmony_ci		mutex_unlock(&perf->lock);
30468c2ecf20Sopenharmony_ci	}
30478c2ecf20Sopenharmony_ci
30488c2ecf20Sopenharmony_ci	/* We allow the poll checking to sometimes report false positive EPOLLIN
30498c2ecf20Sopenharmony_ci	 * events where we might actually report EAGAIN on read() if there's
30508c2ecf20Sopenharmony_ci	 * not really any data available. In this situation though we don't
30518c2ecf20Sopenharmony_ci	 * want to enter a busy loop between poll() reporting an EPOLLIN event
30528c2ecf20Sopenharmony_ci	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
30538c2ecf20Sopenharmony_ci	 * effectively ensures we back off until the next hrtimer callback
30548c2ecf20Sopenharmony_ci	 * before reporting another EPOLLIN event.
30558c2ecf20Sopenharmony_ci	 * The exception to this is if ops->read() returned -ENOSPC which means
30568c2ecf20Sopenharmony_ci	 * that more OA data is available than could fit in the user provided
30578c2ecf20Sopenharmony_ci	 * buffer. In this case we want the next poll() call to not block.
30588c2ecf20Sopenharmony_ci	 */
30598c2ecf20Sopenharmony_ci	if (ret != -ENOSPC)
30608c2ecf20Sopenharmony_ci		stream->pollin = false;
30618c2ecf20Sopenharmony_ci
30628c2ecf20Sopenharmony_ci	/* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
30638c2ecf20Sopenharmony_ci	return offset ?: (ret ?: -EAGAIN);
30648c2ecf20Sopenharmony_ci}
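
/*
 * Illustrative sketch (not part of the driver): a minimal userspace consumer
 * of the stream fd serviced by i915_perf_read() above. The record layout and
 * type values come from the i915 perf uAPI; the buffer size and error
 * handling here are arbitrary placeholder choices.
 *
 *	uint8_t data[4096];
 *	ssize_t len = read(stream_fd, data, sizeof(data));
 *
 *	for (ssize_t off = 0; len > 0 && off < len; ) {
 *		const struct drm_i915_perf_record_header *hdr =
 *			(const void *)(data + off);
 *
 *		if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE) {
 *			// OA report payload follows the header when
 *			// SAMPLE_OA was requested at open time.
 *		} else if (hdr->type == DRM_I915_PERF_RECORD_OA_REPORT_LOST) {
 *			// Reports were lost; counter deltas spanning this
 *			// record may be unreliable.
 *		}
 *
 *		off += hdr->size;
 *	}
 */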
30658c2ecf20Sopenharmony_ci
30668c2ecf20Sopenharmony_cistatic enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
30678c2ecf20Sopenharmony_ci{
30688c2ecf20Sopenharmony_ci	struct i915_perf_stream *stream =
30698c2ecf20Sopenharmony_ci		container_of(hrtimer, typeof(*stream), poll_check_timer);
30708c2ecf20Sopenharmony_ci
30718c2ecf20Sopenharmony_ci	if (oa_buffer_check_unlocked(stream)) {
30728c2ecf20Sopenharmony_ci		stream->pollin = true;
30738c2ecf20Sopenharmony_ci		wake_up(&stream->poll_wq);
30748c2ecf20Sopenharmony_ci	}
30758c2ecf20Sopenharmony_ci
30768c2ecf20Sopenharmony_ci	hrtimer_forward_now(hrtimer,
30778c2ecf20Sopenharmony_ci			    ns_to_ktime(stream->poll_oa_period));
30788c2ecf20Sopenharmony_ci
30798c2ecf20Sopenharmony_ci	return HRTIMER_RESTART;
30808c2ecf20Sopenharmony_ci}
30818c2ecf20Sopenharmony_ci
30828c2ecf20Sopenharmony_ci/**
30838c2ecf20Sopenharmony_ci * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
30848c2ecf20Sopenharmony_ci * @stream: An i915 perf stream
30858c2ecf20Sopenharmony_ci * @file: An i915 perf stream file
30868c2ecf20Sopenharmony_ci * @wait: poll() state table
30878c2ecf20Sopenharmony_ci *
30888c2ecf20Sopenharmony_ci * For handling userspace polling on an i915 perf stream, this calls through to
30898c2ecf20Sopenharmony_ci * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
30908c2ecf20Sopenharmony_ci * will be woken for new stream data.
30918c2ecf20Sopenharmony_ci *
30928c2ecf20Sopenharmony_ci * Note: The &perf->lock mutex has been taken to serialize
30938c2ecf20Sopenharmony_ci * with any non-file-operation driver hooks.
30948c2ecf20Sopenharmony_ci *
30958c2ecf20Sopenharmony_ci * Returns: any poll events that are ready without sleeping
30968c2ecf20Sopenharmony_ci */
30978c2ecf20Sopenharmony_cistatic __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
30988c2ecf20Sopenharmony_ci				      struct file *file,
30998c2ecf20Sopenharmony_ci				      poll_table *wait)
31008c2ecf20Sopenharmony_ci{
31018c2ecf20Sopenharmony_ci	__poll_t events = 0;
31028c2ecf20Sopenharmony_ci
31038c2ecf20Sopenharmony_ci	stream->ops->poll_wait(stream, file, wait);
31048c2ecf20Sopenharmony_ci
31058c2ecf20Sopenharmony_ci	/* Note: we don't explicitly check whether there's something to read
31068c2ecf20Sopenharmony_ci	 * here since this path may be very hot depending on what else
31078c2ecf20Sopenharmony_ci	 * userspace is polling, or on the timeout in use. We rely solely on
31088c2ecf20Sopenharmony_ci	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
31098c2ecf20Sopenharmony_ci	 * samples to read.
31108c2ecf20Sopenharmony_ci	 */
31118c2ecf20Sopenharmony_ci	if (stream->pollin)
31128c2ecf20Sopenharmony_ci		events |= EPOLLIN;
31138c2ecf20Sopenharmony_ci
31148c2ecf20Sopenharmony_ci	return events;
31158c2ecf20Sopenharmony_ci}
31168c2ecf20Sopenharmony_ci
31178c2ecf20Sopenharmony_ci/**
31188c2ecf20Sopenharmony_ci * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
31198c2ecf20Sopenharmony_ci * @file: An i915 perf stream file
31208c2ecf20Sopenharmony_ci * @wait: poll() state table
31218c2ecf20Sopenharmony_ci *
31228c2ecf20Sopenharmony_ci * For handling userspace polling on an i915 perf stream, this ensures
31238c2ecf20Sopenharmony_ci * poll_wait() gets called with a wait queue that will be woken for new stream
31248c2ecf20Sopenharmony_ci * data.
31258c2ecf20Sopenharmony_ci *
31268c2ecf20Sopenharmony_ci * Note: Implementation deferred to i915_perf_poll_locked()
31278c2ecf20Sopenharmony_ci *
31288c2ecf20Sopenharmony_ci * Returns: any poll events that are ready without sleeping
31298c2ecf20Sopenharmony_ci */
31308c2ecf20Sopenharmony_cistatic __poll_t i915_perf_poll(struct file *file, poll_table *wait)
31318c2ecf20Sopenharmony_ci{
31328c2ecf20Sopenharmony_ci	struct i915_perf_stream *stream = file->private_data;
31338c2ecf20Sopenharmony_ci	struct i915_perf *perf = stream->perf;
31348c2ecf20Sopenharmony_ci	__poll_t ret;
31358c2ecf20Sopenharmony_ci
31368c2ecf20Sopenharmony_ci	mutex_lock(&perf->lock);
31378c2ecf20Sopenharmony_ci	ret = i915_perf_poll_locked(stream, file, wait);
31388c2ecf20Sopenharmony_ci	mutex_unlock(&perf->lock);
31398c2ecf20Sopenharmony_ci
31408c2ecf20Sopenharmony_ci	return ret;
31418c2ecf20Sopenharmony_ci}
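
/*
 * Illustrative sketch (not part of the driver): waiting for EPOLLIN on a
 * stream fd opened with I915_PERF_FLAG_FD_NONBLOCK before reading, which
 * pairs with the hrtimer driven wakeups above. The 1000ms timeout is an
 * arbitrary example value.
 *
 *	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN))
 *		len = read(stream_fd, data, sizeof(data));
 *
 * A wakeup only means the driver believes data may be available; the
 * subsequent read() can still fail with EAGAIN, as noted in i915_perf_read().
 */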
31428c2ecf20Sopenharmony_ci
31438c2ecf20Sopenharmony_ci/**
31448c2ecf20Sopenharmony_ci * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
31458c2ecf20Sopenharmony_ci * @stream: A disabled i915 perf stream
31468c2ecf20Sopenharmony_ci *
31478c2ecf20Sopenharmony_ci * [Re]enables the associated capture of data for this stream.
31488c2ecf20Sopenharmony_ci *
31498c2ecf20Sopenharmony_ci * If a stream was previously enabled then there's currently no intention
31508c2ecf20Sopenharmony_ci * to provide userspace any guarantee about the preservation of previously
31518c2ecf20Sopenharmony_ci * buffered data.
31528c2ecf20Sopenharmony_ci */
31538c2ecf20Sopenharmony_cistatic void i915_perf_enable_locked(struct i915_perf_stream *stream)
31548c2ecf20Sopenharmony_ci{
31558c2ecf20Sopenharmony_ci	if (stream->enabled)
31568c2ecf20Sopenharmony_ci		return;
31578c2ecf20Sopenharmony_ci
31588c2ecf20Sopenharmony_ci	/* Allow stream->ops->enable() to refer to this */
31598c2ecf20Sopenharmony_ci	stream->enabled = true;
31608c2ecf20Sopenharmony_ci
31618c2ecf20Sopenharmony_ci	if (stream->ops->enable)
31628c2ecf20Sopenharmony_ci		stream->ops->enable(stream);
31638c2ecf20Sopenharmony_ci
31648c2ecf20Sopenharmony_ci	if (stream->hold_preemption)
31658c2ecf20Sopenharmony_ci		intel_context_set_nopreempt(stream->pinned_ctx);
31668c2ecf20Sopenharmony_ci}
31678c2ecf20Sopenharmony_ci
31688c2ecf20Sopenharmony_ci/**
31698c2ecf20Sopenharmony_ci * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
31708c2ecf20Sopenharmony_ci * @stream: An enabled i915 perf stream
31718c2ecf20Sopenharmony_ci *
31728c2ecf20Sopenharmony_ci * Disables the associated capture of data for this stream.
31738c2ecf20Sopenharmony_ci *
31748c2ecf20Sopenharmony_ci * The intention is that disabling and re-enabling a stream will ideally be
31758c2ecf20Sopenharmony_ci * cheaper than destroying and re-opening a stream with the same configuration,
31768c2ecf20Sopenharmony_ci * though there are no formal guarantees about what state or buffered data
31778c2ecf20Sopenharmony_ci * must be retained between disabling and re-enabling a stream.
31788c2ecf20Sopenharmony_ci *
31798c2ecf20Sopenharmony_ci * Note: while a stream is disabled it's considered an error for userspace
31808c2ecf20Sopenharmony_ci * to attempt to read from the stream (-EIO).
31818c2ecf20Sopenharmony_ci */
31828c2ecf20Sopenharmony_cistatic void i915_perf_disable_locked(struct i915_perf_stream *stream)
31838c2ecf20Sopenharmony_ci{
31848c2ecf20Sopenharmony_ci	if (!stream->enabled)
31858c2ecf20Sopenharmony_ci		return;
31868c2ecf20Sopenharmony_ci
31878c2ecf20Sopenharmony_ci	/* Allow stream->ops->disable() to refer to this */
31888c2ecf20Sopenharmony_ci	stream->enabled = false;
31898c2ecf20Sopenharmony_ci
31908c2ecf20Sopenharmony_ci	if (stream->hold_preemption)
31918c2ecf20Sopenharmony_ci		intel_context_clear_nopreempt(stream->pinned_ctx);
31928c2ecf20Sopenharmony_ci
31938c2ecf20Sopenharmony_ci	if (stream->ops->disable)
31948c2ecf20Sopenharmony_ci		stream->ops->disable(stream);
31958c2ecf20Sopenharmony_ci}
31968c2ecf20Sopenharmony_ci
31978c2ecf20Sopenharmony_cistatic long i915_perf_config_locked(struct i915_perf_stream *stream,
31988c2ecf20Sopenharmony_ci				    unsigned long metrics_set)
31998c2ecf20Sopenharmony_ci{
32008c2ecf20Sopenharmony_ci	struct i915_oa_config *config;
32018c2ecf20Sopenharmony_ci	long ret = stream->oa_config->id;
32028c2ecf20Sopenharmony_ci
32038c2ecf20Sopenharmony_ci	config = i915_perf_get_oa_config(stream->perf, metrics_set);
32048c2ecf20Sopenharmony_ci	if (!config)
32058c2ecf20Sopenharmony_ci		return -EINVAL;
32068c2ecf20Sopenharmony_ci
32078c2ecf20Sopenharmony_ci	if (config != stream->oa_config) {
32088c2ecf20Sopenharmony_ci		int err;
32098c2ecf20Sopenharmony_ci
32108c2ecf20Sopenharmony_ci		/*
32118c2ecf20Sopenharmony_ci		 * If OA is bound to a specific context, emit the
32128c2ecf20Sopenharmony_ci		 * reconfiguration inline from that context. The update
32138c2ecf20Sopenharmony_ci		 * will then be ordered with respect to submission on that
32148c2ecf20Sopenharmony_ci		 * context.
32158c2ecf20Sopenharmony_ci		 *
32168c2ecf20Sopenharmony_ci		 * When set globally, we use a low priority kernel context,
32178c2ecf20Sopenharmony_ci		 * so the update will effectively take effect when idle.
32188c2ecf20Sopenharmony_ci		 */
32198c2ecf20Sopenharmony_ci		err = emit_oa_config(stream, config, oa_context(stream), NULL);
32208c2ecf20Sopenharmony_ci		if (!err)
32218c2ecf20Sopenharmony_ci			config = xchg(&stream->oa_config, config);
32228c2ecf20Sopenharmony_ci		else
32238c2ecf20Sopenharmony_ci			ret = err;
32248c2ecf20Sopenharmony_ci	}
32258c2ecf20Sopenharmony_ci
32268c2ecf20Sopenharmony_ci	i915_oa_config_put(config);
32278c2ecf20Sopenharmony_ci
32288c2ecf20Sopenharmony_ci	return ret;
32298c2ecf20Sopenharmony_ci}
32308c2ecf20Sopenharmony_ci
32318c2ecf20Sopenharmony_ci/**
32328c2ecf20Sopenharmony_ci * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
32338c2ecf20Sopenharmony_ci * @stream: An i915 perf stream
32348c2ecf20Sopenharmony_ci * @cmd: the ioctl request
32358c2ecf20Sopenharmony_ci * @arg: the ioctl data
32368c2ecf20Sopenharmony_ci *
32378c2ecf20Sopenharmony_ci * Note: The &perf->lock mutex has been taken to serialize
32388c2ecf20Sopenharmony_ci * with any non-file-operation driver hooks.
32398c2ecf20Sopenharmony_ci *
32408c2ecf20Sopenharmony_ci * Returns: zero on success or a negative error code. Returns -EINVAL for
32418c2ecf20Sopenharmony_ci * an unknown ioctl request.
32428c2ecf20Sopenharmony_ci */
32438c2ecf20Sopenharmony_cistatic long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
32448c2ecf20Sopenharmony_ci				   unsigned int cmd,
32458c2ecf20Sopenharmony_ci				   unsigned long arg)
32468c2ecf20Sopenharmony_ci{
32478c2ecf20Sopenharmony_ci	switch (cmd) {
32488c2ecf20Sopenharmony_ci	case I915_PERF_IOCTL_ENABLE:
32498c2ecf20Sopenharmony_ci		i915_perf_enable_locked(stream);
32508c2ecf20Sopenharmony_ci		return 0;
32518c2ecf20Sopenharmony_ci	case I915_PERF_IOCTL_DISABLE:
32528c2ecf20Sopenharmony_ci		i915_perf_disable_locked(stream);
32538c2ecf20Sopenharmony_ci		return 0;
32548c2ecf20Sopenharmony_ci	case I915_PERF_IOCTL_CONFIG:
32558c2ecf20Sopenharmony_ci		return i915_perf_config_locked(stream, arg);
32568c2ecf20Sopenharmony_ci	}
32578c2ecf20Sopenharmony_ci
32588c2ecf20Sopenharmony_ci	return -EINVAL;
32598c2ecf20Sopenharmony_ci}
32608c2ecf20Sopenharmony_ci
32618c2ecf20Sopenharmony_ci/**
32628c2ecf20Sopenharmony_ci * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
32638c2ecf20Sopenharmony_ci * @file: An i915 perf stream file
32648c2ecf20Sopenharmony_ci * @cmd: the ioctl request
32658c2ecf20Sopenharmony_ci * @arg: the ioctl data
32668c2ecf20Sopenharmony_ci *
32678c2ecf20Sopenharmony_ci * Implementation deferred to i915_perf_ioctl_locked().
32688c2ecf20Sopenharmony_ci *
32698c2ecf20Sopenharmony_ci * Returns: zero on success or a negative error code. Returns -EINVAL for
32708c2ecf20Sopenharmony_ci * an unknown ioctl request.
32718c2ecf20Sopenharmony_ci */
32728c2ecf20Sopenharmony_cistatic long i915_perf_ioctl(struct file *file,
32738c2ecf20Sopenharmony_ci			    unsigned int cmd,
32748c2ecf20Sopenharmony_ci			    unsigned long arg)
32758c2ecf20Sopenharmony_ci{
32768c2ecf20Sopenharmony_ci	struct i915_perf_stream *stream = file->private_data;
32778c2ecf20Sopenharmony_ci	struct i915_perf *perf = stream->perf;
32788c2ecf20Sopenharmony_ci	long ret;
32798c2ecf20Sopenharmony_ci
32808c2ecf20Sopenharmony_ci	mutex_lock(&perf->lock);
32818c2ecf20Sopenharmony_ci	ret = i915_perf_ioctl_locked(stream, cmd, arg);
32828c2ecf20Sopenharmony_ci	mutex_unlock(&perf->lock);
32838c2ecf20Sopenharmony_ci
32848c2ecf20Sopenharmony_ci	return ret;
32858c2ecf20Sopenharmony_ci}
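
/*
 * Illustrative sketch (not part of the driver): the three stream ioctls as
 * dispatched by i915_perf_ioctl_locked(). "metrics_set_id" is a placeholder
 * for an ID advertised under sysfs or returned by
 * DRM_IOCTL_I915_PERF_ADD_CONFIG.
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 *	ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, metrics_set_id);
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
 */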
32868c2ecf20Sopenharmony_ci
32878c2ecf20Sopenharmony_ci/**
32888c2ecf20Sopenharmony_ci * i915_perf_destroy_locked - destroy an i915 perf stream
32898c2ecf20Sopenharmony_ci * @stream: An i915 perf stream
32908c2ecf20Sopenharmony_ci *
32918c2ecf20Sopenharmony_ci * Frees all resources associated with the given i915 perf @stream, disabling
32928c2ecf20Sopenharmony_ci * any associated data capture in the process.
32938c2ecf20Sopenharmony_ci *
32948c2ecf20Sopenharmony_ci * Note: The &perf->lock mutex has been taken to serialize
32958c2ecf20Sopenharmony_ci * with any non-file-operation driver hooks.
32968c2ecf20Sopenharmony_ci */
32978c2ecf20Sopenharmony_cistatic void i915_perf_destroy_locked(struct i915_perf_stream *stream)
32988c2ecf20Sopenharmony_ci{
32998c2ecf20Sopenharmony_ci	if (stream->enabled)
33008c2ecf20Sopenharmony_ci		i915_perf_disable_locked(stream);
33018c2ecf20Sopenharmony_ci
33028c2ecf20Sopenharmony_ci	if (stream->ops->destroy)
33038c2ecf20Sopenharmony_ci		stream->ops->destroy(stream);
33048c2ecf20Sopenharmony_ci
33058c2ecf20Sopenharmony_ci	if (stream->ctx)
33068c2ecf20Sopenharmony_ci		i915_gem_context_put(stream->ctx);
33078c2ecf20Sopenharmony_ci
33088c2ecf20Sopenharmony_ci	kfree(stream);
33098c2ecf20Sopenharmony_ci}
33108c2ecf20Sopenharmony_ci
33118c2ecf20Sopenharmony_ci/**
33128c2ecf20Sopenharmony_ci * i915_perf_release - handles userspace close() of a stream file
33138c2ecf20Sopenharmony_ci * @inode: anonymous inode associated with file
33148c2ecf20Sopenharmony_ci * @file: An i915 perf stream file
33158c2ecf20Sopenharmony_ci *
33168c2ecf20Sopenharmony_ci * Cleans up any resources associated with an open i915 perf stream file.
33178c2ecf20Sopenharmony_ci *
33188c2ecf20Sopenharmony_ci * NB: close() can't really fail from the userspace point of view.
33198c2ecf20Sopenharmony_ci *
33208c2ecf20Sopenharmony_ci * Returns: zero on success or a negative error code.
33218c2ecf20Sopenharmony_ci */
33228c2ecf20Sopenharmony_cistatic int i915_perf_release(struct inode *inode, struct file *file)
33238c2ecf20Sopenharmony_ci{
33248c2ecf20Sopenharmony_ci	struct i915_perf_stream *stream = file->private_data;
33258c2ecf20Sopenharmony_ci	struct i915_perf *perf = stream->perf;
33268c2ecf20Sopenharmony_ci
33278c2ecf20Sopenharmony_ci	mutex_lock(&perf->lock);
33288c2ecf20Sopenharmony_ci	i915_perf_destroy_locked(stream);
33298c2ecf20Sopenharmony_ci	mutex_unlock(&perf->lock);
33308c2ecf20Sopenharmony_ci
33318c2ecf20Sopenharmony_ci	/* Release the reference the perf stream kept on the driver. */
33328c2ecf20Sopenharmony_ci	drm_dev_put(&perf->i915->drm);
33338c2ecf20Sopenharmony_ci
33348c2ecf20Sopenharmony_ci	return 0;
33358c2ecf20Sopenharmony_ci}
33368c2ecf20Sopenharmony_ci
33378c2ecf20Sopenharmony_ci
33388c2ecf20Sopenharmony_cistatic const struct file_operations fops = {
33398c2ecf20Sopenharmony_ci	.owner		= THIS_MODULE,
33408c2ecf20Sopenharmony_ci	.llseek		= no_llseek,
33418c2ecf20Sopenharmony_ci	.release	= i915_perf_release,
33428c2ecf20Sopenharmony_ci	.poll		= i915_perf_poll,
33438c2ecf20Sopenharmony_ci	.read		= i915_perf_read,
33448c2ecf20Sopenharmony_ci	.unlocked_ioctl	= i915_perf_ioctl,
33458c2ecf20Sopenharmony_ci	/* Our ioctls have no arguments, so it's safe to use the same function
33468c2ecf20Sopenharmony_ci	 * to handle 32-bit compatibility.
33478c2ecf20Sopenharmony_ci	 */
33488c2ecf20Sopenharmony_ci	.compat_ioctl   = i915_perf_ioctl,
33498c2ecf20Sopenharmony_ci};
33508c2ecf20Sopenharmony_ci
33518c2ecf20Sopenharmony_ci
33528c2ecf20Sopenharmony_ci/**
33538c2ecf20Sopenharmony_ci * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
33548c2ecf20Sopenharmony_ci * @perf: i915 perf instance
33558c2ecf20Sopenharmony_ci * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
33568c2ecf20Sopenharmony_ci * @props: individually validated u64 property value pairs
33578c2ecf20Sopenharmony_ci * @file: drm file
33588c2ecf20Sopenharmony_ci *
33598c2ecf20Sopenharmony_ci * See i915_perf_open_ioctl() for interface details.
33608c2ecf20Sopenharmony_ci *
33618c2ecf20Sopenharmony_ci * Implements further stream config validation and stream initialization on
33628c2ecf20Sopenharmony_ci * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
33638c2ecf20Sopenharmony_ci * taken to serialize with any non-file-operation driver hooks.
33648c2ecf20Sopenharmony_ci *
33658c2ecf20Sopenharmony_ci * Note: at this point the @props have only been validated in isolation and
33668c2ecf20Sopenharmony_ci * it's still necessary to validate that the combination of properties makes
33678c2ecf20Sopenharmony_ci * sense.
33688c2ecf20Sopenharmony_ci *
33698c2ecf20Sopenharmony_ci * In the case where userspace is interested in OA unit metrics then further
33708c2ecf20Sopenharmony_ci * config validation and stream initialization details will be handled by
33718c2ecf20Sopenharmony_ci * i915_oa_stream_init(). The code here should only validate config state that
33728c2ecf20Sopenharmony_ci * will be relevant to all stream types / backends.
33738c2ecf20Sopenharmony_ci *
33748c2ecf20Sopenharmony_ci * Returns: zero on success or a negative error code.
33758c2ecf20Sopenharmony_ci */
33768c2ecf20Sopenharmony_cistatic int
33778c2ecf20Sopenharmony_cii915_perf_open_ioctl_locked(struct i915_perf *perf,
33788c2ecf20Sopenharmony_ci			    struct drm_i915_perf_open_param *param,
33798c2ecf20Sopenharmony_ci			    struct perf_open_properties *props,
33808c2ecf20Sopenharmony_ci			    struct drm_file *file)
33818c2ecf20Sopenharmony_ci{
33828c2ecf20Sopenharmony_ci	struct i915_gem_context *specific_ctx = NULL;
33838c2ecf20Sopenharmony_ci	struct i915_perf_stream *stream = NULL;
33848c2ecf20Sopenharmony_ci	unsigned long f_flags = 0;
33858c2ecf20Sopenharmony_ci	bool privileged_op = true;
33868c2ecf20Sopenharmony_ci	int stream_fd;
33878c2ecf20Sopenharmony_ci	int ret;
33888c2ecf20Sopenharmony_ci
33898c2ecf20Sopenharmony_ci	if (props->single_context) {
33908c2ecf20Sopenharmony_ci		u32 ctx_handle = props->ctx_handle;
33918c2ecf20Sopenharmony_ci		struct drm_i915_file_private *file_priv = file->driver_priv;
33928c2ecf20Sopenharmony_ci
33938c2ecf20Sopenharmony_ci		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
33948c2ecf20Sopenharmony_ci		if (!specific_ctx) {
33958c2ecf20Sopenharmony_ci			DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
33968c2ecf20Sopenharmony_ci				  ctx_handle);
33978c2ecf20Sopenharmony_ci			ret = -ENOENT;
33988c2ecf20Sopenharmony_ci			goto err;
33998c2ecf20Sopenharmony_ci		}
34008c2ecf20Sopenharmony_ci	}
34018c2ecf20Sopenharmony_ci
34028c2ecf20Sopenharmony_ci	/*
34038c2ecf20Sopenharmony_ci	 * On Haswell the OA unit supports clock gating off for a specific
34048c2ecf20Sopenharmony_ci	 * context and in this mode there's no visibility of metrics for the
34058c2ecf20Sopenharmony_ci	 * rest of the system, which we consider acceptable for a
34068c2ecf20Sopenharmony_ci	 * non-privileged client.
34078c2ecf20Sopenharmony_ci	 *
34088c2ecf20Sopenharmony_ci	 * For Gen8->11 the OA unit no longer supports clock gating off for a
34098c2ecf20Sopenharmony_ci	 * specific context and the kernel can't securely stop the counters
34108c2ecf20Sopenharmony_ci	 * from updating as system-wide / global values. Even though we can
34118c2ecf20Sopenharmony_ci	 * filter reports based on the included context ID we can't block
34128c2ecf20Sopenharmony_ci	 * clients from seeing the raw / global counter values via
34138c2ecf20Sopenharmony_ci	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
34148c2ecf20Sopenharmony_ci	 * enable the OA unit by default.
34158c2ecf20Sopenharmony_ci	 *
34168c2ecf20Sopenharmony_ci	 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
34178c2ecf20Sopenharmony_ci	 * per context basis. So we can relax requirements there if the user
34188c2ecf20Sopenharmony_ci	 * doesn't request global stream access (i.e. query based sampling
34198c2ecf20Sopenharmony_ci	 * using MI_REPORT_PERF_COUNT).
34208c2ecf20Sopenharmony_ci	 */
34218c2ecf20Sopenharmony_ci	if (IS_HASWELL(perf->i915) && specific_ctx)
34228c2ecf20Sopenharmony_ci		privileged_op = false;
34238c2ecf20Sopenharmony_ci	else if (IS_GEN(perf->i915, 12) && specific_ctx &&
34248c2ecf20Sopenharmony_ci		 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
34258c2ecf20Sopenharmony_ci		privileged_op = false;
34268c2ecf20Sopenharmony_ci
34278c2ecf20Sopenharmony_ci	if (props->hold_preemption) {
34288c2ecf20Sopenharmony_ci		if (!props->single_context) {
34298c2ecf20Sopenharmony_ci			DRM_DEBUG("preemption disable with no context\n");
34308c2ecf20Sopenharmony_ci			ret = -EINVAL;
34318c2ecf20Sopenharmony_ci			goto err;
34328c2ecf20Sopenharmony_ci		}
34338c2ecf20Sopenharmony_ci		privileged_op = true;
34348c2ecf20Sopenharmony_ci	}
34358c2ecf20Sopenharmony_ci
34368c2ecf20Sopenharmony_ci	/*
34378c2ecf20Sopenharmony_ci	 * Asking for SSEU configuration is a privileged operation.
34388c2ecf20Sopenharmony_ci	 */
34398c2ecf20Sopenharmony_ci	if (props->has_sseu)
34408c2ecf20Sopenharmony_ci		privileged_op = true;
34418c2ecf20Sopenharmony_ci	else
34428c2ecf20Sopenharmony_ci		get_default_sseu_config(&props->sseu, props->engine);
34438c2ecf20Sopenharmony_ci
34448c2ecf20Sopenharmony_ci	/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
34458c2ecf20Sopenharmony_ci	 * we check a dev.i915.perf_stream_paranoid sysctl option
34468c2ecf20Sopenharmony_ci	 * to determine if it's ok to access system wide OA counters
34478c2ecf20Sopenharmony_ci	 * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
34488c2ecf20Sopenharmony_ci	 */
34498c2ecf20Sopenharmony_ci	if (privileged_op &&
34508c2ecf20Sopenharmony_ci	    i915_perf_stream_paranoid && !perfmon_capable()) {
34518c2ecf20Sopenharmony_ci		DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
34528c2ecf20Sopenharmony_ci		ret = -EACCES;
34538c2ecf20Sopenharmony_ci		goto err_ctx;
34548c2ecf20Sopenharmony_ci	}
34558c2ecf20Sopenharmony_ci
34568c2ecf20Sopenharmony_ci	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
34578c2ecf20Sopenharmony_ci	if (!stream) {
34588c2ecf20Sopenharmony_ci		ret = -ENOMEM;
34598c2ecf20Sopenharmony_ci		goto err_ctx;
34608c2ecf20Sopenharmony_ci	}
34618c2ecf20Sopenharmony_ci
34628c2ecf20Sopenharmony_ci	stream->perf = perf;
34638c2ecf20Sopenharmony_ci	stream->ctx = specific_ctx;
34648c2ecf20Sopenharmony_ci	stream->poll_oa_period = props->poll_oa_period;
34658c2ecf20Sopenharmony_ci
34668c2ecf20Sopenharmony_ci	ret = i915_oa_stream_init(stream, param, props);
34678c2ecf20Sopenharmony_ci	if (ret)
34688c2ecf20Sopenharmony_ci		goto err_alloc;
34698c2ecf20Sopenharmony_ci
34708c2ecf20Sopenharmony_ci	/* We avoid simply assigning stream->sample_flags = props->sample_flags
34718c2ecf20Sopenharmony_ci	 * so that _stream_init can check the combination of sample flags more
34728c2ecf20Sopenharmony_ci	 * thoroughly, but this is still the expected result at this point.
34738c2ecf20Sopenharmony_ci	 */
34748c2ecf20Sopenharmony_ci	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
34758c2ecf20Sopenharmony_ci		ret = -ENODEV;
34768c2ecf20Sopenharmony_ci		goto err_flags;
34778c2ecf20Sopenharmony_ci	}
34788c2ecf20Sopenharmony_ci
34798c2ecf20Sopenharmony_ci	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
34808c2ecf20Sopenharmony_ci		f_flags |= O_CLOEXEC;
34818c2ecf20Sopenharmony_ci	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
34828c2ecf20Sopenharmony_ci		f_flags |= O_NONBLOCK;
34838c2ecf20Sopenharmony_ci
34848c2ecf20Sopenharmony_ci	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
34858c2ecf20Sopenharmony_ci	if (stream_fd < 0) {
34868c2ecf20Sopenharmony_ci		ret = stream_fd;
34878c2ecf20Sopenharmony_ci		goto err_flags;
34888c2ecf20Sopenharmony_ci	}
34898c2ecf20Sopenharmony_ci
34908c2ecf20Sopenharmony_ci	if (!(param->flags & I915_PERF_FLAG_DISABLED))
34918c2ecf20Sopenharmony_ci		i915_perf_enable_locked(stream);
34928c2ecf20Sopenharmony_ci
34938c2ecf20Sopenharmony_ci	/* Take a reference on the driver that will be kept with stream_fd
34948c2ecf20Sopenharmony_ci	 * until its release.
34958c2ecf20Sopenharmony_ci	 */
34968c2ecf20Sopenharmony_ci	drm_dev_get(&perf->i915->drm);
34978c2ecf20Sopenharmony_ci
34988c2ecf20Sopenharmony_ci	return stream_fd;
34998c2ecf20Sopenharmony_ci
35008c2ecf20Sopenharmony_cierr_flags:
35018c2ecf20Sopenharmony_ci	if (stream->ops->destroy)
35028c2ecf20Sopenharmony_ci		stream->ops->destroy(stream);
35038c2ecf20Sopenharmony_cierr_alloc:
35048c2ecf20Sopenharmony_ci	kfree(stream);
35058c2ecf20Sopenharmony_cierr_ctx:
35068c2ecf20Sopenharmony_ci	if (specific_ctx)
35078c2ecf20Sopenharmony_ci		i915_gem_context_put(specific_ctx);
35088c2ecf20Sopenharmony_cierr:
35098c2ecf20Sopenharmony_ci	return ret;
35108c2ecf20Sopenharmony_ci}
35118c2ecf20Sopenharmony_ci
35128c2ecf20Sopenharmony_cistatic u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
35138c2ecf20Sopenharmony_ci{
35148c2ecf20Sopenharmony_ci	return i915_cs_timestamp_ticks_to_ns(perf->i915, 2ULL << exponent);
35158c2ecf20Sopenharmony_ci}
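
/*
 * The period selected by DRM_I915_PERF_PROP_OA_EXPONENT corresponds to
 * 2^(exponent + 1) command streamer timestamp ticks:
 *
 *	period_ns = (2 << exponent) * NSEC_PER_SEC / cs_timestamp_frequency
 *
 * e.g. with a 12.5MHz timestamp (80ns per tick, as in the Haswell example
 * below) exponent 0 gives a 160ns period and exponent 31 gives roughly
 * 343 seconds.
 */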
35168c2ecf20Sopenharmony_ci
35178c2ecf20Sopenharmony_ci/**
35188c2ecf20Sopenharmony_ci * read_properties_unlocked - validate + copy userspace stream open properties
35198c2ecf20Sopenharmony_ci * @perf: i915 perf instance
35208c2ecf20Sopenharmony_ci * @uprops: The array of u64 key value pairs given by userspace
35218c2ecf20Sopenharmony_ci * @n_props: The number of key value pairs expected in @uprops
35228c2ecf20Sopenharmony_ci * @props: The stream configuration built up while validating properties
35238c2ecf20Sopenharmony_ci *
35248c2ecf20Sopenharmony_ci * Note this function only validates properties in isolation; it doesn't
35258c2ecf20Sopenharmony_ci * validate that the combination of properties makes sense or that all
35268c2ecf20Sopenharmony_ci * properties necessary for a particular kind of stream have been set.
35278c2ecf20Sopenharmony_ci *
35288c2ecf20Sopenharmony_ci * Note that there currently aren't any ordering requirements for properties so
35298c2ecf20Sopenharmony_ci * we shouldn't validate or assume anything about ordering here. This doesn't
35308c2ecf20Sopenharmony_ci * rule out defining new properties with ordering requirements in the future.
35318c2ecf20Sopenharmony_ci */
35328c2ecf20Sopenharmony_cistatic int read_properties_unlocked(struct i915_perf *perf,
35338c2ecf20Sopenharmony_ci				    u64 __user *uprops,
35348c2ecf20Sopenharmony_ci				    u32 n_props,
35358c2ecf20Sopenharmony_ci				    struct perf_open_properties *props)
35368c2ecf20Sopenharmony_ci{
35378c2ecf20Sopenharmony_ci	u64 __user *uprop = uprops;
35388c2ecf20Sopenharmony_ci	u32 i;
35398c2ecf20Sopenharmony_ci	int ret;
35408c2ecf20Sopenharmony_ci
35418c2ecf20Sopenharmony_ci	memset(props, 0, sizeof(struct perf_open_properties));
35428c2ecf20Sopenharmony_ci	props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
35438c2ecf20Sopenharmony_ci
35448c2ecf20Sopenharmony_ci	if (!n_props) {
35458c2ecf20Sopenharmony_ci		DRM_DEBUG("No i915 perf properties given\n");
35468c2ecf20Sopenharmony_ci		return -EINVAL;
35478c2ecf20Sopenharmony_ci	}
35488c2ecf20Sopenharmony_ci
35498c2ecf20Sopenharmony_ci	/* At the moment we only support using i915-perf on the RCS. */
35508c2ecf20Sopenharmony_ci	props->engine = intel_engine_lookup_user(perf->i915,
35518c2ecf20Sopenharmony_ci						 I915_ENGINE_CLASS_RENDER,
35528c2ecf20Sopenharmony_ci						 0);
35538c2ecf20Sopenharmony_ci	if (!props->engine) {
35548c2ecf20Sopenharmony_ci		DRM_DEBUG("No RENDER-capable engines\n");
35558c2ecf20Sopenharmony_ci		return -EINVAL;
35568c2ecf20Sopenharmony_ci	}
35578c2ecf20Sopenharmony_ci
35588c2ecf20Sopenharmony_ci	/* Considering that ID = 0 is reserved and assuming that we don't
35598c2ecf20Sopenharmony_ci	 * (currently) expect any configurations to ever specify duplicate
35608c2ecf20Sopenharmony_ci	 * values for a particular property ID, the last _PROP_MAX value is
35618c2ecf20Sopenharmony_ci	 * one greater than the maximum number of properties we expect to get
35628c2ecf20Sopenharmony_ci	 * from userspace.
35638c2ecf20Sopenharmony_ci	 */
35648c2ecf20Sopenharmony_ci	if (n_props >= DRM_I915_PERF_PROP_MAX) {
35658c2ecf20Sopenharmony_ci		DRM_DEBUG("More i915 perf properties specified than exist\n");
35668c2ecf20Sopenharmony_ci		return -EINVAL;
35678c2ecf20Sopenharmony_ci	}
35688c2ecf20Sopenharmony_ci
35698c2ecf20Sopenharmony_ci	for (i = 0; i < n_props; i++) {
35708c2ecf20Sopenharmony_ci		u64 oa_period, oa_freq_hz;
35718c2ecf20Sopenharmony_ci		u64 id, value;
35728c2ecf20Sopenharmony_ci
35738c2ecf20Sopenharmony_ci		ret = get_user(id, uprop);
35748c2ecf20Sopenharmony_ci		if (ret)
35758c2ecf20Sopenharmony_ci			return ret;
35768c2ecf20Sopenharmony_ci
35778c2ecf20Sopenharmony_ci		ret = get_user(value, uprop + 1);
35788c2ecf20Sopenharmony_ci		if (ret)
35798c2ecf20Sopenharmony_ci			return ret;
35808c2ecf20Sopenharmony_ci
35818c2ecf20Sopenharmony_ci		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
35828c2ecf20Sopenharmony_ci			DRM_DEBUG("Unknown i915 perf property ID\n");
35838c2ecf20Sopenharmony_ci			return -EINVAL;
35848c2ecf20Sopenharmony_ci		}
35858c2ecf20Sopenharmony_ci
35868c2ecf20Sopenharmony_ci		switch ((enum drm_i915_perf_property_id)id) {
35878c2ecf20Sopenharmony_ci		case DRM_I915_PERF_PROP_CTX_HANDLE:
35888c2ecf20Sopenharmony_ci			props->single_context = 1;
35898c2ecf20Sopenharmony_ci			props->ctx_handle = value;
35908c2ecf20Sopenharmony_ci			break;
35918c2ecf20Sopenharmony_ci		case DRM_I915_PERF_PROP_SAMPLE_OA:
35928c2ecf20Sopenharmony_ci			if (value)
35938c2ecf20Sopenharmony_ci				props->sample_flags |= SAMPLE_OA_REPORT;
35948c2ecf20Sopenharmony_ci			break;
35958c2ecf20Sopenharmony_ci		case DRM_I915_PERF_PROP_OA_METRICS_SET:
35968c2ecf20Sopenharmony_ci			if (value == 0) {
35978c2ecf20Sopenharmony_ci				DRM_DEBUG("Unknown OA metric set ID\n");
35988c2ecf20Sopenharmony_ci				return -EINVAL;
35998c2ecf20Sopenharmony_ci			}
36008c2ecf20Sopenharmony_ci			props->metrics_set = value;
36018c2ecf20Sopenharmony_ci			break;
36028c2ecf20Sopenharmony_ci		case DRM_I915_PERF_PROP_OA_FORMAT:
36038c2ecf20Sopenharmony_ci			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
36048c2ecf20Sopenharmony_ci				DRM_DEBUG("Out-of-range OA report format %llu\n",
36058c2ecf20Sopenharmony_ci					  value);
36068c2ecf20Sopenharmony_ci				return -EINVAL;
36078c2ecf20Sopenharmony_ci			}
36088c2ecf20Sopenharmony_ci			if (!perf->oa_formats[value].size) {
36098c2ecf20Sopenharmony_ci				DRM_DEBUG("Unsupported OA report format %llu\n",
36108c2ecf20Sopenharmony_ci					  value);
36118c2ecf20Sopenharmony_ci				return -EINVAL;
36128c2ecf20Sopenharmony_ci			}
36138c2ecf20Sopenharmony_ci			props->oa_format = value;
36148c2ecf20Sopenharmony_ci			break;
36158c2ecf20Sopenharmony_ci		case DRM_I915_PERF_PROP_OA_EXPONENT:
36168c2ecf20Sopenharmony_ci			if (value > OA_EXPONENT_MAX) {
36178c2ecf20Sopenharmony_ci				DRM_DEBUG("OA timer exponent too high (> %u)\n",
36188c2ecf20Sopenharmony_ci					 OA_EXPONENT_MAX);
36198c2ecf20Sopenharmony_ci				return -EINVAL;
36208c2ecf20Sopenharmony_ci			}
36218c2ecf20Sopenharmony_ci
36228c2ecf20Sopenharmony_ci			/* Theoretically we can program the OA unit to sample
36238c2ecf20Sopenharmony_ci			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
36248c2ecf20Sopenharmony_ci			 * for BXT. We don't allow such high sampling
36258c2ecf20Sopenharmony_ci			 * frequencies by default unless the caller is perfmon capable.
36268c2ecf20Sopenharmony_ci			 */
36278c2ecf20Sopenharmony_ci
36288c2ecf20Sopenharmony_ci			BUILD_BUG_ON(sizeof(oa_period) != 8);
36298c2ecf20Sopenharmony_ci			oa_period = oa_exponent_to_ns(perf, value);
36308c2ecf20Sopenharmony_ci
36318c2ecf20Sopenharmony_ci			/* This check is primarily to ensure that oa_period <=
36328c2ecf20Sopenharmony_ci			 * UINT32_MAX (before passing to do_div which only
36338c2ecf20Sopenharmony_ci			 * accepts a u32 denominator), but we can also skip
36348c2ecf20Sopenharmony_ci			 * checking anything < 1Hz which implicitly can't be
36358c2ecf20Sopenharmony_ci			 * limited via an integer oa_max_sample_rate.
36368c2ecf20Sopenharmony_ci			 */
36378c2ecf20Sopenharmony_ci			if (oa_period <= NSEC_PER_SEC) {
36388c2ecf20Sopenharmony_ci				u64 tmp = NSEC_PER_SEC;
36398c2ecf20Sopenharmony_ci				do_div(tmp, oa_period);
36408c2ecf20Sopenharmony_ci				oa_freq_hz = tmp;
36418c2ecf20Sopenharmony_ci			} else
36428c2ecf20Sopenharmony_ci				oa_freq_hz = 0;
36438c2ecf20Sopenharmony_ci
36448c2ecf20Sopenharmony_ci			if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
36458c2ecf20Sopenharmony_ci				DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
36468c2ecf20Sopenharmony_ci					  i915_oa_max_sample_rate);
36478c2ecf20Sopenharmony_ci				return -EACCES;
36488c2ecf20Sopenharmony_ci			}
36498c2ecf20Sopenharmony_ci
36508c2ecf20Sopenharmony_ci			props->oa_periodic = true;
36518c2ecf20Sopenharmony_ci			props->oa_period_exponent = value;
36528c2ecf20Sopenharmony_ci			break;
36538c2ecf20Sopenharmony_ci		case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
36548c2ecf20Sopenharmony_ci			props->hold_preemption = !!value;
36558c2ecf20Sopenharmony_ci			break;
36568c2ecf20Sopenharmony_ci		case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
36578c2ecf20Sopenharmony_ci			struct drm_i915_gem_context_param_sseu user_sseu;
36588c2ecf20Sopenharmony_ci
36598c2ecf20Sopenharmony_ci			if (copy_from_user(&user_sseu,
36608c2ecf20Sopenharmony_ci					   u64_to_user_ptr(value),
36618c2ecf20Sopenharmony_ci					   sizeof(user_sseu))) {
36628c2ecf20Sopenharmony_ci				DRM_DEBUG("Unable to copy global sseu parameter\n");
36638c2ecf20Sopenharmony_ci				return -EFAULT;
36648c2ecf20Sopenharmony_ci			}
36658c2ecf20Sopenharmony_ci
36668c2ecf20Sopenharmony_ci			ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
36678c2ecf20Sopenharmony_ci			if (ret) {
36688c2ecf20Sopenharmony_ci				DRM_DEBUG("Invalid SSEU configuration\n");
36698c2ecf20Sopenharmony_ci				return ret;
36708c2ecf20Sopenharmony_ci			}
36718c2ecf20Sopenharmony_ci			props->has_sseu = true;
36728c2ecf20Sopenharmony_ci			break;
36738c2ecf20Sopenharmony_ci		}
36748c2ecf20Sopenharmony_ci		case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
36758c2ecf20Sopenharmony_ci			if (value < 100000 /* 100us */) {
36768c2ecf20Sopenharmony_ci				DRM_DEBUG("OA availability timer too small (%lluns < 100us)\n",
36778c2ecf20Sopenharmony_ci					  value);
36788c2ecf20Sopenharmony_ci				return -EINVAL;
36798c2ecf20Sopenharmony_ci			}
36808c2ecf20Sopenharmony_ci			props->poll_oa_period = value;
36818c2ecf20Sopenharmony_ci			break;
36828c2ecf20Sopenharmony_ci		case DRM_I915_PERF_PROP_MAX:
36838c2ecf20Sopenharmony_ci			MISSING_CASE(id);
36848c2ecf20Sopenharmony_ci			return -EINVAL;
36858c2ecf20Sopenharmony_ci		}
36868c2ecf20Sopenharmony_ci
36878c2ecf20Sopenharmony_ci		uprop += 2;
36888c2ecf20Sopenharmony_ci	}
36898c2ecf20Sopenharmony_ci
36908c2ecf20Sopenharmony_ci	return 0;
36918c2ecf20Sopenharmony_ci}
36928c2ecf20Sopenharmony_ci
36938c2ecf20Sopenharmony_ci/**
36948c2ecf20Sopenharmony_ci * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
36958c2ecf20Sopenharmony_ci * @dev: drm device
36968c2ecf20Sopenharmony_ci * @data: ioctl data copied from userspace (unvalidated)
36978c2ecf20Sopenharmony_ci * @file: drm file
36988c2ecf20Sopenharmony_ci *
36998c2ecf20Sopenharmony_ci * Validates the stream open parameters given by userspace including flags
37008c2ecf20Sopenharmony_ci * and an array of u64 key, value pair properties.
37018c2ecf20Sopenharmony_ci *
37028c2ecf20Sopenharmony_ci * Very little is assumed up front about the nature of the stream being
37038c2ecf20Sopenharmony_ci * opened (for instance we don't assume it's for periodic OA unit metrics). An
37048c2ecf20Sopenharmony_ci * i915-perf stream is expected to be a suitable interface for other forms of
37058c2ecf20Sopenharmony_ci * buffered data written by the GPU besides periodic OA metrics.
37068c2ecf20Sopenharmony_ci *
37078c2ecf20Sopenharmony_ci * Note we copy the properties from userspace outside of the i915 perf
37088c2ecf20Sopenharmony_ci * mutex to avoid an awkward lockdep dependency with mmap_lock.
37098c2ecf20Sopenharmony_ci *
37108c2ecf20Sopenharmony_ci * Most of the implementation details are handled by
37118c2ecf20Sopenharmony_ci * i915_perf_open_ioctl_locked() after taking the &perf->lock
37128c2ecf20Sopenharmony_ci * mutex for serializing with any non-file-operation driver hooks.
37138c2ecf20Sopenharmony_ci *
37148c2ecf20Sopenharmony_ci * Return: A newly opened i915 Perf stream file descriptor or negative
37158c2ecf20Sopenharmony_ci * error code on failure.
37168c2ecf20Sopenharmony_ci */
37178c2ecf20Sopenharmony_ciint i915_perf_open_ioctl(struct drm_device *dev, void *data,
37188c2ecf20Sopenharmony_ci			 struct drm_file *file)
37198c2ecf20Sopenharmony_ci{
37208c2ecf20Sopenharmony_ci	struct i915_perf *perf = &to_i915(dev)->perf;
37218c2ecf20Sopenharmony_ci	struct drm_i915_perf_open_param *param = data;
37228c2ecf20Sopenharmony_ci	struct perf_open_properties props;
37238c2ecf20Sopenharmony_ci	u32 known_open_flags;
37248c2ecf20Sopenharmony_ci	int ret;
37258c2ecf20Sopenharmony_ci
37268c2ecf20Sopenharmony_ci	if (!perf->i915) {
37278c2ecf20Sopenharmony_ci		DRM_DEBUG("i915 perf interface not available for this system\n");
37288c2ecf20Sopenharmony_ci		return -ENOTSUPP;
37298c2ecf20Sopenharmony_ci	}
37308c2ecf20Sopenharmony_ci
37318c2ecf20Sopenharmony_ci	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
37328c2ecf20Sopenharmony_ci			   I915_PERF_FLAG_FD_NONBLOCK |
37338c2ecf20Sopenharmony_ci			   I915_PERF_FLAG_DISABLED;
37348c2ecf20Sopenharmony_ci	if (param->flags & ~known_open_flags) {
37358c2ecf20Sopenharmony_ci		DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
37368c2ecf20Sopenharmony_ci		return -EINVAL;
37378c2ecf20Sopenharmony_ci	}
37388c2ecf20Sopenharmony_ci
37398c2ecf20Sopenharmony_ci	ret = read_properties_unlocked(perf,
37408c2ecf20Sopenharmony_ci				       u64_to_user_ptr(param->properties_ptr),
37418c2ecf20Sopenharmony_ci				       param->num_properties,
37428c2ecf20Sopenharmony_ci				       &props);
37438c2ecf20Sopenharmony_ci	if (ret)
37448c2ecf20Sopenharmony_ci		return ret;
37458c2ecf20Sopenharmony_ci
37468c2ecf20Sopenharmony_ci	mutex_lock(&perf->lock);
37478c2ecf20Sopenharmony_ci	ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
37488c2ecf20Sopenharmony_ci	mutex_unlock(&perf->lock);
37498c2ecf20Sopenharmony_ci
37508c2ecf20Sopenharmony_ci	return ret;
37518c2ecf20Sopenharmony_ci}
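
/*
 * Illustrative sketch (not part of the driver): opening a periodic OA stream
 * with DRM_IOCTL_I915_PERF_OPEN. "metrics_set_id" and the chosen OA format
 * and exponent are placeholders; valid metric set IDs are enumerated under
 * the sysfs metrics/ directory registered by i915_perf_register() below.
 *
 *	uint64_t props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC | I915_PERF_FLAG_DISABLED,
 *		.num_properties = 4,	// key/value pairs
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * Opening without a CTX_HANDLE as above samples system wide and therefore
 * requires CAP_PERFMON/CAP_SYS_ADMIN (or a relaxed
 * dev.i915.perf_stream_paranoid), per the checks in
 * i915_perf_open_ioctl_locked().
 */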
37528c2ecf20Sopenharmony_ci
37538c2ecf20Sopenharmony_ci/**
37548c2ecf20Sopenharmony_ci * i915_perf_register - exposes i915-perf to userspace
37558c2ecf20Sopenharmony_ci * @i915: i915 device instance
37568c2ecf20Sopenharmony_ci *
37578c2ecf20Sopenharmony_ci * In particular OA metric sets are advertised under a sysfs metrics/
37588c2ecf20Sopenharmony_ci * directory allowing userspace to enumerate valid IDs that can be
37598c2ecf20Sopenharmony_ci * used to open an i915-perf stream.
37608c2ecf20Sopenharmony_ci */
37618c2ecf20Sopenharmony_civoid i915_perf_register(struct drm_i915_private *i915)
37628c2ecf20Sopenharmony_ci{
37638c2ecf20Sopenharmony_ci	struct i915_perf *perf = &i915->perf;
37648c2ecf20Sopenharmony_ci
37658c2ecf20Sopenharmony_ci	if (!perf->i915)
37668c2ecf20Sopenharmony_ci		return;
37678c2ecf20Sopenharmony_ci
37688c2ecf20Sopenharmony_ci	/* Take the lock to be sure we're synchronized with any attempted
37698c2ecf20Sopenharmony_ci	 * i915_perf_open_ioctl(), considering that we register after
37708c2ecf20Sopenharmony_ci	 * being exposed to userspace.
37718c2ecf20Sopenharmony_ci	 */
37728c2ecf20Sopenharmony_ci	mutex_lock(&perf->lock);
37738c2ecf20Sopenharmony_ci
37748c2ecf20Sopenharmony_ci	perf->metrics_kobj =
37758c2ecf20Sopenharmony_ci		kobject_create_and_add("metrics",
37768c2ecf20Sopenharmony_ci				       &i915->drm.primary->kdev->kobj);
37778c2ecf20Sopenharmony_ci
37788c2ecf20Sopenharmony_ci	mutex_unlock(&perf->lock);
37798c2ecf20Sopenharmony_ci}
37808c2ecf20Sopenharmony_ci
37818c2ecf20Sopenharmony_ci/**
37828c2ecf20Sopenharmony_ci * i915_perf_unregister - hide i915-perf from userspace
37838c2ecf20Sopenharmony_ci * @i915: i915 device instance
37848c2ecf20Sopenharmony_ci *
37858c2ecf20Sopenharmony_ci * i915-perf state cleanup is split up into an 'unregister' and
37868c2ecf20Sopenharmony_ci * 'deinit' phase where the interface is first hidden from
37878c2ecf20Sopenharmony_ci * userspace by i915_perf_unregister() before cleaning up
37888c2ecf20Sopenharmony_ci * remaining state in i915_perf_fini().
37898c2ecf20Sopenharmony_ci */
37908c2ecf20Sopenharmony_civoid i915_perf_unregister(struct drm_i915_private *i915)
37918c2ecf20Sopenharmony_ci{
37928c2ecf20Sopenharmony_ci	struct i915_perf *perf = &i915->perf;
37938c2ecf20Sopenharmony_ci
37948c2ecf20Sopenharmony_ci	if (!perf->metrics_kobj)
37958c2ecf20Sopenharmony_ci		return;
37968c2ecf20Sopenharmony_ci
37978c2ecf20Sopenharmony_ci	kobject_put(perf->metrics_kobj);
37988c2ecf20Sopenharmony_ci	perf->metrics_kobj = NULL;
37998c2ecf20Sopenharmony_ci}
38008c2ecf20Sopenharmony_ci
38018c2ecf20Sopenharmony_cistatic bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
38028c2ecf20Sopenharmony_ci{
38038c2ecf20Sopenharmony_ci	static const i915_reg_t flex_eu_regs[] = {
38048c2ecf20Sopenharmony_ci		EU_PERF_CNTL0,
38058c2ecf20Sopenharmony_ci		EU_PERF_CNTL1,
38068c2ecf20Sopenharmony_ci		EU_PERF_CNTL2,
38078c2ecf20Sopenharmony_ci		EU_PERF_CNTL3,
38088c2ecf20Sopenharmony_ci		EU_PERF_CNTL4,
38098c2ecf20Sopenharmony_ci		EU_PERF_CNTL5,
38108c2ecf20Sopenharmony_ci		EU_PERF_CNTL6,
38118c2ecf20Sopenharmony_ci	};
38128c2ecf20Sopenharmony_ci	int i;
38138c2ecf20Sopenharmony_ci
38148c2ecf20Sopenharmony_ci	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
38158c2ecf20Sopenharmony_ci		if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
38168c2ecf20Sopenharmony_ci			return true;
38178c2ecf20Sopenharmony_ci	}
38188c2ecf20Sopenharmony_ci	return false;
38198c2ecf20Sopenharmony_ci}
38208c2ecf20Sopenharmony_ci
38218c2ecf20Sopenharmony_ci#define ADDR_IN_RANGE(addr, start, end) \
38228c2ecf20Sopenharmony_ci	((addr) >= (start) && \
38238c2ecf20Sopenharmony_ci	 (addr) <= (end))
38248c2ecf20Sopenharmony_ci
38258c2ecf20Sopenharmony_ci#define REG_IN_RANGE(addr, start, end) \
38268c2ecf20Sopenharmony_ci	((addr) >= i915_mmio_reg_offset(start) && \
38278c2ecf20Sopenharmony_ci	 (addr) <= i915_mmio_reg_offset(end))
38288c2ecf20Sopenharmony_ci
38298c2ecf20Sopenharmony_ci#define REG_EQUAL(addr, mmio) \
38308c2ecf20Sopenharmony_ci	((addr) == i915_mmio_reg_offset(mmio))
38318c2ecf20Sopenharmony_ci
38328c2ecf20Sopenharmony_cistatic bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
38338c2ecf20Sopenharmony_ci{
38348c2ecf20Sopenharmony_ci	return REG_IN_RANGE(addr, OASTARTTRIG1, OASTARTTRIG8) ||
38358c2ecf20Sopenharmony_ci	       REG_IN_RANGE(addr, OAREPORTTRIG1, OAREPORTTRIG8) ||
38368c2ecf20Sopenharmony_ci	       REG_IN_RANGE(addr, OACEC0_0, OACEC7_1);
38378c2ecf20Sopenharmony_ci}
38388c2ecf20Sopenharmony_ci
38398c2ecf20Sopenharmony_cistatic bool gen7_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
38408c2ecf20Sopenharmony_ci{
38418c2ecf20Sopenharmony_ci	return REG_EQUAL(addr, HALF_SLICE_CHICKEN2) ||
38428c2ecf20Sopenharmony_ci	       REG_IN_RANGE(addr, MICRO_BP0_0, NOA_WRITE) ||
38438c2ecf20Sopenharmony_ci	       REG_IN_RANGE(addr, OA_PERFCNT1_LO, OA_PERFCNT2_HI) ||
38448c2ecf20Sopenharmony_ci	       REG_IN_RANGE(addr, OA_PERFMATRIX_LO, OA_PERFMATRIX_HI);
38458c2ecf20Sopenharmony_ci}
38468c2ecf20Sopenharmony_ci
38478c2ecf20Sopenharmony_cistatic bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
38488c2ecf20Sopenharmony_ci{
38498c2ecf20Sopenharmony_ci	return gen7_is_valid_mux_addr(perf, addr) ||
38508c2ecf20Sopenharmony_ci	       REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
38518c2ecf20Sopenharmony_ci	       REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8));
38528c2ecf20Sopenharmony_ci}
38538c2ecf20Sopenharmony_ci
38548c2ecf20Sopenharmony_cistatic bool gen10_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
38558c2ecf20Sopenharmony_ci{
38568c2ecf20Sopenharmony_ci	return gen8_is_valid_mux_addr(perf, addr) ||
38578c2ecf20Sopenharmony_ci	       REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
38588c2ecf20Sopenharmony_ci	       REG_IN_RANGE(addr, OA_PERFCNT3_LO, OA_PERFCNT4_HI);
38598c2ecf20Sopenharmony_ci}
38608c2ecf20Sopenharmony_ci
38618c2ecf20Sopenharmony_cistatic bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
38628c2ecf20Sopenharmony_ci{
38638c2ecf20Sopenharmony_ci	return gen7_is_valid_mux_addr(perf, addr) ||
38648c2ecf20Sopenharmony_ci	       ADDR_IN_RANGE(addr, 0x25100, 0x2FF90) ||
38658c2ecf20Sopenharmony_ci	       REG_IN_RANGE(addr, HSW_MBVID2_NOA0, HSW_MBVID2_NOA9) ||
38668c2ecf20Sopenharmony_ci	       REG_EQUAL(addr, HSW_MBVID2_MISR0);
38678c2ecf20Sopenharmony_ci}
38688c2ecf20Sopenharmony_ci
38698c2ecf20Sopenharmony_cistatic bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
38708c2ecf20Sopenharmony_ci{
38718c2ecf20Sopenharmony_ci	return gen7_is_valid_mux_addr(perf, addr) ||
38728c2ecf20Sopenharmony_ci	       ADDR_IN_RANGE(addr, 0x182300, 0x1823A4);
38738c2ecf20Sopenharmony_ci}
38748c2ecf20Sopenharmony_ci
38758c2ecf20Sopenharmony_cistatic bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
38768c2ecf20Sopenharmony_ci{
38778c2ecf20Sopenharmony_ci	return REG_IN_RANGE(addr, GEN12_OAG_OASTARTTRIG1, GEN12_OAG_OASTARTTRIG8) ||
38788c2ecf20Sopenharmony_ci	       REG_IN_RANGE(addr, GEN12_OAG_OAREPORTTRIG1, GEN12_OAG_OAREPORTTRIG8) ||
38798c2ecf20Sopenharmony_ci	       REG_IN_RANGE(addr, GEN12_OAG_CEC0_0, GEN12_OAG_CEC7_1) ||
38808c2ecf20Sopenharmony_ci	       REG_IN_RANGE(addr, GEN12_OAG_SCEC0_0, GEN12_OAG_SCEC7_1) ||
38818c2ecf20Sopenharmony_ci	       REG_EQUAL(addr, GEN12_OAA_DBG_REG) ||
38828c2ecf20Sopenharmony_ci	       REG_EQUAL(addr, GEN12_OAG_OA_PESS) ||
38838c2ecf20Sopenharmony_ci	       REG_EQUAL(addr, GEN12_OAG_SPCTR_CNF);
38848c2ecf20Sopenharmony_ci}
38858c2ecf20Sopenharmony_ci
38868c2ecf20Sopenharmony_cistatic bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
38878c2ecf20Sopenharmony_ci{
38888c2ecf20Sopenharmony_ci	return REG_EQUAL(addr, NOA_WRITE) ||
38898c2ecf20Sopenharmony_ci	       REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
38908c2ecf20Sopenharmony_ci	       REG_EQUAL(addr, GDT_CHICKEN_BITS) ||
38918c2ecf20Sopenharmony_ci	       REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
38928c2ecf20Sopenharmony_ci	       REG_EQUAL(addr, RPM_CONFIG0) ||
38938c2ecf20Sopenharmony_ci	       REG_EQUAL(addr, RPM_CONFIG1) ||
38948c2ecf20Sopenharmony_ci	       REG_IN_RANGE(addr, NOA_CONFIG(0), NOA_CONFIG(8));
38958c2ecf20Sopenharmony_ci}
38968c2ecf20Sopenharmony_ci
38978c2ecf20Sopenharmony_cistatic u32 mask_reg_value(u32 reg, u32 val)
38988c2ecf20Sopenharmony_ci{
38998c2ecf20Sopenharmony_ci	/* HALF_SLICE_CHICKEN2 is programmed with the
39008c2ecf20Sopenharmony_ci	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
39018c2ecf20Sopenharmony_ci	 * programmed by userspace doesn't change this.
39028c2ecf20Sopenharmony_ci	 */
39038c2ecf20Sopenharmony_ci	if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
39048c2ecf20Sopenharmony_ci		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
39058c2ecf20Sopenharmony_ci
39068c2ecf20Sopenharmony_ci	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
39078c2ecf20Sopenharmony_ci	 * indicated by its name and a bunch of selection fields used by OA
39088c2ecf20Sopenharmony_ci	 * configs.
39098c2ecf20Sopenharmony_ci	 */
39108c2ecf20Sopenharmony_ci	if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
39118c2ecf20Sopenharmony_ci		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
39128c2ecf20Sopenharmony_ci
39138c2ecf20Sopenharmony_ci	return val;
39148c2ecf20Sopenharmony_ci}
39158c2ecf20Sopenharmony_ci
39168c2ecf20Sopenharmony_cistatic struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
39178c2ecf20Sopenharmony_ci					 bool (*is_valid)(struct i915_perf *perf, u32 addr),
39188c2ecf20Sopenharmony_ci					 u32 __user *regs,
39198c2ecf20Sopenharmony_ci					 u32 n_regs)
39208c2ecf20Sopenharmony_ci{
39218c2ecf20Sopenharmony_ci	struct i915_oa_reg *oa_regs;
39228c2ecf20Sopenharmony_ci	int err;
39238c2ecf20Sopenharmony_ci	u32 i;
39248c2ecf20Sopenharmony_ci
39258c2ecf20Sopenharmony_ci	if (!n_regs)
39268c2ecf20Sopenharmony_ci		return NULL;
39278c2ecf20Sopenharmony_ci
39288c2ecf20Sopenharmony_ci	/* No is_valid function means we're not allowing any register to be programmed. */
39298c2ecf20Sopenharmony_ci	GEM_BUG_ON(!is_valid);
39308c2ecf20Sopenharmony_ci	if (!is_valid)
39318c2ecf20Sopenharmony_ci		return ERR_PTR(-EINVAL);
39328c2ecf20Sopenharmony_ci
39338c2ecf20Sopenharmony_ci	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
39348c2ecf20Sopenharmony_ci	if (!oa_regs)
39358c2ecf20Sopenharmony_ci		return ERR_PTR(-ENOMEM);
39368c2ecf20Sopenharmony_ci
39378c2ecf20Sopenharmony_ci	for (i = 0; i < n_regs; i++) {
39388c2ecf20Sopenharmony_ci		u32 addr, value;
39398c2ecf20Sopenharmony_ci
39408c2ecf20Sopenharmony_ci		err = get_user(addr, regs);
39418c2ecf20Sopenharmony_ci		if (err)
39428c2ecf20Sopenharmony_ci			goto addr_err;
39438c2ecf20Sopenharmony_ci
39448c2ecf20Sopenharmony_ci		if (!is_valid(perf, addr)) {
39458c2ecf20Sopenharmony_ci			DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
39468c2ecf20Sopenharmony_ci			err = -EINVAL;
39478c2ecf20Sopenharmony_ci			goto addr_err;
39488c2ecf20Sopenharmony_ci		}
39498c2ecf20Sopenharmony_ci
39508c2ecf20Sopenharmony_ci		err = get_user(value, regs + 1);
39518c2ecf20Sopenharmony_ci		if (err)
39528c2ecf20Sopenharmony_ci			goto addr_err;
39538c2ecf20Sopenharmony_ci
39548c2ecf20Sopenharmony_ci		oa_regs[i].addr = _MMIO(addr);
39558c2ecf20Sopenharmony_ci		oa_regs[i].value = mask_reg_value(addr, value);
39568c2ecf20Sopenharmony_ci
39578c2ecf20Sopenharmony_ci		regs += 2;
39588c2ecf20Sopenharmony_ci	}
39598c2ecf20Sopenharmony_ci
39608c2ecf20Sopenharmony_ci	return oa_regs;
39618c2ecf20Sopenharmony_ci
39628c2ecf20Sopenharmony_ciaddr_err:
39638c2ecf20Sopenharmony_ci	kfree(oa_regs);
39648c2ecf20Sopenharmony_ci	return ERR_PTR(err);
39658c2ecf20Sopenharmony_ci}
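
/*
 * The register blob consumed by alloc_oa_regs() is a flat userspace array
 * of 2 * n_regs dwords: each entry is an { mmio address, value } pair and
 * every address must pass the per-platform is_valid() callback. Illustrative
 * sketch only (compiled out); the addresses and values below are
 * placeholders, not a usable metric configuration.
 */
#if 0
static const uint32_t example_mux_regs[] = {
	/* addr        value */
	0x00009888, 0x14150001,
	0x00009888, 0x00000000,
};
static const uint32_t example_n_mux_regs =
	sizeof(example_mux_regs) / sizeof(example_mux_regs[0]) / 2;
#endif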
39668c2ecf20Sopenharmony_ci
39678c2ecf20Sopenharmony_cistatic ssize_t show_dynamic_id(struct kobject *kobj,
39688c2ecf20Sopenharmony_ci			       struct kobj_attribute *attr,
39698c2ecf20Sopenharmony_ci			       char *buf)
39708c2ecf20Sopenharmony_ci{
39718c2ecf20Sopenharmony_ci	struct i915_oa_config *oa_config =
39728c2ecf20Sopenharmony_ci		container_of(attr, typeof(*oa_config), sysfs_metric_id);
39738c2ecf20Sopenharmony_ci
39748c2ecf20Sopenharmony_ci	return sprintf(buf, "%d\n", oa_config->id);
39758c2ecf20Sopenharmony_ci}
39768c2ecf20Sopenharmony_ci
39778c2ecf20Sopenharmony_cistatic int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
39788c2ecf20Sopenharmony_ci					 struct i915_oa_config *oa_config)
39798c2ecf20Sopenharmony_ci{
39808c2ecf20Sopenharmony_ci	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
39818c2ecf20Sopenharmony_ci	oa_config->sysfs_metric_id.attr.name = "id";
39828c2ecf20Sopenharmony_ci	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
39838c2ecf20Sopenharmony_ci	oa_config->sysfs_metric_id.show = show_dynamic_id;
39848c2ecf20Sopenharmony_ci	oa_config->sysfs_metric_id.store = NULL;
39858c2ecf20Sopenharmony_ci
39868c2ecf20Sopenharmony_ci	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
39878c2ecf20Sopenharmony_ci	oa_config->attrs[1] = NULL;
39888c2ecf20Sopenharmony_ci
39898c2ecf20Sopenharmony_ci	oa_config->sysfs_metric.name = oa_config->uuid;
39908c2ecf20Sopenharmony_ci	oa_config->sysfs_metric.attrs = oa_config->attrs;
39918c2ecf20Sopenharmony_ci
39928c2ecf20Sopenharmony_ci	return sysfs_create_group(perf->metrics_kobj,
39938c2ecf20Sopenharmony_ci				  &oa_config->sysfs_metric);
39948c2ecf20Sopenharmony_ci}
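
/*
 * Each successfully added config shows up under the device's "metrics"
 * kobject as a directory named after the config's UUID, containing a
 * read-only "id" file rendered by show_dynamic_id(). Illustrative userspace
 * sketch only (compiled out); the card index and sysfs path below are
 * assumptions for a typical single-GPU system.
 */
#if 0
#include <stdio.h>

static int example_read_metric_id(const char *uuid)
{
	char path[128];
	int id = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/drm/card0/metrics/%s/id", uuid);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &id) != 1)
		id = -1;
	fclose(f);
	return id;
}
#endif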
39958c2ecf20Sopenharmony_ci
39968c2ecf20Sopenharmony_ci/**
39978c2ecf20Sopenharmony_ci * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
39988c2ecf20Sopenharmony_ci * @dev: drm device
39998c2ecf20Sopenharmony_ci * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
40008c2ecf20Sopenharmony_ci *        userspace (unvalidated)
40018c2ecf20Sopenharmony_ci * @file: drm file
40028c2ecf20Sopenharmony_ci *
40038c2ecf20Sopenharmony_ci * Validates the submitted OA registers to be saved into a new OA config that
40048c2ecf20Sopenharmony_ci * can then be used for programming the OA unit and its NOA network.
40058c2ecf20Sopenharmony_ci *
40068c2ecf20Sopenharmony_ci * Returns: A newly allocated config number to be used with the perf open ioctl
40078c2ecf20Sopenharmony_ci * or a negative error code on failure.
40088c2ecf20Sopenharmony_ci */
40098c2ecf20Sopenharmony_ciint i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
40108c2ecf20Sopenharmony_ci			       struct drm_file *file)
40118c2ecf20Sopenharmony_ci{
40128c2ecf20Sopenharmony_ci	struct i915_perf *perf = &to_i915(dev)->perf;
40138c2ecf20Sopenharmony_ci	struct drm_i915_perf_oa_config *args = data;
40148c2ecf20Sopenharmony_ci	struct i915_oa_config *oa_config, *tmp;
40158c2ecf20Sopenharmony_ci	struct i915_oa_reg *regs;
40168c2ecf20Sopenharmony_ci	int err, id;
40178c2ecf20Sopenharmony_ci
40188c2ecf20Sopenharmony_ci	if (!perf->i915) {
40198c2ecf20Sopenharmony_ci		DRM_DEBUG("i915 perf interface not available for this system\n");
40208c2ecf20Sopenharmony_ci		return -ENOTSUPP;
40218c2ecf20Sopenharmony_ci	}
40228c2ecf20Sopenharmony_ci
40238c2ecf20Sopenharmony_ci	if (!perf->metrics_kobj) {
40248c2ecf20Sopenharmony_ci		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
40258c2ecf20Sopenharmony_ci		return -EINVAL;
40268c2ecf20Sopenharmony_ci	}
40278c2ecf20Sopenharmony_ci
40288c2ecf20Sopenharmony_ci	if (i915_perf_stream_paranoid && !perfmon_capable()) {
40298c2ecf20Sopenharmony_ci		DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
40308c2ecf20Sopenharmony_ci		return -EACCES;
40318c2ecf20Sopenharmony_ci	}
40328c2ecf20Sopenharmony_ci
40338c2ecf20Sopenharmony_ci	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
40348c2ecf20Sopenharmony_ci	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
40358c2ecf20Sopenharmony_ci	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
40368c2ecf20Sopenharmony_ci		DRM_DEBUG("No OA registers given\n");
40378c2ecf20Sopenharmony_ci		return -EINVAL;
40388c2ecf20Sopenharmony_ci	}
40398c2ecf20Sopenharmony_ci
40408c2ecf20Sopenharmony_ci	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
40418c2ecf20Sopenharmony_ci	if (!oa_config) {
40428c2ecf20Sopenharmony_ci		DRM_DEBUG("Failed to allocate memory for the OA config\n");
40438c2ecf20Sopenharmony_ci		return -ENOMEM;
40448c2ecf20Sopenharmony_ci	}
40458c2ecf20Sopenharmony_ci
40468c2ecf20Sopenharmony_ci	oa_config->perf = perf;
40478c2ecf20Sopenharmony_ci	kref_init(&oa_config->ref);
40488c2ecf20Sopenharmony_ci
40498c2ecf20Sopenharmony_ci	if (!uuid_is_valid(args->uuid)) {
40508c2ecf20Sopenharmony_ci		DRM_DEBUG("Invalid uuid format for OA config\n");
40518c2ecf20Sopenharmony_ci		err = -EINVAL;
40528c2ecf20Sopenharmony_ci		goto reg_err;
40538c2ecf20Sopenharmony_ci	}
40548c2ecf20Sopenharmony_ci
40558c2ecf20Sopenharmony_ci	/* Last character in oa_config->uuid will be 0 because oa_config was
40568c2ecf20Sopenharmony_ci	 * allocated with kzalloc().
40578c2ecf20Sopenharmony_ci	 */
40588c2ecf20Sopenharmony_ci	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
40598c2ecf20Sopenharmony_ci
40608c2ecf20Sopenharmony_ci	oa_config->mux_regs_len = args->n_mux_regs;
40618c2ecf20Sopenharmony_ci	regs = alloc_oa_regs(perf,
40628c2ecf20Sopenharmony_ci			     perf->ops.is_valid_mux_reg,
40638c2ecf20Sopenharmony_ci			     u64_to_user_ptr(args->mux_regs_ptr),
40648c2ecf20Sopenharmony_ci			     args->n_mux_regs);
40658c2ecf20Sopenharmony_ci
40668c2ecf20Sopenharmony_ci	if (IS_ERR(regs)) {
40678c2ecf20Sopenharmony_ci		DRM_DEBUG("Failed to create OA config for mux_regs\n");
40688c2ecf20Sopenharmony_ci		err = PTR_ERR(regs);
40698c2ecf20Sopenharmony_ci		goto reg_err;
40708c2ecf20Sopenharmony_ci	}
40718c2ecf20Sopenharmony_ci	oa_config->mux_regs = regs;
40728c2ecf20Sopenharmony_ci
40738c2ecf20Sopenharmony_ci	oa_config->b_counter_regs_len = args->n_boolean_regs;
40748c2ecf20Sopenharmony_ci	regs = alloc_oa_regs(perf,
40758c2ecf20Sopenharmony_ci			     perf->ops.is_valid_b_counter_reg,
40768c2ecf20Sopenharmony_ci			     u64_to_user_ptr(args->boolean_regs_ptr),
40778c2ecf20Sopenharmony_ci			     args->n_boolean_regs);
40788c2ecf20Sopenharmony_ci
40798c2ecf20Sopenharmony_ci	if (IS_ERR(regs)) {
40808c2ecf20Sopenharmony_ci		DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
40818c2ecf20Sopenharmony_ci		err = PTR_ERR(regs);
40828c2ecf20Sopenharmony_ci		goto reg_err;
40838c2ecf20Sopenharmony_ci	}
40848c2ecf20Sopenharmony_ci	oa_config->b_counter_regs = regs;
40858c2ecf20Sopenharmony_ci
40868c2ecf20Sopenharmony_ci	if (INTEL_GEN(perf->i915) < 8) {
40878c2ecf20Sopenharmony_ci		if (args->n_flex_regs != 0) {
40888c2ecf20Sopenharmony_ci			err = -EINVAL;
40898c2ecf20Sopenharmony_ci			goto reg_err;
40908c2ecf20Sopenharmony_ci		}
40918c2ecf20Sopenharmony_ci	} else {
40928c2ecf20Sopenharmony_ci		oa_config->flex_regs_len = args->n_flex_regs;
40938c2ecf20Sopenharmony_ci		regs = alloc_oa_regs(perf,
40948c2ecf20Sopenharmony_ci				     perf->ops.is_valid_flex_reg,
40958c2ecf20Sopenharmony_ci				     u64_to_user_ptr(args->flex_regs_ptr),
40968c2ecf20Sopenharmony_ci				     args->n_flex_regs);
40978c2ecf20Sopenharmony_ci
40988c2ecf20Sopenharmony_ci		if (IS_ERR(regs)) {
40998c2ecf20Sopenharmony_ci			DRM_DEBUG("Failed to create OA config for flex_regs\n");
41008c2ecf20Sopenharmony_ci			err = PTR_ERR(regs);
41018c2ecf20Sopenharmony_ci			goto reg_err;
41028c2ecf20Sopenharmony_ci		}
41038c2ecf20Sopenharmony_ci		oa_config->flex_regs = regs;
41048c2ecf20Sopenharmony_ci	}
41058c2ecf20Sopenharmony_ci
41068c2ecf20Sopenharmony_ci	err = mutex_lock_interruptible(&perf->metrics_lock);
41078c2ecf20Sopenharmony_ci	if (err)
41088c2ecf20Sopenharmony_ci		goto reg_err;
41098c2ecf20Sopenharmony_ci
41108c2ecf20Sopenharmony_ci	/* We shouldn't have too many configs, so this iteration shouldn't be
41118c2ecf20Sopenharmony_ci	 * too costly.
41128c2ecf20Sopenharmony_ci	 */
41138c2ecf20Sopenharmony_ci	idr_for_each_entry(&perf->metrics_idr, tmp, id) {
41148c2ecf20Sopenharmony_ci		if (!strcmp(tmp->uuid, oa_config->uuid)) {
41158c2ecf20Sopenharmony_ci			DRM_DEBUG("OA config already exists with this uuid\n");
41168c2ecf20Sopenharmony_ci			err = -EADDRINUSE;
41178c2ecf20Sopenharmony_ci			goto sysfs_err;
41188c2ecf20Sopenharmony_ci		}
41198c2ecf20Sopenharmony_ci	}
41208c2ecf20Sopenharmony_ci
41218c2ecf20Sopenharmony_ci	err = create_dynamic_oa_sysfs_entry(perf, oa_config);
41228c2ecf20Sopenharmony_ci	if (err) {
41238c2ecf20Sopenharmony_ci		DRM_DEBUG("Failed to create sysfs entry for OA config\n");
41248c2ecf20Sopenharmony_ci		goto sysfs_err;
41258c2ecf20Sopenharmony_ci	}
41268c2ecf20Sopenharmony_ci
41278c2ecf20Sopenharmony_ci	/* Config id 0 is invalid; id 1 is reserved for the kernel's test config. */
41288c2ecf20Sopenharmony_ci	oa_config->id = idr_alloc(&perf->metrics_idr,
41298c2ecf20Sopenharmony_ci				  oa_config, 2,
41308c2ecf20Sopenharmony_ci				  0, GFP_KERNEL);
41318c2ecf20Sopenharmony_ci	if (oa_config->id < 0) {
41328c2ecf20Sopenharmony_ci		DRM_DEBUG("Failed to allocate an id for the OA config\n");
41338c2ecf20Sopenharmony_ci		err = oa_config->id;
41348c2ecf20Sopenharmony_ci		goto sysfs_err;
41358c2ecf20Sopenharmony_ci	}
41368c2ecf20Sopenharmony_ci
41378c2ecf20Sopenharmony_ci	mutex_unlock(&perf->metrics_lock);
41388c2ecf20Sopenharmony_ci
41398c2ecf20Sopenharmony_ci	DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);
41408c2ecf20Sopenharmony_ci
41418c2ecf20Sopenharmony_ci	return oa_config->id;
41428c2ecf20Sopenharmony_ci
41438c2ecf20Sopenharmony_cisysfs_err:
41448c2ecf20Sopenharmony_ci	mutex_unlock(&perf->metrics_lock);
41458c2ecf20Sopenharmony_cireg_err:
41468c2ecf20Sopenharmony_ci	i915_oa_config_put(oa_config);
41478c2ecf20Sopenharmony_ci	DRM_DEBUG("Failed to add new OA config\n");
41488c2ecf20Sopenharmony_ci	return err;
41498c2ecf20Sopenharmony_ci}
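
/*
 * Illustrative userspace sketch (compiled out) of driving this ioctl with
 * the uAPI struct drm_i915_perf_oa_config. The header include path and the
 * register contents are assumptions; real configs typically come from
 * metric definitions shipped with tools such as Mesa or GPU Top.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <i915_drm.h>	/* exact path depends on the libdrm installation */

static int example_add_oa_config(int drm_fd, const char *uuid_36_chars,
				 const uint32_t *mux_regs, uint32_t n_mux_regs,
				 const uint32_t *b_regs, uint32_t n_b_regs)
{
	struct drm_i915_perf_oa_config config;

	memset(&config, 0, sizeof(config));
	memcpy(config.uuid, uuid_36_chars, sizeof(config.uuid));

	config.n_mux_regs = n_mux_regs;
	config.mux_regs_ptr = (uintptr_t)mux_regs;
	config.n_boolean_regs = n_b_regs;
	config.boolean_regs_ptr = (uintptr_t)b_regs;

	/* On success ioctl() returns the new config id (>= 2);
	 * on failure it returns -1 with errno set.
	 */
	return ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
}
#endif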
41508c2ecf20Sopenharmony_ci
41518c2ecf20Sopenharmony_ci/**
41528c2ecf20Sopenharmony_ci * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
41538c2ecf20Sopenharmony_ci * @dev: drm device
41548c2ecf20Sopenharmony_ci * @data: ioctl data (pointer to u64 integer) copied from userspace
41558c2ecf20Sopenharmony_ci * @file: drm file
41568c2ecf20Sopenharmony_ci *
41578c2ecf20Sopenharmony_ci * Configs can be removed while being used; they will stop appearing in sysfs
41588c2ecf20Sopenharmony_ci * and their content will be freed when the stream using the config is closed.
41598c2ecf20Sopenharmony_ci *
41608c2ecf20Sopenharmony_ci * Returns: 0 on success or a negative error code on failure.
41618c2ecf20Sopenharmony_ci */
41628c2ecf20Sopenharmony_ciint i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
41638c2ecf20Sopenharmony_ci				  struct drm_file *file)
41648c2ecf20Sopenharmony_ci{
41658c2ecf20Sopenharmony_ci	struct i915_perf *perf = &to_i915(dev)->perf;
41668c2ecf20Sopenharmony_ci	u64 *arg = data;
41678c2ecf20Sopenharmony_ci	struct i915_oa_config *oa_config;
41688c2ecf20Sopenharmony_ci	int ret;
41698c2ecf20Sopenharmony_ci
41708c2ecf20Sopenharmony_ci	if (!perf->i915) {
41718c2ecf20Sopenharmony_ci		DRM_DEBUG("i915 perf interface not available for this system\n");
41728c2ecf20Sopenharmony_ci		return -ENOTSUPP;
41738c2ecf20Sopenharmony_ci	}
41748c2ecf20Sopenharmony_ci
41758c2ecf20Sopenharmony_ci	if (i915_perf_stream_paranoid && !perfmon_capable()) {
41768c2ecf20Sopenharmony_ci		DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
41778c2ecf20Sopenharmony_ci		return -EACCES;
41788c2ecf20Sopenharmony_ci	}
41798c2ecf20Sopenharmony_ci
41808c2ecf20Sopenharmony_ci	ret = mutex_lock_interruptible(&perf->metrics_lock);
41818c2ecf20Sopenharmony_ci	if (ret)
41828c2ecf20Sopenharmony_ci		return ret;
41838c2ecf20Sopenharmony_ci
41848c2ecf20Sopenharmony_ci	oa_config = idr_find(&perf->metrics_idr, *arg);
41858c2ecf20Sopenharmony_ci	if (!oa_config) {
41868c2ecf20Sopenharmony_ci		DRM_DEBUG("Failed to remove unknown OA config\n");
41878c2ecf20Sopenharmony_ci		ret = -ENOENT;
41888c2ecf20Sopenharmony_ci		goto err_unlock;
41898c2ecf20Sopenharmony_ci	}
41908c2ecf20Sopenharmony_ci
41918c2ecf20Sopenharmony_ci	GEM_BUG_ON(*arg != oa_config->id);
41928c2ecf20Sopenharmony_ci
41938c2ecf20Sopenharmony_ci	sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
41948c2ecf20Sopenharmony_ci
41958c2ecf20Sopenharmony_ci	idr_remove(&perf->metrics_idr, *arg);
41968c2ecf20Sopenharmony_ci
41978c2ecf20Sopenharmony_ci	mutex_unlock(&perf->metrics_lock);
41988c2ecf20Sopenharmony_ci
41998c2ecf20Sopenharmony_ci	DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
42008c2ecf20Sopenharmony_ci
42018c2ecf20Sopenharmony_ci	i915_oa_config_put(oa_config);
42028c2ecf20Sopenharmony_ci
42038c2ecf20Sopenharmony_ci	return 0;
42048c2ecf20Sopenharmony_ci
42058c2ecf20Sopenharmony_cierr_unlock:
42068c2ecf20Sopenharmony_ci	mutex_unlock(&perf->metrics_lock);
42078c2ecf20Sopenharmony_ci	return ret;
42088c2ecf20Sopenharmony_ci}
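
/*
 * Illustrative userspace counterpart (compiled out): removal only takes the
 * u64 config id previously returned by the add ioctl.
 */
#if 0
static int example_remove_oa_config(int drm_fd, uint64_t config_id)
{
	/* Returns 0 on success, or -1 with errno set (e.g. ENOENT). */
	return ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &config_id);
}
#endif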
42098c2ecf20Sopenharmony_ci
42108c2ecf20Sopenharmony_cistatic struct ctl_table oa_table[] = {
42118c2ecf20Sopenharmony_ci	{
42128c2ecf20Sopenharmony_ci	 .procname = "perf_stream_paranoid",
42138c2ecf20Sopenharmony_ci	 .data = &i915_perf_stream_paranoid,
42148c2ecf20Sopenharmony_ci	 .maxlen = sizeof(i915_perf_stream_paranoid),
42158c2ecf20Sopenharmony_ci	 .mode = 0644,
42168c2ecf20Sopenharmony_ci	 .proc_handler = proc_dointvec_minmax,
42178c2ecf20Sopenharmony_ci	 .extra1 = SYSCTL_ZERO,
42188c2ecf20Sopenharmony_ci	 .extra2 = SYSCTL_ONE,
42198c2ecf20Sopenharmony_ci	 },
42208c2ecf20Sopenharmony_ci	{
42218c2ecf20Sopenharmony_ci	 .procname = "oa_max_sample_rate",
42228c2ecf20Sopenharmony_ci	 .data = &i915_oa_max_sample_rate,
42238c2ecf20Sopenharmony_ci	 .maxlen = sizeof(i915_oa_max_sample_rate),
42248c2ecf20Sopenharmony_ci	 .mode = 0644,
42258c2ecf20Sopenharmony_ci	 .proc_handler = proc_dointvec_minmax,
42268c2ecf20Sopenharmony_ci	 .extra1 = SYSCTL_ZERO,
42278c2ecf20Sopenharmony_ci	 .extra2 = &oa_sample_rate_hard_limit,
42288c2ecf20Sopenharmony_ci	 },
42298c2ecf20Sopenharmony_ci	{}
42308c2ecf20Sopenharmony_ci};
42318c2ecf20Sopenharmony_ci
42328c2ecf20Sopenharmony_cistatic struct ctl_table i915_root[] = {
42338c2ecf20Sopenharmony_ci	{
42348c2ecf20Sopenharmony_ci	 .procname = "i915",
42358c2ecf20Sopenharmony_ci	 .maxlen = 0,
42368c2ecf20Sopenharmony_ci	 .mode = 0555,
42378c2ecf20Sopenharmony_ci	 .child = oa_table,
42388c2ecf20Sopenharmony_ci	 },
42398c2ecf20Sopenharmony_ci	{}
42408c2ecf20Sopenharmony_ci};
42418c2ecf20Sopenharmony_ci
42428c2ecf20Sopenharmony_cistatic struct ctl_table dev_root[] = {
42438c2ecf20Sopenharmony_ci	{
42448c2ecf20Sopenharmony_ci	 .procname = "dev",
42458c2ecf20Sopenharmony_ci	 .maxlen = 0,
42468c2ecf20Sopenharmony_ci	 .mode = 0555,
42478c2ecf20Sopenharmony_ci	 .child = i915_root,
42488c2ecf20Sopenharmony_ci	 },
42498c2ecf20Sopenharmony_ci	{}
42508c2ecf20Sopenharmony_ci};
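
/*
 * These tables publish dev.i915.perf_stream_paranoid and
 * dev.i915.oa_max_sample_rate, i.e. entries under /proc/sys/dev/i915/.
 * Illustrative userspace sketch only (compiled out) of relaxing the paranoid
 * setting so non perfmon-capable processes can open system-wide streams;
 * writing the file itself still requires privileges.
 */
#if 0
#include <stdio.h>

static int example_set_perf_stream_paranoid(int value)
{
	FILE *f = fopen("/proc/sys/dev/i915/perf_stream_paranoid", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", value);
	return fclose(f) ? -1 : 0;
}
#endif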
42518c2ecf20Sopenharmony_ci
42528c2ecf20Sopenharmony_ci/**
42538c2ecf20Sopenharmony_ci * i915_perf_init - initialize i915-perf state on module bind
42548c2ecf20Sopenharmony_ci * @i915: i915 device instance
42558c2ecf20Sopenharmony_ci *
42568c2ecf20Sopenharmony_ci * Initializes i915-perf state without exposing anything to userspace.
42578c2ecf20Sopenharmony_ci *
42588c2ecf20Sopenharmony_ci * Note: i915-perf initialization is split into an 'init' and 'register'
42598c2ecf20Sopenharmony_ci * phase with the i915_perf_register() exposing state to userspace.
42608c2ecf20Sopenharmony_ci */
42618c2ecf20Sopenharmony_civoid i915_perf_init(struct drm_i915_private *i915)
42628c2ecf20Sopenharmony_ci{
42638c2ecf20Sopenharmony_ci	struct i915_perf *perf = &i915->perf;
42648c2ecf20Sopenharmony_ci
42658c2ecf20Sopenharmony_ci	/* XXX const struct i915_perf_ops! */
42668c2ecf20Sopenharmony_ci
42678c2ecf20Sopenharmony_ci	if (IS_HASWELL(i915)) {
42688c2ecf20Sopenharmony_ci		perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
42698c2ecf20Sopenharmony_ci		perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
42708c2ecf20Sopenharmony_ci		perf->ops.is_valid_flex_reg = NULL;
42718c2ecf20Sopenharmony_ci		perf->ops.enable_metric_set = hsw_enable_metric_set;
42728c2ecf20Sopenharmony_ci		perf->ops.disable_metric_set = hsw_disable_metric_set;
42738c2ecf20Sopenharmony_ci		perf->ops.oa_enable = gen7_oa_enable;
42748c2ecf20Sopenharmony_ci		perf->ops.oa_disable = gen7_oa_disable;
42758c2ecf20Sopenharmony_ci		perf->ops.read = gen7_oa_read;
42768c2ecf20Sopenharmony_ci		perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
42778c2ecf20Sopenharmony_ci
42788c2ecf20Sopenharmony_ci		perf->oa_formats = hsw_oa_formats;
42798c2ecf20Sopenharmony_ci	} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
42808c2ecf20Sopenharmony_ci		/* Note that although we could theoretically also support the
42818c2ecf20Sopenharmony_ci		 * legacy ringbuffer mode on BDW (and earlier iterations of
42828c2ecf20Sopenharmony_ci		 * this driver, before upstreaming, did this), it didn't seem
42838c2ecf20Sopenharmony_ci		 * worth the complexity to maintain now that BDW+ enables
42848c2ecf20Sopenharmony_ci		 * execlist mode by default.
42858c2ecf20Sopenharmony_ci		 */
42868c2ecf20Sopenharmony_ci		perf->ops.read = gen8_oa_read;
42878c2ecf20Sopenharmony_ci
42888c2ecf20Sopenharmony_ci		if (IS_GEN_RANGE(i915, 8, 9)) {
42898c2ecf20Sopenharmony_ci			perf->oa_formats = gen8_plus_oa_formats;
42908c2ecf20Sopenharmony_ci
42918c2ecf20Sopenharmony_ci			perf->ops.is_valid_b_counter_reg =
42928c2ecf20Sopenharmony_ci				gen7_is_valid_b_counter_addr;
42938c2ecf20Sopenharmony_ci			perf->ops.is_valid_mux_reg =
42948c2ecf20Sopenharmony_ci				gen8_is_valid_mux_addr;
42958c2ecf20Sopenharmony_ci			perf->ops.is_valid_flex_reg =
42968c2ecf20Sopenharmony_ci				gen8_is_valid_flex_addr;
42978c2ecf20Sopenharmony_ci
42988c2ecf20Sopenharmony_ci			if (IS_CHERRYVIEW(i915)) {
42998c2ecf20Sopenharmony_ci				perf->ops.is_valid_mux_reg =
43008c2ecf20Sopenharmony_ci					chv_is_valid_mux_addr;
43018c2ecf20Sopenharmony_ci			}
43028c2ecf20Sopenharmony_ci
43038c2ecf20Sopenharmony_ci			perf->ops.oa_enable = gen8_oa_enable;
43048c2ecf20Sopenharmony_ci			perf->ops.oa_disable = gen8_oa_disable;
43058c2ecf20Sopenharmony_ci			perf->ops.enable_metric_set = gen8_enable_metric_set;
43068c2ecf20Sopenharmony_ci			perf->ops.disable_metric_set = gen8_disable_metric_set;
43078c2ecf20Sopenharmony_ci			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
43088c2ecf20Sopenharmony_ci
43098c2ecf20Sopenharmony_ci			if (IS_GEN(i915, 8)) {
43108c2ecf20Sopenharmony_ci				perf->ctx_oactxctrl_offset = 0x120;
43118c2ecf20Sopenharmony_ci				perf->ctx_flexeu0_offset = 0x2ce;
43128c2ecf20Sopenharmony_ci
43138c2ecf20Sopenharmony_ci				perf->gen8_valid_ctx_bit = BIT(25);
43148c2ecf20Sopenharmony_ci			} else {
43158c2ecf20Sopenharmony_ci				perf->ctx_oactxctrl_offset = 0x128;
43168c2ecf20Sopenharmony_ci				perf->ctx_flexeu0_offset = 0x3de;
43178c2ecf20Sopenharmony_ci
43188c2ecf20Sopenharmony_ci				perf->gen8_valid_ctx_bit = BIT(16);
43198c2ecf20Sopenharmony_ci			}
43208c2ecf20Sopenharmony_ci		} else if (IS_GEN_RANGE(i915, 10, 11)) {
43218c2ecf20Sopenharmony_ci			perf->oa_formats = gen8_plus_oa_formats;
43228c2ecf20Sopenharmony_ci
43238c2ecf20Sopenharmony_ci			perf->ops.is_valid_b_counter_reg =
43248c2ecf20Sopenharmony_ci				gen7_is_valid_b_counter_addr;
43258c2ecf20Sopenharmony_ci			perf->ops.is_valid_mux_reg =
43268c2ecf20Sopenharmony_ci				gen10_is_valid_mux_addr;
43278c2ecf20Sopenharmony_ci			perf->ops.is_valid_flex_reg =
43288c2ecf20Sopenharmony_ci				gen8_is_valid_flex_addr;
43298c2ecf20Sopenharmony_ci
43308c2ecf20Sopenharmony_ci			perf->ops.oa_enable = gen8_oa_enable;
43318c2ecf20Sopenharmony_ci			perf->ops.oa_disable = gen8_oa_disable;
43328c2ecf20Sopenharmony_ci			perf->ops.enable_metric_set = gen8_enable_metric_set;
43338c2ecf20Sopenharmony_ci			perf->ops.disable_metric_set = gen10_disable_metric_set;
43348c2ecf20Sopenharmony_ci			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
43358c2ecf20Sopenharmony_ci
43368c2ecf20Sopenharmony_ci			if (IS_GEN(i915, 10)) {
43378c2ecf20Sopenharmony_ci				perf->ctx_oactxctrl_offset = 0x128;
43388c2ecf20Sopenharmony_ci				perf->ctx_flexeu0_offset = 0x3de;
43398c2ecf20Sopenharmony_ci			} else {
43408c2ecf20Sopenharmony_ci				perf->ctx_oactxctrl_offset = 0x124;
43418c2ecf20Sopenharmony_ci				perf->ctx_flexeu0_offset = 0x78e;
43428c2ecf20Sopenharmony_ci			}
43438c2ecf20Sopenharmony_ci			perf->gen8_valid_ctx_bit = BIT(16);
43448c2ecf20Sopenharmony_ci		} else if (IS_GEN(i915, 12)) {
43458c2ecf20Sopenharmony_ci			perf->oa_formats = gen12_oa_formats;
43468c2ecf20Sopenharmony_ci
43478c2ecf20Sopenharmony_ci			perf->ops.is_valid_b_counter_reg =
43488c2ecf20Sopenharmony_ci				gen12_is_valid_b_counter_addr;
43498c2ecf20Sopenharmony_ci			perf->ops.is_valid_mux_reg =
43508c2ecf20Sopenharmony_ci				gen12_is_valid_mux_addr;
43518c2ecf20Sopenharmony_ci			perf->ops.is_valid_flex_reg =
43528c2ecf20Sopenharmony_ci				gen8_is_valid_flex_addr;
43538c2ecf20Sopenharmony_ci
43548c2ecf20Sopenharmony_ci			perf->ops.oa_enable = gen12_oa_enable;
43558c2ecf20Sopenharmony_ci			perf->ops.oa_disable = gen12_oa_disable;
43568c2ecf20Sopenharmony_ci			perf->ops.enable_metric_set = gen12_enable_metric_set;
43578c2ecf20Sopenharmony_ci			perf->ops.disable_metric_set = gen12_disable_metric_set;
43588c2ecf20Sopenharmony_ci			perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
43598c2ecf20Sopenharmony_ci
43608c2ecf20Sopenharmony_ci			perf->ctx_flexeu0_offset = 0;
43618c2ecf20Sopenharmony_ci			perf->ctx_oactxctrl_offset = 0x144;
43628c2ecf20Sopenharmony_ci		}
43638c2ecf20Sopenharmony_ci	}
43648c2ecf20Sopenharmony_ci
43658c2ecf20Sopenharmony_ci	if (perf->ops.enable_metric_set) {
43668c2ecf20Sopenharmony_ci		mutex_init(&perf->lock);
43678c2ecf20Sopenharmony_ci
43688c2ecf20Sopenharmony_ci		oa_sample_rate_hard_limit =
43698c2ecf20Sopenharmony_ci			RUNTIME_INFO(i915)->cs_timestamp_frequency_hz / 2;
43708c2ecf20Sopenharmony_ci
43718c2ecf20Sopenharmony_ci		mutex_init(&perf->metrics_lock);
43728c2ecf20Sopenharmony_ci		idr_init(&perf->metrics_idr);
43738c2ecf20Sopenharmony_ci
43748c2ecf20Sopenharmony_ci		/* We set up some ratelimit state to potentially throttle any
43758c2ecf20Sopenharmony_ci		 * _NOTES about spurious, invalid OA reports which we don't
43768c2ecf20Sopenharmony_ci		 * forward to userspace.
43778c2ecf20Sopenharmony_ci		 *
43788c2ecf20Sopenharmony_ci		 * We print a _NOTE about any throttling when closing the
43798c2ecf20Sopenharmony_ci		 * stream instead of waiting until driver _fini which no one
43808c2ecf20Sopenharmony_ci		 * would ever see.
43818c2ecf20Sopenharmony_ci		 *
43828c2ecf20Sopenharmony_ci		 * Using the same limiting factors as printk_ratelimit()
43838c2ecf20Sopenharmony_ci		 */
43848c2ecf20Sopenharmony_ci		ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
43858c2ecf20Sopenharmony_ci		/* Since we use a DRM_NOTE for spurious reports it would be
43868c2ecf20Sopenharmony_ci		 * inconsistent to let __ratelimit() automatically print a
43878c2ecf20Sopenharmony_ci		 * warning for throttling.
43888c2ecf20Sopenharmony_ci		 */
43898c2ecf20Sopenharmony_ci		ratelimit_set_flags(&perf->spurious_report_rs,
43908c2ecf20Sopenharmony_ci				    RATELIMIT_MSG_ON_RELEASE);
43918c2ecf20Sopenharmony_ci
43928c2ecf20Sopenharmony_ci		ratelimit_state_init(&perf->tail_pointer_race,
43938c2ecf20Sopenharmony_ci				     5 * HZ, 10);
43948c2ecf20Sopenharmony_ci		ratelimit_set_flags(&perf->tail_pointer_race,
43958c2ecf20Sopenharmony_ci				    RATELIMIT_MSG_ON_RELEASE);
43968c2ecf20Sopenharmony_ci
43978c2ecf20Sopenharmony_ci		atomic64_set(&perf->noa_programming_delay,
43988c2ecf20Sopenharmony_ci			     500 * 1000 /* 500us */);
43998c2ecf20Sopenharmony_ci
44008c2ecf20Sopenharmony_ci		perf->i915 = i915;
44018c2ecf20Sopenharmony_ci	}
44028c2ecf20Sopenharmony_ci}
44038c2ecf20Sopenharmony_ci
44048c2ecf20Sopenharmony_cistatic int destroy_config(int id, void *p, void *data)
44058c2ecf20Sopenharmony_ci{
44068c2ecf20Sopenharmony_ci	i915_oa_config_put(p);
44078c2ecf20Sopenharmony_ci	return 0;
44088c2ecf20Sopenharmony_ci}
44098c2ecf20Sopenharmony_ci
44108c2ecf20Sopenharmony_civoid i915_perf_sysctl_register(void)
44118c2ecf20Sopenharmony_ci{
44128c2ecf20Sopenharmony_ci	sysctl_header = register_sysctl_table(dev_root);
44138c2ecf20Sopenharmony_ci}
44148c2ecf20Sopenharmony_ci
44158c2ecf20Sopenharmony_civoid i915_perf_sysctl_unregister(void)
44168c2ecf20Sopenharmony_ci{
44178c2ecf20Sopenharmony_ci	unregister_sysctl_table(sysctl_header);
44188c2ecf20Sopenharmony_ci}
44198c2ecf20Sopenharmony_ci
44208c2ecf20Sopenharmony_ci/**
44218c2ecf20Sopenharmony_ci * i915_perf_fini - Counter part to i915_perf_init()
44228c2ecf20Sopenharmony_ci * @i915: i915 device instance
44238c2ecf20Sopenharmony_ci */
44248c2ecf20Sopenharmony_civoid i915_perf_fini(struct drm_i915_private *i915)
44258c2ecf20Sopenharmony_ci{
44268c2ecf20Sopenharmony_ci	struct i915_perf *perf = &i915->perf;
44278c2ecf20Sopenharmony_ci
44288c2ecf20Sopenharmony_ci	if (!perf->i915)
44298c2ecf20Sopenharmony_ci		return;
44308c2ecf20Sopenharmony_ci
44318c2ecf20Sopenharmony_ci	idr_for_each(&perf->metrics_idr, destroy_config, perf);
44328c2ecf20Sopenharmony_ci	idr_destroy(&perf->metrics_idr);
44338c2ecf20Sopenharmony_ci
44348c2ecf20Sopenharmony_ci	memset(&perf->ops, 0, sizeof(perf->ops));
44358c2ecf20Sopenharmony_ci	perf->i915 = NULL;
44368c2ecf20Sopenharmony_ci}
44378c2ecf20Sopenharmony_ci
44388c2ecf20Sopenharmony_ci/**
44398c2ecf20Sopenharmony_ci * i915_perf_ioctl_version - Version of the i915-perf subsystem
44408c2ecf20Sopenharmony_ci *
44418c2ecf20Sopenharmony_ci * This version number is used by userspace to detect available features.
44428c2ecf20Sopenharmony_ci */
44438c2ecf20Sopenharmony_ciint i915_perf_ioctl_version(void)
44448c2ecf20Sopenharmony_ci{
44458c2ecf20Sopenharmony_ci	/*
44468c2ecf20Sopenharmony_ci	 * 1: Initial version
44478c2ecf20Sopenharmony_ci	 *   I915_PERF_IOCTL_ENABLE
44488c2ecf20Sopenharmony_ci	 *   I915_PERF_IOCTL_DISABLE
44498c2ecf20Sopenharmony_ci	 *
44508c2ecf20Sopenharmony_ci	 * 2: Added runtime modification of OA config.
44518c2ecf20Sopenharmony_ci	 *   I915_PERF_IOCTL_CONFIG
44528c2ecf20Sopenharmony_ci	 *
44538c2ecf20Sopenharmony_ci	 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
44548c2ecf20Sopenharmony_ci	 *    preemption on a particular context so that performance data is
44558c2ecf20Sopenharmony_ci	 *    accessible from a delta of MI_RPC reports without looking at the
44568c2ecf20Sopenharmony_ci	 *    OA buffer.
44578c2ecf20Sopenharmony_ci	 *
44588c2ecf20Sopenharmony_ci	 * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can
44598c2ecf20Sopenharmony_ci	 *    be run for the duration of the performance recording based on
44608c2ecf20Sopenharmony_ci	 *    their SSEU configuration.
44618c2ecf20Sopenharmony_ci	 *
44628c2ecf20Sopenharmony_ci	 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
44638c2ecf20Sopenharmony_ci	 *    interval for the hrtimer used to check for OA data.
44648c2ecf20Sopenharmony_ci	 */
44658c2ecf20Sopenharmony_ci	return 5;
44668c2ecf20Sopenharmony_ci}
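
/*
 * Userspace discovers this revision through the i915 GETPARAM ioctl.
 * Illustrative sketch only (compiled out), assuming the
 * I915_PARAM_PERF_REVISION key from the i915 uAPI header.
 */
#if 0
static int example_query_perf_revision(int drm_fd)
{
	int revision = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_PERF_REVISION,
		.value = &revision,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;

	return revision;
}
#endif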
44678c2ecf20Sopenharmony_ci
44688c2ecf20Sopenharmony_ci#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
44698c2ecf20Sopenharmony_ci#include "selftests/i915_perf.c"
44708c2ecf20Sopenharmony_ci#endif