// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/drm_atomic_state_helper.h>

#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_pm.h"
#include "intel_sideband.h"

/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
	u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
};

struct intel_qgv_info {
	struct intel_qgv_point points[I915_NUM_QGV_POINTS];
	u8 num_points;
	u8 t_bl;
};

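/*
 * Read the DRAM clock and timing parameters for a single QGV point
 * from the pcode memory subsystem mailbox. t_rc is not reported
 * directly, so derive it as t_rp + t_ras.
 */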
static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
					 struct intel_qgv_point *sp,
					 int point)
{
	u32 val = 0, val2 = 0;
	int ret;

	ret = sandybridge_pcode_read(dev_priv,
				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
				     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
				     &val, &val2);
	if (ret)
		return ret;

	sp->dclk = val & 0xffff;
	sp->t_rp = (val & 0xff0000) >> 16;
	sp->t_rcd = (val & 0xff000000) >> 24;

	sp->t_rdpre = val2 & 0xff;
	sp->t_ras = (val2 & 0xff00) >> 8;

	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}

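/*
 * Ask pcode to restrict (mask off) the QGV points in @points_mask so
 * they are not used while the current display configuration is active.
 */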
int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
				  u32 points_mask)
{
	int ret;

	/* bspec says to keep retrying for at least 1 ms */
	ret = skl_pcode_request(dev_priv, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
				points_mask,
				ICL_PCODE_POINTS_RESTRICTED_MASK,
				ICL_PCODE_POINTS_RESTRICTED,
				1);

	if (ret < 0) {
		drm_err(&dev_priv->drm, "Failed to disable qgv points (%d)\n", ret);
		return ret;
	}

	return 0;
}

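/*
 * Fill in the QGV point table: the number of points and the DRAM burst
 * length come from the detected DRAM configuration, while the per-point
 * clock and timings are read from pcode.
 */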
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
			      struct intel_qgv_info *qi)
{
	const struct dram_info *dram_info = &dev_priv->dram_info;
	int i, ret;

	qi->num_points = dram_info->num_qgv_points;

	if (IS_GEN(dev_priv, 12))
		qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 16;
	else if (IS_GEN(dev_priv, 11))
		qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;

	if (drm_WARN_ON(&dev_priv->drm,
			qi->num_points > ARRAY_SIZE(qi->points)))
		qi->num_points = ARRAY_SIZE(qi->points);

	for (i = 0; i < qi->num_points; i++) {
		struct intel_qgv_point *sp = &qi->points[i];

		ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i);
		if (ret)
			return ret;

		drm_dbg_kms(&dev_priv->drm,
			    "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
			    i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
			    sp->t_rcd, sp->t_rc);
	}

	return 0;
}

static int icl_calc_bw(int dclk, int num, int den)
{
	/* multiples of 16.666MHz (100/6) */
	return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6);
}

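/*
 * Return the highest dclk among the QGV points; used below to derive
 * the display engine bandwidth cap (maxdebw).
 */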
static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
{
	u16 dclk = 0;
	int i;

	for (i = 0; i < qi->num_points; i++)
		dclk = max(dclk, qi->points[i].dclk);

	return dclk;
}

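/*
 * Per-platform system agent parameters feeding the derated bandwidth
 * calculation in icl_get_bw_info().
 */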
struct intel_sa_info {
	u16 displayrtids;
	u8 deburst, deprogbwlimit;
};

static const struct intel_sa_info icl_sa_info = {
	.deburst = 8,
	.deprogbwlimit = 25, /* GB/s */
	.displayrtids = 128,
};

static const struct intel_sa_info tgl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 34, /* GB/s */
	.displayrtids = 256,
};

static const struct intel_sa_info rkl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 20, /* GB/s */
	.displayrtids = 128,
};

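/*
 * Precompute the derated memory bandwidth available at each QGV point
 * for an increasing number of active planes and cache the results in
 * dev_priv->max_bw[], for icl_max_bw() to look up at atomic check time.
 * Also note whether SAGV is under our control at all.
 */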
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels = dev_priv->dram_info.num_channels;
	int deinterleave;
	int ipqdepth, ipqdepthpch;
	int dclk_max;
	int maxdebw;
	int i, ret;

	ret = icl_get_qgv_points(dev_priv, &qi);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}

	deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
	dclk_max = icl_sagv_max_dclk(&qi);

	ipqdepthpch = 16;

	maxdebw = min(sa->deprogbwlimit * 1000,
		      icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */
	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);

	for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
		struct intel_bw_info *bi = &dev_priv->max_bw[i];
		int clpchgroup;
		int j;

		clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
		bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;

		bi->num_qgv_points = qi.num_points;

		for (j = 0; j < qi.num_points; j++) {
			const struct intel_qgv_point *sp = &qi.points[j];
			int ct, bw;

			/*
			 * Max row cycle time
			 *
			 * FIXME what is the logic behind the
			 * assumed burst length?
			 */
			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
			bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);

			bi->deratedbw[j] = min(maxdebw,
					       bw * 9 / 10); /* 90% */

			drm_dbg_kms(&dev_priv->drm,
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
				    i, j, bi->num_planes, bi->deratedbw[j]);
		}

		if (bi->num_planes == 1)
			break;
	}

	/*
	 * If SAGV is disabled in BIOS, we always get one SAGV point,
	 * but we can't send pcode commands to restrict it as that
	 * would fail and be pointless anyway.
	 */
	if (qi.num_points == 1)
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
	else
		dev_priv->sagv_status = I915_SAGV_ENABLED;

	return 0;
}

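/*
 * Look up the derated bandwidth for the given number of active planes
 * at the given QGV point. Returns UINT_MAX for QGV points that pcode
 * did not expose (SAGV forced to off/min/med/max).
 */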
static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
			       int num_planes, int qgv_point)
{
	int i;

	/*
	 * Let's return max bw for 0 planes
	 */
	num_planes = max(1, num_planes);

	for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
		const struct intel_bw_info *bi =
			&dev_priv->max_bw[i];

		/*
		 * Pcode will not expose all QGV points when
		 * SAGV is forced to off/min/med/max.
		 */
		if (qgv_point >= bi->num_qgv_points)
			return UINT_MAX;

		if (num_planes >= bi->num_planes)
			return bi->deratedbw[qgv_point];
	}

	return 0;
}

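/* Set up the memory bandwidth limits for platforms that enforce them (gen11+). */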
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_ROCKETLAKE(dev_priv))
		icl_get_bw_info(dev_priv, &rkl_sa_info);
	else if (IS_GEN(dev_priv, 12))
		icl_get_bw_info(dev_priv, &tgl_sa_info);
	else if (IS_GEN(dev_priv, 11))
		icl_get_bw_info(dev_priv, &icl_sa_info);
}

static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
{
	/*
	 * We assume cursors are small enough
	 * to not cause bandwidth problems.
	 */
	return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
}

static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	unsigned int data_rate = 0;
	enum plane_id plane_id;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		/*
		 * We assume cursors are small enough
		 * to not cause bandwidth problems.
		 */
		if (plane_id == PLANE_CURSOR)
			continue;

		data_rate += crtc_state->data_rate[plane_id];
	}

	return data_rate;
}

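/*
 * Record the crtc's current data rate and active plane count directly
 * in @bw_state.
 */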
void intel_bw_crtc_update(struct intel_bw_state *bw_state,
			  const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	bw_state->data_rate[crtc->pipe] =
		intel_bw_crtc_data_rate(crtc_state);
	bw_state->num_active_planes[crtc->pipe] =
		intel_bw_crtc_num_active_planes(crtc_state);

	drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
		    pipe_name(crtc->pipe),
		    bw_state->data_rate[crtc->pipe],
		    bw_state->num_active_planes[crtc->pipe]);
}

static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
					       const struct intel_bw_state *bw_state)
{
	unsigned int num_active_planes = 0;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		num_active_planes += bw_state->num_active_planes[pipe];

	return num_active_planes;
}

static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
				       const struct intel_bw_state *bw_state)
{
	unsigned int data_rate = 0;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		data_rate += bw_state->data_rate[pipe];

	return data_rate;
}

struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->bw_obj);

	return to_intel_bw_state(bw_state);
}

struct intel_bw_state *
intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->bw_obj);

	return to_intel_bw_state(bw_state);
}

struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->bw_obj);
	if (IS_ERR(bw_state))
		return ERR_CAST(bw_state);

	return to_intel_bw_state(bw_state);
}

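/*
 * Sum up the data rate consumed on each dbuf slice and derive the
 * minimum CDCLK needed to feed them: experimentally we see underruns
 * once the total exceeds 64 * CDCLK, hence min_cdclk = total bw / 64.
 */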
int skl_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_bw_state *new_bw_state = NULL;
	struct intel_bw_state *old_bw_state = NULL;
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int max_bw = 0;
	int slice_id;
	enum pipe pipe;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		enum plane_id plane_id;
		struct intel_dbuf_bw *crtc_bw;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);

		crtc_bw = &new_bw_state->dbuf_bw[crtc->pipe];

		memset(&crtc_bw->used_bw, 0, sizeof(crtc_bw->used_bw));

		if (!crtc_state->hw.active)
			continue;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			const struct skl_ddb_entry *plane_alloc =
				&crtc_state->wm.skl.plane_ddb_y[plane_id];
			const struct skl_ddb_entry *uv_plane_alloc =
				&crtc_state->wm.skl.plane_ddb_uv[plane_id];
			unsigned int data_rate = crtc_state->data_rate[plane_id];
			unsigned int dbuf_mask = 0;

			dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, plane_alloc);
			dbuf_mask |= skl_ddb_dbuf_slice_mask(dev_priv, uv_plane_alloc);

			/*
			 * FIXME: To calculate this more properly we probably
			 * need to split the per-plane data_rate into
			 * data_rate_y and data_rate_uv for multiplanar
			 * formats so that it doesn't get accounted twice if
			 * the planes happen to reside on different slices.
			 * However, for pre-icl this works anyway because
			 * there is only a single slice, and for icl+ the uv
			 * plane has a non-zero data rate. So in the worst
			 * case the calculation is a bit pessimistic, which
			 * shouldn't pose any significant problem.
			 */
			for_each_dbuf_slice_in_mask(slice_id, dbuf_mask)
				crtc_bw->used_bw[slice_id] += data_rate;
		}
	}

	if (!old_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		struct intel_dbuf_bw *crtc_bw;

		crtc_bw = &new_bw_state->dbuf_bw[pipe];

		for_each_dbuf_slice(slice_id) {
			/*
			 * Current experimental observations show that,
			 * contrary to BSpec, we get underruns once the total
			 * bandwidth across all slices exceeds 64 * CDCLK.
			 * As a temporary measure, to avoid keeping CDCLK
			 * bumped up all the time, derive the minimum CDCLK
			 * from the overall bandwidth consumed by the slices
			 * according to that formula.
			 */
			max_bw += crtc_bw->used_bw[slice_id];
		}
	}

	new_bw_state->min_cdclk = max_bw / 64;

	if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
		int ret = intel_atomic_lock_global_state(&new_bw_state->base);

		if (ret)
			return ret;
	}

	return 0;
}

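/*
 * Track the highest per-pipe min_cdclk from the CDCLK state in the
 * bandwidth state, locking the global state if it changed.
 */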
int intel_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_bw_state *new_bw_state = NULL;
	struct intel_bw_state *old_bw_state = NULL;
	const struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int min_cdclk = 0;
	enum pipe pipe;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);
	}

	if (!old_bw_state)
		return 0;

	for_each_pipe(dev_priv, pipe) {
		struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_new_cdclk_state(state);
		if (!cdclk_state)
			return 0;

		min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
	}

	new_bw_state->min_cdclk = min_cdclk;

	if (new_bw_state->min_cdclk != old_bw_state->min_cdclk) {
		int ret = intel_atomic_lock_global_state(&new_bw_state->base);

		if (ret)
			return ret;
	}

	return 0;
}

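/*
 * Compute the total memory bandwidth required by the new display
 * configuration, work out which QGV points can satisfy it, and store
 * the mask of points that must be restricted for this commit.
 */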
int intel_bw_atomic_check(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_bw_state *new_bw_state = NULL;
	const struct intel_bw_state *old_bw_state = NULL;
	unsigned int data_rate;
	unsigned int num_active_planes;
	struct intel_crtc *crtc;
	int i, ret;
	u32 allowed_points = 0;
	unsigned int max_bw_point = 0, max_bw = 0;
	unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
	u32 mask = (1 << num_qgv_points) - 1;

	/* FIXME earlier gens need some checks too */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		unsigned int old_data_rate =
			intel_bw_crtc_data_rate(old_crtc_state);
		unsigned int new_data_rate =
			intel_bw_crtc_data_rate(new_crtc_state);
		unsigned int old_active_planes =
			intel_bw_crtc_num_active_planes(old_crtc_state);
		unsigned int new_active_planes =
			intel_bw_crtc_num_active_planes(new_crtc_state);

		/*
		 * Avoid locking the bw state when
		 * nothing significant has changed.
		 */
		if (old_data_rate == new_data_rate &&
		    old_active_planes == new_active_planes)
			continue;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		new_bw_state->data_rate[crtc->pipe] = new_data_rate;
		new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;

		drm_dbg_kms(&dev_priv->drm,
			    "pipe %c data rate %u num active planes %u\n",
			    pipe_name(crtc->pipe),
			    new_bw_state->data_rate[crtc->pipe],
			    new_bw_state->num_active_planes[crtc->pipe]);
	}

	if (!new_bw_state)
		return 0;

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	data_rate = intel_bw_data_rate(dev_priv, new_bw_state);
	data_rate = DIV_ROUND_UP(data_rate, 1000);

	num_active_planes = intel_bw_num_active_planes(dev_priv, new_bw_state);

	for (i = 0; i < num_qgv_points; i++) {
		unsigned int max_data_rate;

		max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
		/*
		 * We need to know which QGV point gives us the maximum
		 * bandwidth in order to disable SAGV if we find that we
		 * exceed the SAGV block time with the watermarks. By this
		 * point we already have those, as they are calculated
		 * earlier in intel_atomic_check().
		 */
		if (max_data_rate > max_bw) {
			max_bw_point = i;
			max_bw = max_data_rate;
		}
		if (max_data_rate >= data_rate)
			allowed_points |= BIT(i);
		drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
			    i, max_data_rate, data_rate);
	}

	/*
	 * BSpec states that we should always have at least one allowed
	 * point left, so if we don't, simply reject the configuration.
	 */
	if (allowed_points == 0) {
		drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
			    " bandwidth %d for display configuration (%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	/*
	 * If we can't enable SAGV due to the increased memory latency it
	 * may cause, leave only the single QGV point with the highest
	 * bandwidth.
	 */
	if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
		allowed_points = BIT(max_bw_point);
		drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
			    max_bw_point);
	}
	/*
	 * We store the ones which need to be masked as that is what PCode
	 * actually accepts as a parameter.
	 */
	new_bw_state->qgv_points_mask = ~allowed_points & mask;

	old_bw_state = intel_atomic_get_old_bw_state(state);
	/*
	 * If the actual mask has changed, make sure the commits are
	 * serialized (in case this is a non-modeset, non-blocking commit).
	 */
	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	return 0;
}

static struct intel_global_state *
intel_bw_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_bw_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	return &state->base;
}

static void intel_bw_destroy_state(struct intel_global_obj *obj,
				   struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_bw_funcs = {
	.atomic_duplicate_state = intel_bw_duplicate_state,
	.atomic_destroy_state = intel_bw_destroy_state,
};

int intel_bw_init(struct drm_i915_private *dev_priv)
{
	struct intel_bw_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	intel_atomic_global_obj_init(dev_priv, &dev_priv->bw_obj,
				     &state->base, &intel_bw_funcs);

	return 0;
}