// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2011 Intel Corporation
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 *	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
 */

#include <linux/delay.h>
#include <linux/highmem.h>

#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

#include "framebuffer.h"
#include "gma_display.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"

/**
 * gma_pipe_has_type - check if any output on @crtc is of type @type
 * @crtc: CRTC (pipe) to check
 * @type: encoder type (INTEL_OUTPUT_*) to look for
 */
bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *l_entry;

	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
			struct gma_encoder *gma_encoder =
						gma_attached_encoder(l_entry);
			if (gma_encoder->type == type)
				return true;
		}
	}

	return false;
}

void gma_wait_for_vblank(struct drm_device *dev)
{
	/* Wait for 20 ms, i.e. one full frame at 50 Hz. This is a fixed
	 * delay rather than a wait on the actual vblank interrupt. */
	mdelay(20);
}

int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct gtt_range *gtt;
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	unsigned long start, offset;
	u32 dspcntr;
	int ret = 0;

	if (!gma_power_begin(dev, true))
		return 0;

	/* no fb bound */
	if (!fb) {
		dev_err(dev->dev, "No FB bound\n");
		goto gma_pipe_cleaner;
	}

	gtt = to_gtt_range(fb->obj[0]);

	/* We are displaying this buffer, make sure it is actually loaded
	   into the GTT */
	ret = psb_gtt_pin(gtt);
	if (ret < 0)
		goto gma_pipe_set_base_exit;
	start = gtt->offset;
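	/*
	 * Byte offset of pixel (x, y) inside the framebuffer: one full
	 * line (pitch) per y plus cpp bytes for each x pixel.
	 */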
	offset = y * fb->pitches[0] + x * fb->format->cpp[0];

	REG_WRITE(map->stride, fb->pitches[0]);

	dspcntr = REG_READ(map->cntr);
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;

	switch (fb->format->cpp[0] * 8) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->format->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		dev_err(dev->dev, "Unknown color depth\n");
		ret = -EINVAL;
		goto gma_pipe_set_base_exit;
	}
	REG_WRITE(map->cntr, dspcntr);

	dev_dbg(dev->dev,
		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);

	/* FIXME: Investigate whether this really is the base for psb and why
		  the linear offset is named base for the other chips. map->surf
		  should be the base and map->linoff the offset for all chips */
	if (IS_PSB(dev)) {
		REG_WRITE(map->base, offset + start);
		REG_READ(map->base);
	} else {
		REG_WRITE(map->base, offset);
		REG_READ(map->base);
		REG_WRITE(map->surf, start);
		REG_READ(map->surf);
	}

gma_pipe_cleaner:
	/* If there was a previous display we can now unpin it */
	if (old_fb)
		psb_gtt_unpin(to_gtt_range(old_fb->obj[0]));

gma_pipe_set_base_exit:
	gma_power_end(dev);
	return ret;
}

/* Loads the palette/gamma unit for the CRTC with the prepared values */
void gma_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	int palreg = map->palette;
	u16 *r, *g, *b;
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled)
		return;

	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;

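	/*
	 * Each palette entry truncates the 16-bit gamma ramp to 8 bits per
	 * channel and packs it as 0x00RRGGBB; lut_adj[] is a per-entry
	 * adjustment added on top of the ramp value.
	 */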
	if (gma_power_begin(dev, false)) {
		for (i = 0; i < 256; i++) {
			REG_WRITE(palreg + 4 * i,
				  (((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
				  (((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
				  ((*b++ >> 8) + gma_crtc->lut_adj[i]));
		}
		gma_power_end(dev);
	} else {
		for (i = 0; i < 256; i++) {
			/* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
			dev_priv->regs.pipe[0].palette[i] =
				(((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
				(((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
				((*b++ >> 8) + gma_crtc->lut_adj[i]);
		}
	}
}

int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
		       u32 size,
		       struct drm_modeset_acquire_ctx *ctx)
{
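	/*
	 * The DRM core copies the new ramp into crtc->gamma_store before
	 * calling this hook (red/green/blue point into that storage), so
	 * reloading the hardware LUT from there is all that is needed.
	 */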
	gma_crtc_load_lut(crtc);

	return 0;
}

/**
 * gma_crtc_dpms - set the power management mode of the pipe and plane
 * @crtc: CRTC to update
 * @mode: DRM_MODE_DPMS_* power state
 *
 * This code should probably grow support for turning the cursor off and back
 * on appropriately at the same time as we're turning the pipe off/on.
 */
void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	u32 temp;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */

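	/*
	 * On Cedarview, self refresh is turned off across the pipe change
	 * and the watermarks are recomputed afterwards via update_wm().
	 */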
	if (IS_CDV(dev))
		dev_priv->ops->disable_sr(dev);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		if (gma_crtc->active)
			break;

		gma_crtc->active = true;

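		/*
		 * The DPLL value is written once with the VCO still
		 * disabled and then twice with the enable bit set, with a
		 * 150us settle time after each write, so the clocks can
		 * lock before the plane and pipe are enabled.
		 */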
		/* Enable the DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) == 0) {
			REG_WRITE(map->dpll, temp);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
		}

		/* Enable the plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			REG_WRITE(map->cntr,
				  temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
		}

		udelay(150);

		/* Enable the pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) == 0)
			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);

		temp = REG_READ(map->status);
		temp &= ~(0xFFFF);
		temp |= PIPE_FIFO_UNDERRUN;
		REG_WRITE(map->status, temp);
		REG_READ(map->status);

		gma_crtc_load_lut(crtc);

		/* Give the overlay scaler a chance to enable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, true); TODO */

		drm_crtc_vblank_on(crtc);
		break;
	case DRM_MODE_DPMS_OFF:
		if (!gma_crtc->active)
			break;

		gma_crtc->active = false;

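		/*
		 * Tear down in the reverse order of enable: plane, then
		 * pipe, then DPLL, waiting for a vblank between the steps
		 * so the last scanout of the old configuration completes.
		 */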
		/* Give the overlay scaler a chance to disable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */

		/* Disable the VGA plane that we never use */
		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

		/* Turn off vblank interrupts */
		drm_crtc_vblank_off(crtc);

		/* Wait for vblank for the disable to take effect */
		gma_wait_for_vblank(dev);

		/* Disable plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			REG_WRITE(map->cntr,
				  temp & ~DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
			REG_READ(map->base);
		}

		/* Disable pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
			REG_READ(map->conf);
		}

		/* Wait for vblank for the disable to take effect. */
		gma_wait_for_vblank(dev);

		udelay(150);

		/* Disable DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) != 0) {
			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
		}

		/* Wait for the clocks to turn off. */
		udelay(150);
		break;
	}

	if (IS_CDV(dev))
		dev_priv->ops->update_wm(dev, crtc);

	/* Set FIFO watermarks */
	REG_WRITE(DSPARB, 0x3F3E);
}

int gma_crtc_cursor_set(struct drm_crtc *crtc,
			struct drm_file *file_priv,
			uint32_t handle,
			uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
	uint32_t temp;
	size_t addr = 0;
	struct gtt_range *gt;
	struct gtt_range *cursor_gt = gma_crtc->cursor_gt;
	struct drm_gem_object *obj;
	void *tmp_dst, *tmp_src;
	int ret = 0, i, cursor_pages;

	/* If we didn't get a handle then turn the cursor off */
	if (!handle) {
		temp = CURSOR_MODE_DISABLE;
		if (gma_power_begin(dev, false)) {
			REG_WRITE(control, temp);
			REG_WRITE(base, 0);
			gma_power_end(dev);
		}

		/* Unpin the old GEM object */
		if (gma_crtc->cursor_obj) {
			gt = container_of(gma_crtc->cursor_obj,
					  struct gtt_range, gem);
			psb_gtt_unpin(gt);
			drm_gem_object_put(gma_crtc->cursor_obj);
			gma_crtc->cursor_obj = NULL;
		}
		return 0;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->size < width * height * 4) {
		dev_dbg(dev->dev, "Buffer is too small\n");
		ret = -ENOMEM;
		goto unref_cursor;
	}

	gt = container_of(obj, struct gtt_range, gem);

	/* Pin the memory into the GTT */
	ret = psb_gtt_pin(gt);
	if (ret) {
		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
		goto unref_cursor;
	}

	if (dev_priv->ops->cursor_needs_phys) {
		if (cursor_gt == NULL) {
			dev_err(dev->dev, "No hardware cursor mem available");
			ret = -ENOMEM;
			goto unref_cursor;
		}

		/* Copy at most four pages so we cannot overflow the
		 * reserved cursor memory */
		if (gt->npage > 4)
			cursor_pages = 4;
		else
			cursor_pages = gt->npage;

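		/*
		 * Chips that need a physical cursor address cannot fetch
		 * the cursor image through the GTT, so copy it into the
		 * reserved cursor memory and point the hardware at
		 * cursor_addr rather than at the GTT offset.
		 */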
		/* Copy the cursor to cursor mem */
		tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
		for (i = 0; i < cursor_pages; i++) {
			tmp_src = kmap(gt->pages[i]);
			memcpy(tmp_dst, tmp_src, PAGE_SIZE);
			kunmap(gt->pages[i]);
			tmp_dst += PAGE_SIZE;
		}

		addr = gma_crtc->cursor_addr;
	} else {
		addr = gt->offset;
		gma_crtc->cursor_addr = addr;
	}

	temp = 0;
	/* set the pipe for the cursor */
	temp |= (pipe << 28);
	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;

	if (gma_power_begin(dev, false)) {
		REG_WRITE(control, temp);
		REG_WRITE(base, addr);
		gma_power_end(dev);
	}

	/* unpin the old bo */
	if (gma_crtc->cursor_obj) {
		gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
		psb_gtt_unpin(gt);
		drm_gem_object_put(gma_crtc->cursor_obj);
	}

	gma_crtc->cursor_obj = obj;
unlock:
	return ret;

unref_cursor:
	drm_gem_object_put(obj);
	return ret;
}

int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t temp = 0;
	uint32_t addr;

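	/*
	 * CURxPOS uses sign-magnitude encoding: each axis gets its
	 * magnitude in the low bits of its field plus a sign bit, so a
	 * cursor hanging off the top/left edge is written as |value|
	 * with CURSOR_POS_SIGN set.
	 */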
	if (x < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
		x = -x;
	}
	if (y < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
		y = -y;
	}

	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);

	addr = gma_crtc->cursor_addr;

	if (gma_power_begin(dev, false)) {
		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
		gma_power_end(dev);
	}
	return 0;
}

void gma_crtc_prepare(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}

void gma_crtc_commit(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}

void gma_crtc_disable(struct drm_crtc *crtc)
{
	struct gtt_range *gt;
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

	if (crtc->primary->fb) {
		gt = to_gtt_range(crtc->primary->fb->obj[0]);
		psb_gtt_unpin(gt);
	}
}

void gma_crtc_destroy(struct drm_crtc *crtc)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);

	kfree(gma_crtc->crtc_state);
	drm_crtc_cleanup(crtc);
	kfree(gma_crtc);
}

int gma_crtc_page_flip(struct drm_crtc *crtc,
		       struct drm_framebuffer *fb,
		       struct drm_pending_vblank_event *event,
		       uint32_t page_flip_flags,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_framebuffer *current_fb = crtc->primary->fb;
	struct drm_framebuffer *old_fb = crtc->primary->old_fb;
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;
	int ret;

	if (!crtc_funcs->mode_set_base)
		return -EINVAL;

	/* Using mode_set_base requires the new fb to be set already. */
	crtc->primary->fb = fb;

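	/*
	 * Arm the flip event under event_lock before programming the new
	 * base so the vblank interrupt handler can send it; if programming
	 * fails, the event is disarmed again and the vblank reference is
	 * dropped.
	 */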
	if (event) {
		spin_lock_irqsave(&dev->event_lock, flags);

		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		gma_crtc->page_flip_event = event;
		spin_unlock_irqrestore(&dev->event_lock, flags);

		/* Call this locked if we want an event at vblank interrupt. */
		ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
		if (ret) {
			spin_lock_irqsave(&dev->event_lock, flags);
			if (gma_crtc->page_flip_event) {
				gma_crtc->page_flip_event = NULL;
				drm_crtc_vblank_put(crtc);
			}
			spin_unlock_irqrestore(&dev->event_lock, flags);
		}
	} else {
		ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
	}

	/* Restore previous fb in case of failure. */
	if (ret)
		crtc->primary->fb = current_fb;

	return ret;
}

int gma_crtc_set_config(struct drm_mode_set *set,
			struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = set->crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int ret;

	if (!dev_priv->rpm_enabled)
		return drm_crtc_helper_set_config(set, ctx);

	pm_runtime_forbid(&dev->pdev->dev);
	ret = drm_crtc_helper_set_config(set, ctx);
	pm_runtime_allow(&dev->pdev->dev);

	return ret;
}

/**
 * gma_crtc_save - save the hardware state of the given CRTC
 * @crtc: CRTC whose register state should be saved
 */
void gma_crtc_save(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No CRTC state found\n");
		return;
	}

	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
	crtc_state->savePIPECONF = REG_READ(map->conf);
	crtc_state->savePIPESRC = REG_READ(map->src);
	crtc_state->saveFP0 = REG_READ(map->fp0);
	crtc_state->saveFP1 = REG_READ(map->fp1);
	crtc_state->saveDPLL = REG_READ(map->dpll);
	crtc_state->saveHTOTAL = REG_READ(map->htotal);
	crtc_state->saveHBLANK = REG_READ(map->hblank);
	crtc_state->saveHSYNC = REG_READ(map->hsync);
	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
	crtc_state->saveVBLANK = REG_READ(map->vblank);
	crtc_state->saveVSYNC = REG_READ(map->vsync);
	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);

	/* NOTE: DSPSIZE DSPPOS only for psb */
	crtc_state->saveDSPSIZE = REG_READ(map->size);
	crtc_state->saveDSPPOS = REG_READ(map->pos);

	crtc_state->saveDSPBASE = REG_READ(map->base);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
}

/**
 * gma_crtc_restore - restore the previously saved hardware state of the CRTC
 * @crtc: CRTC whose register state should be restored
 */
void gma_crtc_restore(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No crtc state\n");
		return;
	}

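	/*
	 * Bring the DPLL back in stages: write the saved value with the
	 * VCO disabled first, restore the FP dividers, then write the
	 * full saved DPLL value and allow 150us for it to lock.
	 */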
	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
		REG_WRITE(map->dpll,
			crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
		REG_READ(map->dpll);
		udelay(150);
	}

	REG_WRITE(map->fp0, crtc_state->saveFP0);
	REG_READ(map->fp0);

	REG_WRITE(map->fp1, crtc_state->saveFP1);
	REG_READ(map->fp1);

	REG_WRITE(map->dpll, crtc_state->saveDPLL);
	REG_READ(map->dpll);
	udelay(150);

	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);

	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
	REG_WRITE(map->pos, crtc_state->saveDSPPOS);

	REG_WRITE(map->src, crtc_state->savePIPESRC);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);
	REG_WRITE(map->conf, crtc_state->savePIPECONF);

	gma_wait_for_vblank(dev);

	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);

	gma_wait_for_vblank(dev);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
}

void gma_encoder_prepare(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
	    encoder->helper_private;
	/* LVDS has its own version of prepare, see psb_intel_lvds_prepare() */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}

void gma_encoder_commit(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
	    encoder->helper_private;
	/* LVDS has its own version of commit, see psb_intel_lvds_commit() */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}

void gma_encoder_destroy(struct drm_encoder *encoder)
{
	struct gma_encoder *intel_encoder = to_gma_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Currently there is only a 1:1 mapping of encoders and connectors */
struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
{
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);

	return &gma_encoder->base;
}

void gma_connector_attach_encoder(struct gma_connector *connector,
				  struct gma_encoder *encoder)
{
	connector->encoder = encoder;
	drm_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }

bool gma_pll_is_valid(struct drm_crtc *crtc,
		      const struct gma_limit_t *limit,
		      struct gma_clock_t *clock)
{
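	/*
	 * Reject the candidate as soon as any PLL parameter falls outside
	 * the per-chip limits; GMA_PLL_INVALID() just returns false (the
	 * DRM_ERROR it could print is compiled out).
	 */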
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		GMA_PLL_INVALID("p1 out of range");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		GMA_PLL_INVALID("p out of range");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		GMA_PLL_INVALID("m2 out of range");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		GMA_PLL_INVALID("m1 out of range");
	/* On CDV m1 is always 0 */
	if (clock->m1 <= clock->m2 && clock->m1 != 0)
		GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		GMA_PLL_INVALID("m out of range");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		GMA_PLL_INVALID("n out of range");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		GMA_PLL_INVALID("vco out of range");
	/* XXX: We may need to be checking "Dot clock"
	 * depending on the multiplier, connector, etc.,
	 * rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		GMA_PLL_INVALID("dot out of range");

	return true;
}

bool gma_find_best_pll(const struct gma_limit_t *limit,
		       struct drm_crtc *crtc, int target, int refclk,
		       struct gma_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	const struct gma_clock_funcs *clock_funcs =
						to_gma_crtc(crtc)->clock_funcs;
	struct gma_clock_t clock;
	int err = target;

	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

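	/*
	 * Brute-force search: walk every m1/m2/n/p1 combination allowed by
	 * the limits, compute the resulting dot clock and keep the
	 * candidate closest to the requested target. Returns true if any
	 * valid candidate improved on the initial error (== target).
	 */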
	/* m1 is always 0 on CDV so the outermost loop will run just once */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     (clock.m2 < clock.m1 || clock.m1 == 0) &&
		      clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max;
				     clock.p1++) {
					int this_err;

					clock_funcs->clock(refclk, &clock);

					if (!clock_funcs->pll_is_valid(crtc,
								limit, &clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return err != target;
}
