/* via_irq.c
 *
 * Copyright 2004 BEAM Ltd.
 * Copyright 2002 Tungsten Graphics, Inc.
 * Copyright 2005 Thomas Hellstrom.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * BEAM LTD, TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Terry Barnaby <terry1@beam.ltd.uk>
 *    Keith Whitwell <keith@tungstengraphics.com>
 *    Thomas Hellstrom <unichrome@shipmail.org>
 *
 * This code provides standard DRM access to the VIA Unichrome / Unichrome Pro
 * vertical blank interrupt, as well as an infrastructure for handling the
 * chip's other interrupts.  The refresh rate is also calculated for video
 * playback sync purposes.
 */

#include <drm/drm_device.h>
#include <drm/drm_vblank.h>
#include <drm/via_drm.h>

#include "via_drv.h"

#define VIA_REG_INTERRUPT       0x200

/* VIA_REG_INTERRUPT */
#define VIA_IRQ_GLOBAL          (1U << 31)
#define VIA_IRQ_VBLANK_ENABLE   (1 << 19)
#define VIA_IRQ_VBLANK_PENDING  (1 << 3)
#define VIA_IRQ_HQV0_ENABLE     (1 << 11)
#define VIA_IRQ_HQV1_ENABLE     (1 << 25)
#define VIA_IRQ_HQV0_PENDING    (1 << 9)
#define VIA_IRQ_HQV1_PENDING    (1 << 10)
#define VIA_IRQ_DMA0_DD_ENABLE  (1 << 20)
#define VIA_IRQ_DMA0_TD_ENABLE  (1 << 21)
#define VIA_IRQ_DMA1_DD_ENABLE  (1 << 22)
#define VIA_IRQ_DMA1_TD_ENABLE  (1 << 23)
#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)

/*
 * Device-specific IRQs go here. This type might need to be extended with
 * the register if there are multiple IRQ control registers.
 * Currently we activate the HQV interrupts of Unichrome Pro group A.
 */

static maskarray_t via_pro_group_a_irqs[] = {
	{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
	 0x00000000 },
	{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
	 0x00000000 },
	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
};
static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};

static maskarray_t via_unichrome_irqs[] = {
	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
};
static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};

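/*
 * Software vblank counter for CRTC 0 (the only CRTC handled here);
 * the counter is incremented in the interrupt handler below.
 */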
u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	drm_via_private_t *dev_priv = dev->dev_private;

	if (pipe != 0)
		return 0;

	return atomic_read(&dev_priv->vbl_received);
}

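/*
 * Main interrupt handler.  Bumps the vblank counter, re-estimates the
 * vblank period every 16 frames (for video playback sync), wakes any
 * waiters on the device-specific IRQs, hands DMA transfer-done
 * interrupts to the blitter code and finally acknowledges everything
 * by writing the status bits back.
 */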
irqreturn_t via_driver_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;
	int handled = 0;
	ktime_t cur_vblank;
	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
	int i;

	status = via_read(dev_priv, VIA_REG_INTERRUPT);
	if (status & VIA_IRQ_VBLANK_PENDING) {
		atomic_inc(&dev_priv->vbl_received);
		if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
			cur_vblank = ktime_get();
			if (dev_priv->last_vblank_valid) {
				dev_priv->nsec_per_vblank =
					ktime_sub(cur_vblank,
						dev_priv->last_vblank) >> 4;
			}
			dev_priv->last_vblank = cur_vblank;
			dev_priv->last_vblank_valid = 1;
		}
		if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
			DRM_DEBUG("nsec per vblank is: %llu\n",
				  ktime_to_ns(dev_priv->nsec_per_vblank));
		}
		drm_handle_vblank(dev, 0);
		handled = 1;
	}

	for (i = 0; i < dev_priv->num_irqs; ++i) {
		if (status & cur_irq->pending_mask) {
			atomic_inc(&cur_irq->irq_received);
			wake_up(&cur_irq->irq_queue);
			handled = 1;
			if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
				via_dmablit_handler(dev, 0, 1);
			else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i)
				via_dmablit_handler(dev, 1, 1);
		}
		cur_irq++;
	}

	/* Acknowledge interrupts */
	via_write(dev_priv, VIA_REG_INTERRUPT, status);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

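/*
 * Acknowledge (clear) any interrupts that are already pending by
 * writing their pending bits back to the interrupt register.
 */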
static inline void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
{
	u32 status;

	if (dev_priv) {
		/* Acknowledge interrupts */
		status = via_read(dev_priv, VIA_REG_INTERRUPT);
		via_write(dev_priv, VIA_REG_INTERRUPT, status |
			  dev_priv->irq_pending_mask);
	}
}

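/*
 * Enable the vertical blank interrupt for CRTC 0, both in the main
 * interrupt register and through the 0x83d4/0x83d5 index/data pair
 * (register 0x11, apparently a mirror of the VGA CRT controller; see
 * the "magic" note in via_driver_irq_postinstall() below).
 */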
int via_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	drm_via_private_t *dev_priv = dev->dev_private;
	u32 status;

	if (pipe != 0) {
		DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
		return -EINVAL;
	}

	status = via_read(dev_priv, VIA_REG_INTERRUPT);
	via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);

	via_write8(dev_priv, 0x83d4, 0x11);
	via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);

	return 0;
}

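/*
 * Counterpart of via_enable_vblank(): mask the vertical blank
 * interrupt for CRTC 0 again.
 */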
void via_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	drm_via_private_t *dev_priv = dev->dev_private;
	u32 status;

	status = via_read(dev_priv, VIA_REG_INTERRUPT);
	via_write(dev_priv, VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE);

	via_write8(dev_priv, 0x83d4, 0x11);
	via_write8_mask(dev_priv, 0x83d5, 0x30, 0);

	if (pipe != 0)
		DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
}

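/*
 * Wait for a device-specific interrupt to signal.  If the IRQ has an
 * associated status register in its mask-array entry and the caller
 * did not force a sequence wait, wait for that register to reach the
 * expected value; otherwise wait until the interrupt count passes
 * *sequence.  The current count is returned through *sequence either
 * way.
 */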
static int
via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence,
		    unsigned int *sequence)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	unsigned int cur_irq_sequence;
	drm_via_irq_t *cur_irq;
	int ret = 0;
	maskarray_t *masks;
	int real_irq;

	DRM_DEBUG("\n");

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (irq >= drm_via_irq_num) {
		DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
		return -EINVAL;
	}

	real_irq = dev_priv->irq_map[irq];

	if (real_irq < 0) {
		DRM_ERROR("Video IRQ %d not available on this hardware.\n",
			  irq);
		return -EINVAL;
	}

	masks = dev_priv->irq_masks;
	cur_irq = dev_priv->via_irqs + real_irq;

	/* Index the mask array by the mapped (hardware) irq, not the logical one. */
	if (masks[real_irq][2] && !force_sequence) {
		VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
			    ((via_read(dev_priv, masks[real_irq][2]) &
			      masks[real_irq][3]) == masks[real_irq][4]));
		cur_irq_sequence = atomic_read(&cur_irq->irq_received);
	} else {
		VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
			    (((cur_irq_sequence =
			       atomic_read(&cur_irq->irq_received)) -
			      *sequence) <= (1 << 23)));
	}
	*sequence = cur_irq_sequence;
	return ret;
}

/*
 * drm_dma.h hooks
 */

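/*
 * Runs before the interrupt handler is installed: select the IRQ table
 * for the detected chipset, initialise the per-IRQ counters and wait
 * queues, disable all the interrupt sources we manage and acknowledge
 * anything already pending, so no stale interrupt fires once the
 * handler is hooked up.
 */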
void via_driver_irq_preinstall(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;
	drm_via_irq_t *cur_irq;
	int i;

	DRM_DEBUG("dev_priv: %p\n", dev_priv);
	if (dev_priv) {
		cur_irq = dev_priv->via_irqs;

		dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
		dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;

		if (dev_priv->chipset == VIA_PRO_GROUP_A ||
		    dev_priv->chipset == VIA_DX9_0) {
			dev_priv->irq_masks = via_pro_group_a_irqs;
			dev_priv->num_irqs = via_num_pro_group_a;
			dev_priv->irq_map = via_irqmap_pro_group_a;
		} else {
			dev_priv->irq_masks = via_unichrome_irqs;
			dev_priv->num_irqs = via_num_unichrome;
			dev_priv->irq_map = via_irqmap_unichrome;
		}

		for (i = 0; i < dev_priv->num_irqs; ++i) {
			atomic_set(&cur_irq->irq_received, 0);
			cur_irq->enable_mask = dev_priv->irq_masks[i][0];
			cur_irq->pending_mask = dev_priv->irq_masks[i][1];
			init_waitqueue_head(&cur_irq->irq_queue);
			dev_priv->irq_enable_mask |= cur_irq->enable_mask;
			dev_priv->irq_pending_mask |= cur_irq->pending_mask;
			cur_irq++;

			DRM_DEBUG("Initializing IRQ %d\n", i);
		}

		dev_priv->last_vblank_valid = 0;

		/* Clear VSync interrupt regs */
		status = via_read(dev_priv, VIA_REG_INTERRUPT);
		via_write(dev_priv, VIA_REG_INTERRUPT, status &
			  ~(dev_priv->irq_enable_mask));

		/* Clear bits if they're already high */
		viadrv_acknowledge_irqs(dev_priv);
	}
}

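/*
 * Runs after the interrupt handler is installed: turn on the global
 * interrupt enable together with the per-source enables collected in
 * preinstall, and enable the CRT controller side as well.
 */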
int via_driver_irq_postinstall(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;

	DRM_DEBUG("\n");
	if (!dev_priv)
		return -EINVAL;

	status = via_read(dev_priv, VIA_REG_INTERRUPT);
	via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
		  | dev_priv->irq_enable_mask);

	/* Some magic, oh for some data sheets! */
	via_write8(dev_priv, 0x83d4, 0x11);
	via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);

	return 0;
}

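/*
 * Runs when the interrupt handler is removed: disable the CRT
 * controller vblank source and mask all the interrupt enables we set
 * up.
 */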
void via_driver_irq_uninstall(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;

	DRM_DEBUG("\n");
	if (dev_priv) {

		/* Some more magic, oh for some data sheets! */
		via_write8(dev_priv, 0x83d4, 0x11);
		via_write8_mask(dev_priv, 0x83d5, 0x30, 0);

		status = via_read(dev_priv, VIA_REG_INTERRUPT);
		via_write(dev_priv, VIA_REG_INTERRUPT, status &
			  ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
	}
}

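/*
 * Ioctl entry point for waiting on one of the device-specific
 * interrupts: validate the request, convert a relative wait into an
 * absolute sequence number, block in via_driver_irq_wait() and report
 * the completion time back to userspace.
 */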
int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_irqwait_t *irqwait = data;
	struct timespec64 now;
	int ret = 0;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
	int force_sequence;

	if (irqwait->request.irq >= dev_priv->num_irqs) {
		DRM_ERROR("Trying to wait on unknown irq %d\n",
			  irqwait->request.irq);
		return -EINVAL;
	}

	cur_irq += irqwait->request.irq;

	switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
	case VIA_IRQ_RELATIVE:
		irqwait->request.sequence +=
			atomic_read(&cur_irq->irq_received);
		irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
		fallthrough;
	case VIA_IRQ_ABSOLUTE:
		break;
	default:
		return -EINVAL;
	}

	if (irqwait->request.type & VIA_IRQ_SIGNAL) {
		DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
		return -EINVAL;
	}

	force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);

	ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
				  &irqwait->request.sequence);
	ktime_get_ts64(&now);
	irqwait->reply.tval_sec = now.tv_sec;
	irqwait->reply.tval_usec = now.tv_nsec / NSEC_PER_USEC;

	return ret;
}