// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * Derived from ca91c042.c by Michael Wyrick
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/vme.h>

#include "../vme_bridge.h"
#include "vme_ca91cx42.h"

static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);

/* Module parameters */
static int geoid;

static const char driver_name[] = "vme_ca91cx42";

static const struct pci_device_id ca91cx42_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
	{ },
};

MODULE_DEVICE_TABLE(pci, ca91cx42_ids);

static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};

static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
{
	wake_up(&bridge->dma_queue);

	return CA91CX42_LINT_DMA;
}

static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
{
	int i;
	u32 serviced = 0;

	for (i = 0; i < 4; i++) {
		if (stat & CA91CX42_LINT_LM[i]) {
			/* We only enable interrupts if the callback is set */
			bridge->lm_callback[i](bridge->lm_data[i]);
			serviced |= CA91CX42_LINT_LM[i];
		}
	}

	return serviced;
}

/* XXX This needs to be split into 4 queues */
static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
{
	wake_up(&bridge->mbox_queue);

	return CA91CX42_LINT_MBOX;
}

static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
{
	wake_up(&bridge->iack_queue);

	return CA91CX42_LINT_SW_IACK;
}

static u32 ca91cx42_VERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
{
	int val;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	val = ioread32(bridge->base + DGCS);

	if (!(val & 0x00000800))
		dev_err(ca91cx42_bridge->parent,
			"ca91cx42_VERR_irqhandler DMA Read Error DGCS=%08X\n",
			val);

	return CA91CX42_LINT_VERR;
}

static u32 ca91cx42_LERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
{
	int val;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	val = ioread32(bridge->base + DGCS);

	if (!(val & 0x00000800))
		dev_err(ca91cx42_bridge->parent,
			"ca91cx42_LERR_irqhandler DMA Read Error DGCS=%08X\n",
			val);

	return CA91CX42_LINT_LERR;
}

static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
	int stat)
{
	int vec, i, serviced = 0;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	for (i = 7; i > 0; i--) {
		if (stat & (1 << i)) {
			vec = ioread32(bridge->base +
				CA91CX42_V_STATID[i]) & 0xff;

			vme_irq_handler(ca91cx42_bridge, i, vec);

			serviced |= (1 << i);
		}
	}

	return serviced;
}
149
150static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
151{
152	u32 stat, enable, serviced = 0;
153	struct vme_bridge *ca91cx42_bridge;
154	struct ca91cx42_driver *bridge;
155
156	ca91cx42_bridge = ptr;
157
158	bridge = ca91cx42_bridge->driver_priv;
159
160	enable = ioread32(bridge->base + LINT_EN);
161	stat = ioread32(bridge->base + LINT_STAT);
162
163	/* Only look at unmasked interrupts */
164	stat &= enable;
165
166	if (unlikely(!stat))
167		return IRQ_NONE;
168
169	if (stat & CA91CX42_LINT_DMA)
170		serviced |= ca91cx42_DMA_irqhandler(bridge);
171	if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
172			CA91CX42_LINT_LM3))
173		serviced |= ca91cx42_LM_irqhandler(bridge, stat);
174	if (stat & CA91CX42_LINT_MBOX)
175		serviced |= ca91cx42_MB_irqhandler(bridge, stat);
176	if (stat & CA91CX42_LINT_SW_IACK)
177		serviced |= ca91cx42_IACK_irqhandler(bridge);
178	if (stat & CA91CX42_LINT_VERR)
179		serviced |= ca91cx42_VERR_irqhandler(ca91cx42_bridge);
180	if (stat & CA91CX42_LINT_LERR)
181		serviced |= ca91cx42_LERR_irqhandler(ca91cx42_bridge);
182	if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
183			CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
184			CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
185			CA91CX42_LINT_VIRQ7))
186		serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);
187
188	/* Clear serviced interrupts */
189	iowrite32(serviced, bridge->base + LINT_STAT);
190
191	return IRQ_HANDLED;
192}
193
194static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
195{
196	int result, tmp;
197	struct pci_dev *pdev;
198	struct ca91cx42_driver *bridge;
199
200	bridge = ca91cx42_bridge->driver_priv;
201
202	/* Need pdev */
203	pdev = to_pci_dev(ca91cx42_bridge->parent);
204
205	/* Disable interrupts from PCI to VME */
206	iowrite32(0, bridge->base + VINT_EN);
207
208	/* Disable PCI interrupts */
209	iowrite32(0, bridge->base + LINT_EN);
210	/* Clear Any Pending PCI Interrupts */
211	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
212
213	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
214			driver_name, ca91cx42_bridge);
215	if (result) {
216		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
217		       pdev->irq);
218		return result;
219	}
220
221	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
222	iowrite32(0, bridge->base + LINT_MAP0);
223	iowrite32(0, bridge->base + LINT_MAP1);
224	iowrite32(0, bridge->base + LINT_MAP2);
225
226	/* Enable DMA, mailbox & LM Interrupts */
227	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
228		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
229		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;
230
231	iowrite32(tmp, bridge->base + LINT_EN);
232
233	return 0;
234}
235
236static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
237	struct pci_dev *pdev)
238{
239	struct vme_bridge *ca91cx42_bridge;
240
241	/* Disable interrupts from PCI to VME */
242	iowrite32(0, bridge->base + VINT_EN);
243
244	/* Disable PCI interrupts */
245	iowrite32(0, bridge->base + LINT_EN);
246	/* Clear Any Pending PCI Interrupts */
247	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
248
249	ca91cx42_bridge = container_of((void *)bridge, struct vme_bridge,
250				       driver_priv);
251	free_irq(pdev->irq, ca91cx42_bridge);
252}
253
254static int ca91cx42_iack_received(struct ca91cx42_driver *bridge, int level)
255{
256	u32 tmp;
257
258	tmp = ioread32(bridge->base + LINT_STAT);
259
260	if (tmp & (1 << level))
261		return 0;
262	else
263		return 1;
264}

/*
 * Set up a VME interrupt
 */
static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
	int state, int sync)
{
	struct pci_dev *pdev;
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Enable IRQ level */
	tmp = ioread32(bridge->base + LINT_EN);

	if (state == 0)
		tmp &= ~CA91CX42_LINT_VIRQ[level];
	else
		tmp |= CA91CX42_LINT_VIRQ[level];

	iowrite32(tmp, bridge->base + LINT_EN);

	if ((state == 0) && (sync != 0)) {
		pdev = to_pci_dev(ca91cx42_bridge->parent);

		synchronize_irq(pdev->irq);
	}
}

static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
	int statid)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	mutex_lock(&bridge->vme_int);

	tmp = ioread32(bridge->base + VINT_EN);

	/* Set Status/ID */
	iowrite32(statid << 24, bridge->base + STATID);

	/* Assert VMEbus IRQ */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	/* Wait for IACK */
	wait_event_interruptible(bridge->iack_queue,
				 ca91cx42_iack_received(bridge, level));

	/* Return interrupt to low state */
	tmp = ioread32(bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	mutex_unlock(&bridge->vme_int);

	return 0;
}
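
/*
 * Illustrative sketch, not part of this driver: a VME device driver would
 * normally reach ca91cx42_irq_generate() through the vme_* consumer API
 * declared in <linux/vme.h>. Here "vdev" stands for the consumer's
 * struct vme_dev *, and level 3 / vector 0x20 are arbitrary example values
 * (the Universe can only generate even vectors):
 *
 *	int ret = vme_irq_generate(vdev, 3, 0x20);
 *
 *	if (ret)
 *		dev_err(&vdev->dev, "failed to generate VIRQ3: %d\n", ret);
 */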

static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, u32 aspace, u32 cycle)
{
	unsigned int i, addr = 0, granularity;
	unsigned int temp_ctl = 0;
	unsigned int vme_bound, pci_offset;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = image->parent;

	bridge = ca91cx42_bridge->driver_priv;

	i = image->number;

	switch (aspace) {
	case VME_A16:
		addr |= CA91CX42_VSI_CTL_VAS_A16;
		break;
	case VME_A24:
		addr |= CA91CX42_VSI_CTL_VAS_A24;
		break;
	case VME_A32:
		addr |= CA91CX42_VSI_CTL_VAS_A32;
		break;
	case VME_USER1:
		addr |= CA91CX42_VSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		addr |= CA91CX42_VSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
	default:
		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
		return -EINVAL;
	}

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size;
	pci_offset = pci_base - vme_base;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	if (vme_base & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent,
			"Invalid VME base alignment\n");
		return -EINVAL;
	}
	if (vme_bound & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent,
			"Invalid VME bound alignment\n");
		return -EINVAL;
	}
	if (pci_offset & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent,
			"Invalid PCI Offset alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
	temp_ctl &= ~CA91CX42_VSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	/* Setup mapping */
	iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
	iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
	iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);

	/* Setup address space */
	temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
	temp_ctl |= addr;

	/* Setup cycle types */
	temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_VSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	return 0;
}

static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
{
	unsigned int i, granularity = 0, ctl = 0;
	unsigned long long vme_bound, pci_offset;
	struct ca91cx42_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Read Registers */
	ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);

	*vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
	vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
	pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);

	*pci_base = (dma_addr_t)*vme_base + pci_offset;
	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;

	if (ctl & CA91CX42_VSI_CTL_EN)
		*enabled = 1;

	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
		*aspace = VME_A16;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
		*aspace = VME_A24;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
		*aspace = VME_A32;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
		*aspace = VME_USER1;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
		*aspace = VME_USER2;

	if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
		*cycle |= VME_USER;
	if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
		*cycle |= VME_DATA;

	return 0;
}
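
/*
 * Illustrative sketch, not part of this driver: a consumer would configure a
 * slave window through the vme_* API from <linux/vme.h> rather than calling
 * ca91cx42_slave_set() directly. "vdev" is the consumer's struct vme_dev *;
 * the window base, size and cycle types are arbitrary example values, and
 * error handling is omitted:
 *
 *	struct vme_resource *res;
 *	dma_addr_t bus;
 *	void *buf;
 *
 *	res = vme_slave_request(vdev, VME_A24, VME_SCT | VME_USER | VME_DATA);
 *	buf = vme_alloc_consistent(res, 0x10000, &bus);
 *	vme_slave_set(res, 1, 0x400000, 0x10000, bus, VME_A24,
 *		      VME_SCT | VME_USER | VME_DATA);
 *
 * VME accesses to 0x400000-0x40ffff then land in "buf".
 */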

/*
 * Allocate and map PCI Resource
 */
static int ca91cx42_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;
	struct vme_bridge *ca91cx42_bridge;

	ca91cx42_bridge = image->parent;

	/* Find pci_dev container of dev */
	if (!ca91cx42_bridge->parent) {
		pr_err("Dev entry NULL\n");
		return -EINVAL;
	}
	pdev = to_pci_dev(ca91cx42_bridge->parent);

	existing_size = (unsigned long long)(image->bus_resource.end -
		image->bus_resource.start);

	/* If the existing size is OK, return */
	if (existing_size == (size - 1))
		return 0;

	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		kfree(image->bus_resource.name);
		release_resource(&image->bus_resource);
		memset(&image->bus_resource, 0, sizeof(image->bus_resource));
	}

	if (!image->bus_resource.name) {
		image->bus_resource.name = kmalloc(VMENAMSIZ + 3, GFP_ATOMIC);
		if (!image->bus_resource.name) {
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->bus_resource.name, "%s.%d",
		ca91cx42_bridge->name, image->number);

	image->bus_resource.start = 0;
	image->bus_resource.end = (unsigned long)size;
	image->bus_resource.flags = IORESOURCE_MEM;

	retval = pci_bus_alloc_resource(pdev->bus,
		&image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		dev_err(ca91cx42_bridge->parent,
			"Failed to allocate mem resource for window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->bus_resource.start);
		goto err_resource;
	}

	image->kern_base = ioremap(image->bus_resource.start, size);
	if (!image->kern_base) {
		dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

err_remap:
	release_resource(&image->bus_resource);
err_resource:
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(image->bus_resource));
err_name:
	return retval;
}

/*
 * Free and unmap PCI Resource
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&image->bus_resource);
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}

static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size, u32 aspace,
	u32 cycle, u32 dwidth)
{
	int retval = 0;
	unsigned int i, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned long long pci_bound, vme_offset, pci_base;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = image->parent;

	bridge = ca91cx42_bridge->driver_priv;

	i = image->number;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Verify input data */
	if (vme_base & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent,
			"Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}
	if (size & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent,
			"Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&image->lock);

	/*
	 * Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependent stuff up the stack
	 */
	retval = ca91cx42_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent,
			"Unable to allocate memory for resource\n");
		retval = -ENOMEM;
		goto err_res;
	}

	pci_base = (unsigned long long)image->bus_resource.start;

	/*
	 * Bound address is a valid address for the window, adjust
	 * according to window granularity.
	 */
	pci_bound = pci_base + size;
	vme_offset = vme_base - pci_base;

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
	temp_ctl &= ~CA91CX42_LSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	/* Setup cycle types */
	temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
	if (cycle & VME_BLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;

	/* Setup data width */
	temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
	switch (dwidth) {
	case VME_D8:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
		break;
	case VME_D16:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
		break;
	case VME_D32:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
		break;
	case VME_D64:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
		break;
	default:
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
	}

	/* Setup address space */
	temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
		break;
	case VME_A24:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
		break;
	case VME_A32:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
		break;
	case VME_CRCSR:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_USER3:
	case VME_USER4:
	default:
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;

	/* Setup mapping */
	iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
	iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
	iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_LSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	spin_unlock(&image->lock);
	return 0;

err_aspace:
err_dwidth:
	ca91cx42_free_resource(image);
err_res:
err_window:
	return retval;
}

static int __ca91cx42_master_get(struct vme_master_resource *image,
	int *enabled, unsigned long long *vme_base, unsigned long long *size,
	u32 *aspace, u32 *cycle, u32 *dwidth)
{
	unsigned int i, ctl;
	unsigned long long pci_base, pci_bound, vme_offset;
	struct ca91cx42_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);

	pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
	vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
	pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);

	*vme_base = pci_base + vme_offset;
	*size = (unsigned long long)(pci_bound - pci_base);

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;
	*dwidth = 0;

	if (ctl & CA91CX42_LSI_CTL_EN)
		*enabled = 1;

	/* Setup address space */
	switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
	case CA91CX42_LSI_CTL_VAS_A16:
		*aspace = VME_A16;
		break;
	case CA91CX42_LSI_CTL_VAS_A24:
		*aspace = VME_A24;
		break;
	case CA91CX42_LSI_CTL_VAS_A32:
		*aspace = VME_A32;
		break;
	case CA91CX42_LSI_CTL_VAS_CRCSR:
		*aspace = VME_CRCSR;
		break;
	case CA91CX42_LSI_CTL_VAS_USER1:
		*aspace = VME_USER1;
		break;
	case CA91CX42_LSI_CTL_VAS_USER2:
		*aspace = VME_USER2;
		break;
	}

	/* XXX Not sure how to check for MBLT */
	/* Setup cycle types */
	if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
		*cycle |= VME_BLT;
	else
		*cycle |= VME_SCT;

	if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	else
		*cycle |= VME_USER;

	if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	else
		*cycle |= VME_DATA;

	/* Setup data width */
	switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
	case CA91CX42_LSI_CTL_VDW_D8:
		*dwidth = VME_D8;
		break;
	case CA91CX42_LSI_CTL_VDW_D16:
		*dwidth = VME_D16;
		break;
	case CA91CX42_LSI_CTL_VDW_D32:
		*dwidth = VME_D32;
		break;
	case CA91CX42_LSI_CTL_VDW_D64:
		*dwidth = VME_D64;
		break;
	}

	return 0;
}

static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
	u32 *cycle, u32 *dwidth)
{
	int retval;

	spin_lock(&image->lock);

	retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);

	spin_unlock(&image->lock);

	return retval;
}
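
/*
 * Illustrative sketch, not part of this driver: a consumer would typically
 * obtain and program a master window through the vme_* API from
 * <linux/vme.h>. "vdev" is the consumer's struct vme_dev *; the A24/D16
 * attributes and the 64 kB window at 0xc00000 are arbitrary example values,
 * and error handling is omitted:
 *
 *	struct vme_resource *res;
 *
 *	res = vme_master_request(vdev, VME_A24, VME_SCT | VME_USER | VME_DATA,
 *				 VME_D16);
 *	vme_master_set(res, 1, 0xc00000, 0x10000, VME_A24,
 *		       VME_SCT | VME_USER | VME_DATA, VME_D16);
 */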

static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
	void *buf, size_t count, loff_t offset)
{
	ssize_t retval;
	void __iomem *addr = image->kern_base + offset;
	unsigned int done = 0;
	unsigned int count32;

	if (count == 0)
		return 0;

	spin_lock(&image->lock);

	/* The following code handles VME address alignment. We cannot use
	 * memcpy_xxx here because it may cut data transfers into 8-bit
	 * cycles when D16 or D32 cycles are required on the VME bus.
	 * On the other hand, the bridge itself assures that the maximum data
	 * cycle configured for the transfer is used and splits it
	 * automatically for non-aligned addresses, so we don't want the
	 * overhead of needlessly forcing small transfers for the entire cycle.
	 */
	if ((uintptr_t)addr & 0x1) {
		*(u8 *)buf = ioread8(addr);
		done += 1;
		if (done == count)
			goto out;
	}
	if ((uintptr_t)(addr + done) & 0x2) {
		if ((count - done) < 2) {
			*(u8 *)(buf + done) = ioread8(addr + done);
			done += 1;
			goto out;
		} else {
			*(u16 *)(buf + done) = ioread16(addr + done);
			done += 2;
		}
	}

	count32 = (count - done) & ~0x3;
	while (done < count32) {
		*(u32 *)(buf + done) = ioread32(addr + done);
		done += 4;
	}

	if ((count - done) & 0x2) {
		*(u16 *)(buf + done) = ioread16(addr + done);
		done += 2;
	}
	if ((count - done) & 0x1) {
		*(u8 *)(buf + done) = ioread8(addr + done);
		done += 1;
	}
out:
	retval = count;
	spin_unlock(&image->lock);

	return retval;
}

static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
	void *buf, size_t count, loff_t offset)
{
	ssize_t retval;
	void __iomem *addr = image->kern_base + offset;
	unsigned int done = 0;
	unsigned int count32;

	if (count == 0)
		return 0;

	spin_lock(&image->lock);

	/* Here we apply the same strategy as in master_read
	 * in order to assure the correct cycles.
	 */
	if ((uintptr_t)addr & 0x1) {
		iowrite8(*(u8 *)buf, addr);
		done += 1;
		if (done == count)
			goto out;
	}
	if ((uintptr_t)(addr + done) & 0x2) {
		if ((count - done) < 2) {
			iowrite8(*(u8 *)(buf + done), addr + done);
			done += 1;
			goto out;
		} else {
			iowrite16(*(u16 *)(buf + done), addr + done);
			done += 2;
		}
	}

	count32 = (count - done) & ~0x3;
	while (done < count32) {
		iowrite32(*(u32 *)(buf + done), addr + done);
		done += 4;
	}

	if ((count - done) & 0x2) {
		iowrite16(*(u16 *)(buf + done), addr + done);
		done += 2;
	}
	if ((count - done) & 0x1) {
		iowrite8(*(u8 *)(buf + done), addr + done);
		done += 1;
	}
out:
	retval = count;

	spin_unlock(&image->lock);

	return retval;
}
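
/*
 * Illustrative sketch, not part of this driver: with a master window set up
 * as above, a consumer moves data with vme_master_read()/vme_master_write()
 * from <linux/vme.h>, which end up in the two routines above. Offsets are
 * relative to the VME base programmed into the window; the buffer and
 * offsets below are example values:
 *
 *	u16 regs[4];
 *
 *	if (vme_master_read(res, regs, sizeof(regs), 0x100) != sizeof(regs))
 *		dev_err(&vdev->dev, "short read from VME window\n");
 *	vme_master_write(res, regs, sizeof(regs), 0x200);
 */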

static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
	unsigned int mask, unsigned int compare, unsigned int swap,
	loff_t offset)
{
	u32 result;
	uintptr_t pci_addr;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = image->parent->driver_priv;
	dev = image->parent->parent;

	/* Find the PCI address that maps to the desired VME address */

	/* Locking as we can only do one of these at a time */
	mutex_lock(&bridge->vme_rmw);

	/* Lock image */
	spin_lock(&image->lock);

	pci_addr = (uintptr_t)image->kern_base + offset;

	/* Address must be 4-byte aligned */
	if (pci_addr & 0x3) {
		dev_err(dev, "RMW Address not 4-byte aligned\n");
		result = -EINVAL;
		goto out;
	}

	/* Ensure RMW Disabled whilst configuring */
	iowrite32(0, bridge->base + SCYC_CTL);

	/* Configure registers */
	iowrite32(mask, bridge->base + SCYC_EN);
	iowrite32(compare, bridge->base + SCYC_CMP);
	iowrite32(swap, bridge->base + SCYC_SWP);
	iowrite32(pci_addr, bridge->base + SCYC_ADDR);

	/* Enable RMW */
	iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL);

	/* Kick process off with a read to the required address. */
	result = ioread32(image->kern_base + offset);

	/* Disable RMW */
	iowrite32(0, bridge->base + SCYC_CTL);

out:
	spin_unlock(&image->lock);

	mutex_unlock(&bridge->vme_rmw);

	return result;
}
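
/*
 * Illustrative sketch, not part of this driver: the special cycle generator
 * above is reached through vme_master_rmw() from <linux/vme.h>. The mask,
 * compare and swap values below are example parameters only, sketching a
 * test-and-set on bit 0 of the 32-bit word at offset 0 of the window:
 *
 *	unsigned int old;
 *
 *	old = vme_master_rmw(res, 0x1, 0x0, 0x1, 0);
 */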

static int ca91cx42_dma_list_add(struct vme_dma_list *list,
	struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
{
	struct ca91cx42_dma_entry *entry, *prev;
	struct vme_dma_pci *pci_attr;
	struct vme_dma_vme *vme_attr;
	dma_addr_t desc_ptr;
	int retval = 0;
	struct device *dev;

	dev = list->parent->parent->parent;

	/* XXX descriptor must be aligned on 64-bit boundaries */
	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		retval = -ENOMEM;
		goto err_mem;
	}

	/* Test descriptor alignment */
	if ((unsigned long)&entry->descriptor & CA91CX42_DCPP_M) {
		dev_err(dev, "Descriptor not aligned to 16 byte boundary as required: %p\n",
			&entry->descriptor);
		retval = -EINVAL;
		goto err_align;
	}

	memset(&entry->descriptor, 0, sizeof(entry->descriptor));

	if (dest->type == VME_DMA_VME) {
		entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
		vme_attr = dest->private;
		pci_attr = src->private;
	} else {
		vme_attr = src->private;
		pci_attr = dest->private;
	}

	/* Check we can fulfill the required attributes */
	if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
		VME_USER2)) != 0) {

		dev_err(dev, "Unsupported address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
		VME_PROG | VME_DATA)) != 0) {

		dev_err(dev, "Unsupported cycle type\n");
		retval = -EINVAL;
		goto err_cycle;
	}

	/* Check to see if we can fulfill source and destination */
	if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
		((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {

		dev_err(dev, "Cannot perform transfer with this source-destination combination\n");
		retval = -EINVAL;
		goto err_direct;
	}

	/* Setup cycle types */
	if (vme_attr->cycle & VME_BLT)
		entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;

	/* Setup data width */
	switch (vme_attr->dwidth) {
	case VME_D8:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
		break;
	case VME_D16:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
		break;
	case VME_D32:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
		break;
	case VME_D64:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
		break;
	default:
		dev_err(dev, "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
	}

	/* Setup address space */
	switch (vme_attr->aspace) {
	case VME_A16:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
		break;
	case VME_A24:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
		break;
	case VME_A32:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
		break;
	case VME_USER1:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
		break;
	case VME_USER2:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
		break;
	default:
		dev_err(dev, "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	if (vme_attr->cycle & VME_SUPER)
		entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
	if (vme_attr->cycle & VME_PROG)
		entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;

	entry->descriptor.dtbc = count;
	entry->descriptor.dla = pci_attr->address;
	entry->descriptor.dva = vme_attr->address;
	entry->descriptor.dcpp = CA91CX42_DCPP_NULL;

	/* Add to list */
	list_add_tail(&entry->list, &list->entries);

	/* Fill out previous descriptors "Next Address" */
	if (entry->list.prev != &list->entries) {
		prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
			list);
		/* We need the bus address for the pointer */
		desc_ptr = virt_to_bus(&entry->descriptor);
		prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
	}

	return 0;

err_dwidth:
err_cycle:
err_aspace:
err_direct:
err_align:
	kfree(entry);
err_mem:
	return retval;
}

static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	tmp = ioread32(bridge->base + DGCS);

	if (tmp & CA91CX42_DGCS_ACT)
		return 0;
	else
		return 1;
}

static int ca91cx42_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_dma_resource *ctrlr;
	struct ca91cx42_dma_entry *entry;
	int retval;
	dma_addr_t bus_addr;
	u32 val;
	struct device *dev;
	struct ca91cx42_driver *bridge;

	ctrlr = list->parent;

	bridge = ctrlr->parent->driver_priv;
	dev = ctrlr->parent->parent;

	mutex_lock(&ctrlr->mtx);

	if (!(list_empty(&ctrlr->running))) {
		/*
		 * XXX We have an active DMA transfer and currently haven't
		 *     sorted out the mechanism for "pending" DMA transfers.
		 *     Return busy.
		 */
		/* Need to add to pending here */
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	} else {
		list_add(&list->list, &ctrlr->running);
	}

	/* Get first bus address and write into registers */
	entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry,
		list);

	bus_addr = virt_to_bus(&entry->descriptor);

	mutex_unlock(&ctrlr->mtx);

	iowrite32(0, bridge->base + DTBC);
	iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);

	/* Start the operation */
	val = ioread32(bridge->base + DGCS);

	/* XXX Could set VMEbus On and Off Counters here */
	val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);

	val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
		CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
		CA91CX42_DGCS_PERR);

	iowrite32(val, bridge->base + DGCS);

	val |= CA91CX42_DGCS_GO;

	iowrite32(val, bridge->base + DGCS);

	retval = wait_event_interruptible(bridge->dma_queue,
					  ca91cx42_dma_busy(ctrlr->parent));

	if (retval) {
		val = ioread32(bridge->base + DGCS);
		iowrite32(val | CA91CX42_DGCS_STOP_REQ, bridge->base + DGCS);
		/* Wait for the operation to abort */
		wait_event(bridge->dma_queue,
			   ca91cx42_dma_busy(ctrlr->parent));
		retval = -EINTR;
		goto exit;
	}

	/*
	 * Read status register, this register is valid until we kick off a
	 * new transfer.
	 */
	val = ioread32(bridge->base + DGCS);

	if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
		CA91CX42_DGCS_PERR)) {

		dev_err(dev, "ca91c042: DMA Error. DGCS=%08X\n", val);
		val = ioread32(bridge->base + DCTL);
		retval = -EIO;
	}

exit:
	/* Remove list from running list */
	mutex_lock(&ctrlr->mtx);
	list_del(&list->list);
	mutex_unlock(&ctrlr->mtx);

	return retval;
}

static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
{
	struct list_head *pos, *temp;
	struct ca91cx42_dma_entry *entry;

	/* detach and free each entry */
	list_for_each_safe(pos, temp, &list->entries) {
		list_del(pos);
		entry = list_entry(pos, struct ca91cx42_dma_entry, list);
		kfree(entry);
	}

	return 0;
}
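
/*
 * Illustrative sketch, not part of this driver: a consumer builds and runs a
 * DMA list through the vme_* API from <linux/vme.h>. "vdev" is the
 * consumer's struct vme_dev * and "bus_handle" stands for a previously
 * mapped dma_addr_t; the VME address, size and attributes are arbitrary
 * example values, and error handling is omitted:
 *
 *	struct vme_resource *dma;
 *	struct vme_dma_list *dlist;
 *	struct vme_dma_attr *pci, *vme;
 *
 *	dma = vme_dma_request(vdev, VME_DMA_MEM_TO_VME);
 *	dlist = vme_new_dma_list(dma);
 *	pci = vme_dma_pci_attribute(bus_handle);
 *	vme = vme_dma_vme_attribute(0x800000, VME_A32, VME_SCT, VME_D32);
 *	vme_dma_list_add(dlist, pci, vme, 0x1000);
 *	vme_dma_list_exec(dlist);
 *	vme_dma_list_free(dlist);
 *	vme_dma_free_attribute(pci);
 *	vme_dma_free_attribute(vme);
 *	vme_dma_free(dma);
 */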

/*
 * All 4 location monitors reside at the same base - this is therefore a
 * system wide configuration.
 *
 * This does not enable the location monitor - that should be done when the
 * first callback is attached and disabled when the last callback is removed.
 */
static int ca91cx42_lm_set(struct vme_lm_resource *lm,
	unsigned long long lm_base, u32 aspace, u32 cycle)
{
	u32 temp_base, lm_ctl = 0;
	int i;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = lm->parent->driver_priv;
	dev = lm->parent->parent;

	/* Check the alignment of the location monitor */
	temp_base = (u32)lm_base;
	if (temp_base & 0xffff) {
		dev_err(dev,
			"Location monitor must be aligned to 64KB boundary\n");
		return -EINVAL;
	}

	mutex_lock(&lm->mtx);

	/* If we already have a callback attached, we can't move it! */
	for (i = 0; i < lm->monitors; i++) {
		if (bridge->lm_callback[i]) {
			mutex_unlock(&lm->mtx);
			dev_err(dev,
				"Location monitor callback attached, can't reset\n");
			return -EBUSY;
		}
	}

	switch (aspace) {
	case VME_A16:
		lm_ctl |= CA91CX42_LM_CTL_AS_A16;
		break;
	case VME_A24:
		lm_ctl |= CA91CX42_LM_CTL_AS_A24;
		break;
	case VME_A32:
		lm_ctl |= CA91CX42_LM_CTL_AS_A32;
		break;
	default:
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Invalid address space\n");
		return -EINVAL;
	}

	if (cycle & VME_SUPER)
		lm_ctl |= CA91CX42_LM_CTL_SUPR;
	if (cycle & VME_USER)
		lm_ctl |= CA91CX42_LM_CTL_NPRIV;
	if (cycle & VME_PROG)
		lm_ctl |= CA91CX42_LM_CTL_PGM;
	if (cycle & VME_DATA)
		lm_ctl |= CA91CX42_LM_CTL_DATA;

	iowrite32(lm_base, bridge->base + LM_BS);
	iowrite32(lm_ctl, bridge->base + LM_CTL);

	mutex_unlock(&lm->mtx);

	return 0;
}

/* Get configuration of the location monitor and return whether it is enabled
 * or disabled.
 */
static int ca91cx42_lm_get(struct vme_lm_resource *lm,
	unsigned long long *lm_base, u32 *aspace, u32 *cycle)
{
	u32 lm_ctl, enabled = 0;
	struct ca91cx42_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	*lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
	lm_ctl = ioread32(bridge->base + LM_CTL);

	if (lm_ctl & CA91CX42_LM_CTL_EN)
		enabled = 1;

	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16)
		*aspace = VME_A16;
	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24)
		*aspace = VME_A24;
	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32)
		*aspace = VME_A32;

	*cycle = 0;
	if (lm_ctl & CA91CX42_LM_CTL_SUPR)
		*cycle |= VME_SUPER;
	if (lm_ctl & CA91CX42_LM_CTL_NPRIV)
		*cycle |= VME_USER;
	if (lm_ctl & CA91CX42_LM_CTL_PGM)
		*cycle |= VME_PROG;
	if (lm_ctl & CA91CX42_LM_CTL_DATA)
		*cycle |= VME_DATA;

	mutex_unlock(&lm->mtx);

	return enabled;
}

/*
 * Attach a callback to a specific location monitor.
 *
 * Callback will be passed the monitor triggered.
 */
static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
	void (*callback)(void *), void *data)
{
	u32 lm_ctl, tmp;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = lm->parent->driver_priv;
	dev = lm->parent->parent;

	mutex_lock(&lm->mtx);

	/* Ensure that the location monitor is configured - need PGM or DATA */
	lm_ctl = ioread32(bridge->base + LM_CTL);
	if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Location monitor not properly configured\n");
		return -EINVAL;
	}

	/* Check that a callback isn't already attached */
	if (bridge->lm_callback[monitor]) {
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Existing callback attached\n");
		return -EBUSY;
	}

	/* Attach callback */
	bridge->lm_callback[monitor] = callback;
	bridge->lm_data[monitor] = data;

	/* Enable Location Monitor interrupt */
	tmp = ioread32(bridge->base + LINT_EN);
	tmp |= CA91CX42_LINT_LM[monitor];
	iowrite32(tmp, bridge->base + LINT_EN);

	/* Ensure that the global Location Monitor Enable is set */
	if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
		lm_ctl |= CA91CX42_LM_CTL_EN;
		iowrite32(lm_ctl, bridge->base + LM_CTL);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}

/*
 * Detach a callback function from a specific location monitor.
 */
static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	/* Disable Location Monitor and ensure previous interrupts are clear */
	tmp = ioread32(bridge->base + LINT_EN);
	tmp &= ~CA91CX42_LINT_LM[monitor];
	iowrite32(tmp, bridge->base + LINT_EN);

	iowrite32(CA91CX42_LINT_LM[monitor],
		  bridge->base + LINT_STAT);

	/* Detach callback */
	bridge->lm_callback[monitor] = NULL;
	bridge->lm_data[monitor] = NULL;

	/* If all location monitors disabled, disable global Location Monitor */
	if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3)) == 0) {
		tmp = ioread32(bridge->base + LM_CTL);
		tmp &= ~CA91CX42_LM_CTL_EN;
		iowrite32(tmp, bridge->base + LM_CTL);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}
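
/*
 * Illustrative sketch, not part of this driver: a consumer drives the
 * location monitor via the vme_* API from <linux/vme.h>. "vdev" is the
 * consumer's struct vme_dev *; the A24 base address and monitor index 0 are
 * arbitrary example values:
 *
 *	static void lm_hit(void *data)
 *	{
 *		// called from the LM interrupt handler above
 *	}
 *
 *	struct vme_resource *lmres = vme_lm_request(vdev);
 *
 *	vme_lm_set(lmres, 0x600000, VME_A24, VME_USER | VME_DATA);
 *	vme_lm_attach(lmres, 0, lm_hit, NULL);
 */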

static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
{
	u32 slot = 0;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	if (!geoid) {
		slot = ioread32(bridge->base + VCSR_BS);
		slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
	} else {
		slot = geoid;
	}

	return (int)slot;
}

static void *ca91cx42_alloc_consistent(struct device *parent, size_t size,
	dma_addr_t *dma)
{
	struct pci_dev *pdev;

	/* Find pci_dev container of dev */
	pdev = to_pci_dev(parent);

	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}

static void ca91cx42_free_consistent(struct device *parent, size_t size,
	void *vaddr, dma_addr_t dma)
{
	struct pci_dev *pdev;

	/* Find pci_dev container of dev */
	pdev = to_pci_dev(parent);

	dma_free_coherent(&pdev->dev, size, vaddr, dma);
}

/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
 */
static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	unsigned int crcsr_addr;
	int tmp, slot;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	slot = ca91cx42_slot_get(ca91cx42_bridge);

	/* Write CSR Base Address if slot ID is supplied as a module param */
	if (geoid)
		iowrite32(geoid << 27, bridge->base + VCSR_BS);

	dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
	if (slot == 0) {
		dev_err(&pdev->dev,
			"Slot number is unset, not configuring CR/CSR space\n");
		return -EINVAL;
	}

	/* Allocate mem for CR/CSR image */
	bridge->crcsr_kernel = dma_alloc_coherent(&pdev->dev,
						  VME_CRCSR_BUF_SIZE,
						  &bridge->crcsr_bus,
						  GFP_KERNEL);
	if (!bridge->crcsr_kernel) {
		dev_err(&pdev->dev,
			"Failed to allocate memory for CR/CSR image\n");
		return -ENOMEM;
	}

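	/*
	 * Each slot owns a 512 kB block of CR/CSR space; slot 3, for example,
	 * decodes at offset 3 * 0x80000 = 0x180000. VCSR_TO is programmed so
	 * that accesses to this block translate onto the image buffer.
	 */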
	crcsr_addr = slot * (512 * 1024);
	iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);

	tmp = ioread32(bridge->base + VCSR_CTL);
	tmp |= CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, bridge->base + VCSR_CTL);

	return 0;
}

static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Turn off CR/CSR space */
	tmp = ioread32(bridge->base + VCSR_CTL);
	tmp &= ~CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, bridge->base + VCSR_CTL);

	/* Free image */
	iowrite32(0, bridge->base + VCSR_TO);

	dma_free_coherent(&pdev->dev, VME_CRCSR_BUF_SIZE,
			  bridge->crcsr_kernel, bridge->crcsr_bus);
}

static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval, i;
	u32 data;
	struct list_head *pos = NULL, *n;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *ca91cx42_device;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;

	/* We want to support more than one of each bridge so we need to
	 * dynamically allocate the bridge structure
	 */
	ca91cx42_bridge = kzalloc(sizeof(*ca91cx42_bridge), GFP_KERNEL);
	if (!ca91cx42_bridge) {
		retval = -ENOMEM;
		goto err_struct;
	}
	vme_init_bridge(ca91cx42_bridge);

	ca91cx42_device = kzalloc(sizeof(*ca91cx42_device), GFP_KERNEL);
	if (!ca91cx42_device) {
		retval = -ENOMEM;
		goto err_driver;
	}

	ca91cx42_bridge->driver_priv = ca91cx42_device;

	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err_enable;
	}

	/* Map Registers */
	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}

	/* map registers in BAR 0 */
	ca91cx42_device->base = ioremap(pci_resource_start(pdev, 0), 4096);
	if (!ca91cx42_device->base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}

	/* Check to see if the mapping worked out */
	data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
	if (data != PCI_VENDOR_ID_TUNDRA) {
		dev_err(&pdev->dev, "PCI_ID check failed\n");
		retval = -EIO;
		goto err_test;
	}

	/* Initialize wait queues & mutual exclusion flags */
	init_waitqueue_head(&ca91cx42_device->dma_queue);
	init_waitqueue_head(&ca91cx42_device->iack_queue);
	mutex_init(&ca91cx42_device->vme_int);
	mutex_init(&ca91cx42_device->vme_rmw);

	ca91cx42_bridge->parent = &pdev->dev;
	strcpy(ca91cx42_bridge->name, driver_name);

	/* Setup IRQ */
	retval = ca91cx42_irq_init(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Initialization failed.\n");
		goto err_irq;
	}

	/* Add master windows to list */
	for (i = 0; i < CA91C142_MAX_MASTER; i++) {
		master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
		if (!master_image) {
			retval = -ENOMEM;
			goto err_master;
		}
		master_image->parent = ca91cx42_bridge;
		spin_lock_init(&master_image->lock);
		master_image->locked = 0;
		master_image->number = i;
		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_CRCSR | VME_USER1 | VME_USER2;
		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
		memset(&master_image->bus_resource, 0,
		       sizeof(master_image->bus_resource));
		master_image->kern_base = NULL;
		list_add_tail(&master_image->list,
			&ca91cx42_bridge->master_resources);
	}

	/* Add slave windows to list */
	for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
		slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
		if (!slave_image) {
			retval = -ENOMEM;
			goto err_slave;
		}
		slave_image->parent = ca91cx42_bridge;
		mutex_init(&slave_image->mtx);
		slave_image->locked = 0;
		slave_image->number = i;
		slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
			VME_USER2;

		/* Only windows 0 and 4 support A16 */
		if (i == 0 || i == 4)
			slave_image->address_attr |= VME_A16;

		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		list_add_tail(&slave_image->list,
			&ca91cx42_bridge->slave_resources);
	}

	/* Add dma engines to list */
	for (i = 0; i < CA91C142_MAX_DMA; i++) {
		dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
		if (!dma_ctrlr) {
			retval = -ENOMEM;
			goto err_dma;
		}
		dma_ctrlr->parent = ca91cx42_bridge;
		mutex_init(&dma_ctrlr->mtx);
		dma_ctrlr->locked = 0;
		dma_ctrlr->number = i;
		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
			VME_DMA_MEM_TO_VME;
		INIT_LIST_HEAD(&dma_ctrlr->pending);
		INIT_LIST_HEAD(&dma_ctrlr->running);
		list_add_tail(&dma_ctrlr->list,
			&ca91cx42_bridge->dma_resources);
	}

	/* Add location monitor to list */
	lm = kmalloc(sizeof(*lm), GFP_KERNEL);
	if (!lm) {
		retval = -ENOMEM;
		goto err_lm;
	}
	lm->parent = ca91cx42_bridge;
	mutex_init(&lm->mtx);
	lm->locked = 0;
	lm->number = 1;
	lm->monitors = 4;
	list_add_tail(&lm->list, &ca91cx42_bridge->lm_resources);

	ca91cx42_bridge->slave_get = ca91cx42_slave_get;
	ca91cx42_bridge->slave_set = ca91cx42_slave_set;
	ca91cx42_bridge->master_get = ca91cx42_master_get;
	ca91cx42_bridge->master_set = ca91cx42_master_set;
	ca91cx42_bridge->master_read = ca91cx42_master_read;
	ca91cx42_bridge->master_write = ca91cx42_master_write;
	ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
	ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
	ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
	ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
	ca91cx42_bridge->irq_set = ca91cx42_irq_set;
	ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
	ca91cx42_bridge->lm_set = ca91cx42_lm_set;
	ca91cx42_bridge->lm_get = ca91cx42_lm_get;
	ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
	ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
	ca91cx42_bridge->slot_get = ca91cx42_slot_get;
	ca91cx42_bridge->alloc_consistent = ca91cx42_alloc_consistent;
	ca91cx42_bridge->free_consistent = ca91cx42_free_consistent;

	data = ioread32(ca91cx42_device->base + MISC_CTL);
	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
		(data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
	dev_info(&pdev->dev, "Slot ID is %d\n",
		ca91cx42_slot_get(ca91cx42_bridge));

	if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev))
		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");

	/*
	 * The ca91cx42_bridge pointer is also saved as PCI driver data below
	 * for use in ca91cx42_remove()
	 */
	retval = vme_register_bridge(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Registration failed.\n");
		goto err_reg;
	}

	pci_set_drvdata(pdev, ca91cx42_bridge);

	return 0;

err_reg:
	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
err_lm:
	/* resources are stored in linked list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}
err_dma:
	/* resources are stored in linked list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}
err_slave:
	/* resources are stored in linked list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}
err_master:
	/* resources are stored in linked list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(ca91cx42_device, pdev);
err_irq:
err_test:
	iounmap(ca91cx42_device->base);
err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err_enable:
	kfree(ca91cx42_device);
err_driver:
	kfree(ca91cx42_bridge);
err_struct:
	return retval;
}

static void ca91cx42_remove(struct pci_dev *pdev)
{
	struct list_head *pos = NULL, *n;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;
	struct ca91cx42_driver *bridge;
	struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);

	bridge = ca91cx42_bridge->driver_priv;

	/* Turn off Ints */
	iowrite32(0, bridge->base + LINT_EN);

	/* Turn off the windows */
	iowrite32(0x00800000, bridge->base + LSI0_CTL);
	iowrite32(0x00800000, bridge->base + LSI1_CTL);
	iowrite32(0x00800000, bridge->base + LSI2_CTL);
	iowrite32(0x00800000, bridge->base + LSI3_CTL);
	iowrite32(0x00800000, bridge->base + LSI4_CTL);
	iowrite32(0x00800000, bridge->base + LSI5_CTL);
	iowrite32(0x00800000, bridge->base + LSI6_CTL);
	iowrite32(0x00800000, bridge->base + LSI7_CTL);
	iowrite32(0x00F00000, bridge->base + VSI0_CTL);
	iowrite32(0x00F00000, bridge->base + VSI1_CTL);
	iowrite32(0x00F00000, bridge->base + VSI2_CTL);
	iowrite32(0x00F00000, bridge->base + VSI3_CTL);
	iowrite32(0x00F00000, bridge->base + VSI4_CTL);
	iowrite32(0x00F00000, bridge->base + VSI5_CTL);
	iowrite32(0x00F00000, bridge->base + VSI6_CTL);
	iowrite32(0x00F00000, bridge->base + VSI7_CTL);

	vme_unregister_bridge(ca91cx42_bridge);

	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);

	/* resources are stored in linked list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}

	/* resources are stored in linked list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}

	/* resources are stored in linked list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in linked list */
	list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(bridge, pdev);

	iounmap(bridge->base);

	pci_release_regions(pdev);

	pci_disable_device(pdev);

	kfree(bridge);
	kfree(ca91cx42_bridge);
}

module_pci_driver(ca91cx42_driver);

MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);

MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");