1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Support for the Tundra TSI148 VME-PCI Bridge Chip
4 *
5 * Author: Martyn Welch <martyn.welch@ge.com>
6 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
7 *
8 * Based on work by Tom Armistead and Ajit Prem
9 * Copyright 2004 Motorola Inc.
10 */
11
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/mm.h>
15 #include <linux/types.h>
16 #include <linux/errno.h>
17 #include <linux/proc_fs.h>
18 #include <linux/pci.h>
19 #include <linux/poll.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/interrupt.h>
22 #include <linux/spinlock.h>
23 #include <linux/sched.h>
24 #include <linux/slab.h>
25 #include <linux/time.h>
26 #include <linux/io.h>
27 #include <linux/uaccess.h>
28 #include <linux/byteorder/generic.h>
29 #include <linux/vme.h>
30
31 #include "../vme_bridge.h"
32 #include "vme_tsi148.h"
33
34 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
35 static void tsi148_remove(struct pci_dev *);
36
37
38 /* Module parameters */
39 static bool err_chk;
40 static int geoid;
41
42 static const char driver_name[] = "vme_tsi148";
43
44 static const struct pci_device_id tsi148_ids[] = {
45 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
46 { },
47 };
48
49 MODULE_DEVICE_TABLE(pci, tsi148_ids);
50
51 static struct pci_driver tsi148_driver = {
52 .name = driver_name,
53 .id_table = tsi148_ids,
54 .probe = tsi148_probe,
55 .remove = tsi148_remove,
56 };
57
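/*
 * Combine the high and low halves of a 64-bit register pair into a single
 * 64-bit value (high word into bits 63:32, low word into bits 31:0).
 */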
58 static void reg_join(unsigned int high, unsigned int low,
59 unsigned long long *variable)
60 {
61 *variable = (unsigned long long)high << 32;
62 *variable |= (unsigned long long)low;
63 }
64
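/*
 * Split a 64-bit value into the high and low 32-bit halves expected by the
 * chip's paired upper/lower registers.
 */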
65 static void reg_split(unsigned long long variable, unsigned int *high,
66 unsigned int *low)
67 {
68 *low = (unsigned int)variable & 0xFFFFFFFF;
69 *high = (unsigned int)(variable >> 32);
70 }
71
72 /*
73 * Wakes up DMA queue.
74 */
75 static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
76 int channel_mask)
77 {
78 u32 serviced = 0;
79
80 if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
81 wake_up(&bridge->dma_queue[0]);
82 serviced |= TSI148_LCSR_INTC_DMA0C;
83 }
84 if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
85 wake_up(&bridge->dma_queue[1]);
86 serviced |= TSI148_LCSR_INTC_DMA1C;
87 }
88
89 return serviced;
90 }
91
92 /*
93 * Wake up location monitor queue
94 */
95 static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
96 {
97 int i;
98 u32 serviced = 0;
99
100 for (i = 0; i < 4; i++) {
101 if (stat & TSI148_LCSR_INTS_LMS[i]) {
102 /* We only enable interrupts if the callback is set */
103 bridge->lm_callback[i](bridge->lm_data[i]);
104 serviced |= TSI148_LCSR_INTC_LMC[i];
105 }
106 }
107
108 return serviced;
109 }
110
111 /*
112 * Wake up mail box queue.
113 *
114 * XXX This functionality is not exposed up through the API.
115 */
116 static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
117 {
118 int i;
119 u32 val;
120 u32 serviced = 0;
121 struct tsi148_driver *bridge;
122
123 bridge = tsi148_bridge->driver_priv;
124
125 for (i = 0; i < 4; i++) {
126 if (stat & TSI148_LCSR_INTS_MBS[i]) {
127 val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
128 dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
129 ": 0x%x\n", i, val);
130 serviced |= TSI148_LCSR_INTC_MBC[i];
131 }
132 }
133
134 return serviced;
135 }
136
137 /*
138 * Display error & status message when PERR (PCI) exception interrupt occurs.
139 */
140 static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
141 {
142 struct tsi148_driver *bridge;
143
144 bridge = tsi148_bridge->driver_priv;
145
146 dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
147 "attributes: %08x\n",
148 ioread32be(bridge->base + TSI148_LCSR_EDPAU),
149 ioread32be(bridge->base + TSI148_LCSR_EDPAL),
150 ioread32be(bridge->base + TSI148_LCSR_EDPAT));
151
152 dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
153 "completion reg: %08x\n",
154 ioread32be(bridge->base + TSI148_LCSR_EDPXA),
155 ioread32be(bridge->base + TSI148_LCSR_EDPXS));
156
157 iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
158
159 return TSI148_LCSR_INTC_PERRC;
160 }
161
162 /*
163 * Save address and status when VME error interrupt occurs.
164 */
165 static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
166 {
167 unsigned int error_addr_high, error_addr_low;
168 unsigned long long error_addr;
169 u32 error_attrib;
170 int error_am;
171 struct tsi148_driver *bridge;
172
173 bridge = tsi148_bridge->driver_priv;
174
175 error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
176 error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
177 error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
178 error_am = (error_attrib & TSI148_LCSR_VEAT_AM_M) >> 8;
179
180 reg_join(error_addr_high, error_addr_low, &error_addr);
181
182 /* Check for exception register overflow (we have lost error data) */
183 if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
184 dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
185 "Occurred\n");
186 }
187
188 if (err_chk)
189 vme_bus_error_handler(tsi148_bridge, error_addr, error_am);
190 else
191 dev_err(tsi148_bridge->parent,
192 "VME Bus Error at address: 0x%llx, attributes: %08x\n",
193 error_addr, error_attrib);
194
195 /* Clear Status */
196 iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
197
198 return TSI148_LCSR_INTC_VERRC;
199 }
200
201 /*
202 * Wake up IACK queue.
203 */
204 static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
205 {
206 wake_up(&bridge->iack_queue);
207
208 return TSI148_LCSR_INTC_IACKC;
209 }
210
211 /*
212 * Call the VME bus interrupt callback if provided.
213 */
214 static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
215 u32 stat)
216 {
217 int vec, i, serviced = 0;
218 struct tsi148_driver *bridge;
219
220 bridge = tsi148_bridge->driver_priv;
221
222 for (i = 7; i > 0; i--) {
223 if (stat & (1 << i)) {
224 /*
225 * Note: Even though the registers are defined as
226 * 32-bits in the spec, we only want to issue 8-bit
227 * IACK cycles on the bus, read from offset 3.
228 */
229 vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
230
231 vme_irq_handler(tsi148_bridge, i, vec);
232
233 serviced |= (1 << i);
234 }
235 }
236
237 return serviced;
238 }
239
240 /*
241 * Top level interrupt handler. Clears appropriate interrupt status bits and
242 * then calls appropriate sub handler(s).
243 */
244 static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
245 {
246 u32 stat, enable, serviced = 0;
247 struct vme_bridge *tsi148_bridge;
248 struct tsi148_driver *bridge;
249
250 tsi148_bridge = ptr;
251
252 bridge = tsi148_bridge->driver_priv;
253
254 /* Determine which interrupts are unmasked and set */
255 enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
256 stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
257
258 /* Only look at unmasked interrupts */
259 stat &= enable;
260
261 if (unlikely(!stat))
262 return IRQ_NONE;
263
264 /* Call subhandlers as appropriate */
265 /* DMA irqs */
266 if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
267 serviced |= tsi148_DMA_irqhandler(bridge, stat);
268
269 /* Location monitor irqs */
270 if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
271 TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
272 serviced |= tsi148_LM_irqhandler(bridge, stat);
273
274 /* Mail box irqs */
275 if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
276 TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
277 serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
278
279 /* PCI bus error */
280 if (stat & TSI148_LCSR_INTS_PERRS)
281 serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
282
283 /* VME bus error */
284 if (stat & TSI148_LCSR_INTS_VERRS)
285 serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
286
287 /* IACK irq */
288 if (stat & TSI148_LCSR_INTS_IACKS)
289 serviced |= tsi148_IACK_irqhandler(bridge);
290
291 /* VME bus irqs */
292 if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
293 TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
294 TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
295 TSI148_LCSR_INTS_IRQ1S))
296 serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
297
298 /* Clear serviced interrupts */
299 iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
300
301 return IRQ_HANDLED;
302 }
303
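/*
 * Request the bridge's (shared) PCI interrupt line and enable the core
 * interrupt sources: DMA, mailboxes, PCI and VME bus errors, and IACK.
 */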
304 static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
305 {
306 int result;
307 unsigned int tmp;
308 struct pci_dev *pdev;
309 struct tsi148_driver *bridge;
310
311 pdev = to_pci_dev(tsi148_bridge->parent);
312
313 bridge = tsi148_bridge->driver_priv;
314
315 result = request_irq(pdev->irq,
316 tsi148_irqhandler,
317 IRQF_SHARED,
318 driver_name, tsi148_bridge);
319 if (result) {
320 dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
321 "vector %02X\n", pdev->irq);
322 return result;
323 }
324
325 /* Enable and unmask interrupts */
326 tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
327 TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
328 TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
329 TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
330 TSI148_LCSR_INTEO_IACKEO;
331
332 /* This leaves the following interrupts masked.
333 * TSI148_LCSR_INTEO_VIEEO
334 * TSI148_LCSR_INTEO_SYSFLEO
335 * TSI148_LCSR_INTEO_ACFLEO
336 */
337
338 /* Don't enable Location Monitor interrupts here - they will be
339 * enabled when the location monitors are properly configured and
340 * a callback has been attached.
341 * TSI148_LCSR_INTEO_LM0EO
342 * TSI148_LCSR_INTEO_LM1EO
343 * TSI148_LCSR_INTEO_LM2EO
344 * TSI148_LCSR_INTEO_LM3EO
345 */
346
347 /* Don't enable VME interrupts until we add a handler, else the board
348 * will respond to it and we don't want that unless it knows how to
349 * properly deal with it.
350 * TSI148_LCSR_INTEO_IRQ7EO
351 * TSI148_LCSR_INTEO_IRQ6EO
352 * TSI148_LCSR_INTEO_IRQ5EO
353 * TSI148_LCSR_INTEO_IRQ4EO
354 * TSI148_LCSR_INTEO_IRQ3EO
355 * TSI148_LCSR_INTEO_IRQ2EO
356 * TSI148_LCSR_INTEO_IRQ1EO
357 */
358
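/* Write the same mask to both registers: INTEO routes enabled sources out to
 * the PCI interrupt pin, INTEN enables the individual sources - a source must
 * be set in both for its interrupt to be delivered.
 */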
359 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
360 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
361
362 return 0;
363 }
364
365 static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
366 struct pci_dev *pdev)
367 {
368 struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
369
370 /* Turn off interrupts */
371 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
372 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
373
374 /* Clear all interrupts */
375 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
376
377 /* Detach interrupt handler */
378 free_irq(pdev->irq, tsi148_bridge);
379 }
380
381 /*
382 * Check to see if an IACK has been received, return true (1) or false (0).
383 */
384 static int tsi148_iack_received(struct tsi148_driver *bridge)
385 {
386 u32 tmp;
387
388 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
389
390 if (tmp & TSI148_LCSR_VICR_IRQS)
391 return 0;
392 else
393 return 1;
394 }
395
396 /*
397 * Configure VME interrupt
398 */
399 static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
400 int state, int sync)
401 {
402 struct pci_dev *pdev;
403 u32 tmp;
404 struct tsi148_driver *bridge;
405
406 bridge = tsi148_bridge->driver_priv;
407
408 /* We need to do the ordering differently for enabling and disabling */
409 if (state == 0) {
410 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
411 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
412 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
413
414 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
415 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
416 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
417
418 if (sync != 0) {
419 pdev = to_pci_dev(tsi148_bridge->parent);
420 synchronize_irq(pdev->irq);
421 }
422 } else {
423 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
424 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
425 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
426
427 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
428 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
429 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
430 }
431 }
432
433 /*
434 * Generate a VME bus interrupt at the requested level & vector. Wait for
435 * interrupt to be acked.
436 */
437 static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
438 int statid)
439 {
440 u32 tmp;
441 struct tsi148_driver *bridge;
442
443 bridge = tsi148_bridge->driver_priv;
444
445 mutex_lock(&bridge->vme_int);
446
447 /* Read VICR register */
448 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
449
450 /* Set Status/ID */
451 tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
452 (statid & TSI148_LCSR_VICR_STID_M);
453 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
454
455 /* Assert VMEbus IRQ */
456 tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
457 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
458
459 /* XXX Consider implementing a timeout? */
460 wait_event_interruptible(bridge->iack_queue,
461 tsi148_iack_received(bridge));
462
463 mutex_unlock(&bridge->vme_int);
464
465 return 0;
466 }
467
468 /*
469 * Initialize a slave window with the requested attributes.
470 */
471 static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
472 unsigned long long vme_base, unsigned long long size,
473 dma_addr_t pci_base, u32 aspace, u32 cycle)
474 {
475 unsigned int i, addr = 0, granularity = 0;
476 unsigned int temp_ctl = 0;
477 unsigned int vme_base_low, vme_base_high;
478 unsigned int vme_bound_low, vme_bound_high;
479 unsigned int pci_offset_low, pci_offset_high;
480 unsigned long long vme_bound, pci_offset;
481 struct vme_bridge *tsi148_bridge;
482 struct tsi148_driver *bridge;
483
484 tsi148_bridge = image->parent;
485 bridge = tsi148_bridge->driver_priv;
486
487 i = image->number;
488
489 switch (aspace) {
490 case VME_A16:
491 granularity = 0x10;
492 addr |= TSI148_LCSR_ITAT_AS_A16;
493 break;
494 case VME_A24:
495 granularity = 0x1000;
496 addr |= TSI148_LCSR_ITAT_AS_A24;
497 break;
498 case VME_A32:
499 granularity = 0x10000;
500 addr |= TSI148_LCSR_ITAT_AS_A32;
501 break;
502 case VME_A64:
503 granularity = 0x10000;
504 addr |= TSI148_LCSR_ITAT_AS_A64;
505 break;
506 default:
507 dev_err(tsi148_bridge->parent, "Invalid address space\n");
508 return -EINVAL;
509 break;
510 }
511
512 /* Convert 64-bit variables to 2x 32-bit variables */
513 reg_split(vme_base, &vme_base_high, &vme_base_low);
514
515 /*
516 * Bound address is a valid address for the window, adjust
517 * accordingly
518 */
519 vme_bound = vme_base + size - granularity;
520 reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
521 pci_offset = (unsigned long long)pci_base - vme_base;
522 reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
523
524 if (vme_base_low & (granularity - 1)) {
525 dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
526 return -EINVAL;
527 }
528 if (vme_bound_low & (granularity - 1)) {
529 dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
530 return -EINVAL;
531 }
532 if (pci_offset_low & (granularity - 1)) {
533 dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
534 "alignment\n");
535 return -EINVAL;
536 }
537
538 /* Disable while we are mucking around */
539 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
540 TSI148_LCSR_OFFSET_ITAT);
541 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
542 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
543 TSI148_LCSR_OFFSET_ITAT);
544
545 /* Setup mapping */
546 iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
547 TSI148_LCSR_OFFSET_ITSAU);
548 iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
549 TSI148_LCSR_OFFSET_ITSAL);
550 iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
551 TSI148_LCSR_OFFSET_ITEAU);
552 iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
553 TSI148_LCSR_OFFSET_ITEAL);
554 iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
555 TSI148_LCSR_OFFSET_ITOFU);
556 iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
557 TSI148_LCSR_OFFSET_ITOFL);
558
559 /* Setup 2eSST speeds */
560 temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
561 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
562 case VME_2eSST160:
563 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
564 break;
565 case VME_2eSST267:
566 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
567 break;
568 case VME_2eSST320:
569 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
570 break;
571 }
572
573 /* Setup cycle types */
574 temp_ctl &= ~(0x1F << 7);
575 if (cycle & VME_BLT)
576 temp_ctl |= TSI148_LCSR_ITAT_BLT;
577 if (cycle & VME_MBLT)
578 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
579 if (cycle & VME_2eVME)
580 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
581 if (cycle & VME_2eSST)
582 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
583 if (cycle & VME_2eSSTB)
584 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
585
586 /* Setup address space */
587 temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
588 temp_ctl |= addr;
589
590 temp_ctl &= ~0xF;
591 if (cycle & VME_SUPER)
592 temp_ctl |= TSI148_LCSR_ITAT_SUPR;
593 if (cycle & VME_USER)
594 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
595 if (cycle & VME_PROG)
596 temp_ctl |= TSI148_LCSR_ITAT_PGM;
597 if (cycle & VME_DATA)
598 temp_ctl |= TSI148_LCSR_ITAT_DATA;
599
600 /* Write ctl reg without enable */
601 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
602 TSI148_LCSR_OFFSET_ITAT);
603
604 if (enabled)
605 temp_ctl |= TSI148_LCSR_ITAT_EN;
606
607 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
608 TSI148_LCSR_OFFSET_ITAT);
609
610 return 0;
611 }
612
613 /*
614 * Get slave window configuration.
615 */
616 static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
617 unsigned long long *vme_base, unsigned long long *size,
618 dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
619 {
620 unsigned int i, granularity = 0, ctl = 0;
621 unsigned int vme_base_low, vme_base_high;
622 unsigned int vme_bound_low, vme_bound_high;
623 unsigned int pci_offset_low, pci_offset_high;
624 unsigned long long vme_bound, pci_offset;
625 struct tsi148_driver *bridge;
626
627 bridge = image->parent->driver_priv;
628
629 i = image->number;
630
631 /* Read registers */
632 ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
633 TSI148_LCSR_OFFSET_ITAT);
634
635 vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
636 TSI148_LCSR_OFFSET_ITSAU);
637 vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
638 TSI148_LCSR_OFFSET_ITSAL);
639 vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
640 TSI148_LCSR_OFFSET_ITEAU);
641 vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
642 TSI148_LCSR_OFFSET_ITEAL);
643 pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
644 TSI148_LCSR_OFFSET_ITOFU);
645 pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
646 TSI148_LCSR_OFFSET_ITOFL);
647
648 /* Convert 2x 32-bit variables to 64-bit variables */
649 reg_join(vme_base_high, vme_base_low, vme_base);
650 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
651 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
652
653 *pci_base = (dma_addr_t)(*vme_base + pci_offset);
654
655 *enabled = 0;
656 *aspace = 0;
657 *cycle = 0;
658
659 if (ctl & TSI148_LCSR_ITAT_EN)
660 *enabled = 1;
661
662 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
663 granularity = 0x10;
664 *aspace |= VME_A16;
665 }
666 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
667 granularity = 0x1000;
668 *aspace |= VME_A24;
669 }
670 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
671 granularity = 0x10000;
672 *aspace |= VME_A32;
673 }
674 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
675 granularity = 0x10000;
676 *aspace |= VME_A64;
677 }
678
679 /* Need granularity before we set the size */
680 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
681
682
683 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
684 *cycle |= VME_2eSST160;
685 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
686 *cycle |= VME_2eSST267;
687 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
688 *cycle |= VME_2eSST320;
689
690 if (ctl & TSI148_LCSR_ITAT_BLT)
691 *cycle |= VME_BLT;
692 if (ctl & TSI148_LCSR_ITAT_MBLT)
693 *cycle |= VME_MBLT;
694 if (ctl & TSI148_LCSR_ITAT_2eVME)
695 *cycle |= VME_2eVME;
696 if (ctl & TSI148_LCSR_ITAT_2eSST)
697 *cycle |= VME_2eSST;
698 if (ctl & TSI148_LCSR_ITAT_2eSSTB)
699 *cycle |= VME_2eSSTB;
700
701 if (ctl & TSI148_LCSR_ITAT_SUPR)
702 *cycle |= VME_SUPER;
703 if (ctl & TSI148_LCSR_ITAT_NPRIV)
704 *cycle |= VME_USER;
705 if (ctl & TSI148_LCSR_ITAT_PGM)
706 *cycle |= VME_PROG;
707 if (ctl & TSI148_LCSR_ITAT_DATA)
708 *cycle |= VME_DATA;
709
710 return 0;
711 }
712
713 /*
714 * Allocate and map PCI Resource
715 */
716 static int tsi148_alloc_resource(struct vme_master_resource *image,
717 unsigned long long size)
718 {
719 unsigned long long existing_size;
720 int retval = 0;
721 struct pci_dev *pdev;
722 struct vme_bridge *tsi148_bridge;
723
724 tsi148_bridge = image->parent;
725
726 pdev = to_pci_dev(tsi148_bridge->parent);
727
728 existing_size = (unsigned long long)(image->bus_resource.end -
729 image->bus_resource.start);
730
731 /* If the existing size is OK, return */
732 if ((size != 0) && (existing_size == (size - 1)))
733 return 0;
734
735 if (existing_size != 0) {
736 iounmap(image->kern_base);
737 image->kern_base = NULL;
738 kfree(image->bus_resource.name);
739 release_resource(&image->bus_resource);
740 memset(&image->bus_resource, 0, sizeof(image->bus_resource));
741 }
742
743 /* Exit here if size is zero */
744 if (size == 0)
745 return 0;
746
747 if (!image->bus_resource.name) {
748 image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
749 if (!image->bus_resource.name) {
750 retval = -ENOMEM;
751 goto err_name;
752 }
753 }
754
755 sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
756 image->number);
757
758 image->bus_resource.start = 0;
759 image->bus_resource.end = (unsigned long)size;
760 image->bus_resource.flags = IORESOURCE_MEM;
761
762 retval = pci_bus_alloc_resource(pdev->bus,
763 &image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
764 0, NULL, NULL);
765 if (retval) {
766 dev_err(tsi148_bridge->parent, "Failed to allocate mem "
767 "resource for window %d size 0x%lx start 0x%lx\n",
768 image->number, (unsigned long)size,
769 (unsigned long)image->bus_resource.start);
770 goto err_resource;
771 }
772
773 image->kern_base = ioremap(
774 image->bus_resource.start, size);
775 if (!image->kern_base) {
776 dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
777 retval = -ENOMEM;
778 goto err_remap;
779 }
780
781 return 0;
782
783 err_remap:
784 release_resource(&image->bus_resource);
785 err_resource:
786 kfree(image->bus_resource.name);
787 memset(&image->bus_resource, 0, sizeof(image->bus_resource));
788 err_name:
789 return retval;
790 }
791
792 /*
793 * Free and unmap PCI Resource
794 */
795 static void tsi148_free_resource(struct vme_master_resource *image)
796 {
797 iounmap(image->kern_base);
798 image->kern_base = NULL;
799 release_resource(&image->bus_resource);
800 kfree(image->bus_resource.name);
801 memset(&image->bus_resource, 0, sizeof(image->bus_resource));
802 }
803
804 /*
805 * Set the attributes of an outbound window.
806 */
807 static int tsi148_master_set(struct vme_master_resource *image, int enabled,
808 unsigned long long vme_base, unsigned long long size, u32 aspace,
809 u32 cycle, u32 dwidth)
810 {
811 int retval = 0;
812 unsigned int i;
813 unsigned int temp_ctl = 0;
814 unsigned int pci_base_low, pci_base_high;
815 unsigned int pci_bound_low, pci_bound_high;
816 unsigned int vme_offset_low, vme_offset_high;
817 unsigned long long pci_bound, vme_offset, pci_base;
818 struct vme_bridge *tsi148_bridge;
819 struct tsi148_driver *bridge;
820 struct pci_bus_region region;
821 struct pci_dev *pdev;
822
823 tsi148_bridge = image->parent;
824
825 bridge = tsi148_bridge->driver_priv;
826
827 pdev = to_pci_dev(tsi148_bridge->parent);
828
829 /* Verify input data */
830 if (vme_base & 0xFFFF) {
831 dev_err(tsi148_bridge->parent, "Invalid VME Window "
832 "alignment\n");
833 retval = -EINVAL;
834 goto err_window;
835 }
836
837 if ((size == 0) && (enabled != 0)) {
838 dev_err(tsi148_bridge->parent, "Size must be non-zero for "
839 "enabled windows\n");
840 retval = -EINVAL;
841 goto err_window;
842 }
843
844 spin_lock(&image->lock);
845
846 /* Let's allocate the resource here rather than further up the stack as
847 * it avoids pushing loads of bus dependent stuff up the stack. If size
848 * is zero, any existing resource will be freed.
849 */
850 retval = tsi148_alloc_resource(image, size);
851 if (retval) {
852 spin_unlock(&image->lock);
853 dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
854 "resource\n");
855 goto err_res;
856 }
857
858 if (size == 0) {
859 pci_base = 0;
860 pci_bound = 0;
861 vme_offset = 0;
862 } else {
863 pcibios_resource_to_bus(pdev->bus, &region,
864 &image->bus_resource);
865 pci_base = region.start;
866
867 /*
868 * Bound address is a valid address for the window, adjust
869 * according to window granularity.
870 */
871 pci_bound = pci_base + (size - 0x10000);
872 vme_offset = vme_base - pci_base;
873 }
874
875 /* Convert 64-bit variables to 2x 32-bit variables */
876 reg_split(pci_base, &pci_base_high, &pci_base_low);
877 reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
878 reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
879
880 if (pci_base_low & 0xFFFF) {
881 spin_unlock(&image->lock);
882 dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
883 retval = -EINVAL;
884 goto err_gran;
885 }
886 if (pci_bound_low & 0xFFFF) {
887 spin_unlock(&image->lock);
888 dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
889 retval = -EINVAL;
890 goto err_gran;
891 }
892 if (vme_offset_low & 0xFFFF) {
893 spin_unlock(&image->lock);
894 dev_err(tsi148_bridge->parent, "Invalid VME Offset "
895 "alignment\n");
896 retval = -EINVAL;
897 goto err_gran;
898 }
899
900 i = image->number;
901
902 /* Disable while we are mucking around */
903 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
904 TSI148_LCSR_OFFSET_OTAT);
905 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
906 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
907 TSI148_LCSR_OFFSET_OTAT);
908
909 /* Setup 2eSST speeds */
910 temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
911 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
912 case VME_2eSST160:
913 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
914 break;
915 case VME_2eSST267:
916 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
917 break;
918 case VME_2eSST320:
919 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
920 break;
921 }
922
923 /* Setup cycle types */
924 if (cycle & VME_BLT) {
925 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
926 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
927 }
928 if (cycle & VME_MBLT) {
929 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
930 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
931 }
932 if (cycle & VME_2eVME) {
933 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
934 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
935 }
936 if (cycle & VME_2eSST) {
937 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
938 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
939 }
940 if (cycle & VME_2eSSTB) {
941 dev_warn(tsi148_bridge->parent, "Currently not setting "
942 "Broadcast Select Registers\n");
943 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
944 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
945 }
946
947 /* Setup data width */
948 temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
949 switch (dwidth) {
950 case VME_D16:
951 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
952 break;
953 case VME_D32:
954 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
955 break;
956 default:
957 spin_unlock(&image->lock);
958 dev_err(tsi148_bridge->parent, "Invalid data width\n");
959 retval = -EINVAL;
960 goto err_dwidth;
961 }
962
963 /* Setup address space */
964 temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
965 switch (aspace) {
966 case VME_A16:
967 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
968 break;
969 case VME_A24:
970 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
971 break;
972 case VME_A32:
973 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
974 break;
975 case VME_A64:
976 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
977 break;
978 case VME_CRCSR:
979 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
980 break;
981 case VME_USER1:
982 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
983 break;
984 case VME_USER2:
985 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
986 break;
987 case VME_USER3:
988 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
989 break;
990 case VME_USER4:
991 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
992 break;
993 default:
994 spin_unlock(&image->lock);
995 dev_err(tsi148_bridge->parent, "Invalid address space\n");
996 retval = -EINVAL;
997 goto err_aspace;
998 break;
999 }
1000
1001 temp_ctl &= ~(3<<4);
1002 if (cycle & VME_SUPER)
1003 temp_ctl |= TSI148_LCSR_OTAT_SUP;
1004 if (cycle & VME_PROG)
1005 temp_ctl |= TSI148_LCSR_OTAT_PGM;
1006
1007 /* Setup mapping */
1008 iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
1009 TSI148_LCSR_OFFSET_OTSAU);
1010 iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
1011 TSI148_LCSR_OFFSET_OTSAL);
1012 iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
1013 TSI148_LCSR_OFFSET_OTEAU);
1014 iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1015 TSI148_LCSR_OFFSET_OTEAL);
1016 iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1017 TSI148_LCSR_OFFSET_OTOFU);
1018 iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1019 TSI148_LCSR_OFFSET_OTOFL);
1020
1021 /* Write ctl reg without enable */
1022 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1023 TSI148_LCSR_OFFSET_OTAT);
1024
1025 if (enabled)
1026 temp_ctl |= TSI148_LCSR_OTAT_EN;
1027
1028 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1029 TSI148_LCSR_OFFSET_OTAT);
1030
1031 spin_unlock(&image->lock);
1032 return 0;
1033
1034 err_aspace:
1035 err_dwidth:
1036 err_gran:
1037 tsi148_free_resource(image);
1038 err_res:
1039 err_window:
1040 return retval;
1041
1042 }
1043
1044 /*
1045 * Get the attributes of an outbound window.
1046 *
1047 * XXX Not parsing prefetch information.
1048 */
1049 static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1050 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1051 u32 *cycle, u32 *dwidth)
1052 {
1053 unsigned int i, ctl;
1054 unsigned int pci_base_low, pci_base_high;
1055 unsigned int pci_bound_low, pci_bound_high;
1056 unsigned int vme_offset_low, vme_offset_high;
1057
1058 unsigned long long pci_base, pci_bound, vme_offset;
1059 struct tsi148_driver *bridge;
1060
1061 bridge = image->parent->driver_priv;
1062
1063 i = image->number;
1064
1065 ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1066 TSI148_LCSR_OFFSET_OTAT);
1067
1068 pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1069 TSI148_LCSR_OFFSET_OTSAU);
1070 pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1071 TSI148_LCSR_OFFSET_OTSAL);
1072 pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1073 TSI148_LCSR_OFFSET_OTEAU);
1074 pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1075 TSI148_LCSR_OFFSET_OTEAL);
1076 vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1077 TSI148_LCSR_OFFSET_OTOFU);
1078 vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1079 TSI148_LCSR_OFFSET_OTOFL);
1080
1081 /* Convert 2x 32-bit variables to 64-bit variables */
1082 reg_join(pci_base_high, pci_base_low, &pci_base);
1083 reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1084 reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1085
1086 *vme_base = pci_base + vme_offset;
1087 *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1088
1089 *enabled = 0;
1090 *aspace = 0;
1091 *cycle = 0;
1092 *dwidth = 0;
1093
1094 if (ctl & TSI148_LCSR_OTAT_EN)
1095 *enabled = 1;
1096
1097 /* Setup address space */
1098 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1099 *aspace |= VME_A16;
1100 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1101 *aspace |= VME_A24;
1102 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1103 *aspace |= VME_A32;
1104 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1105 *aspace |= VME_A64;
1106 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1107 *aspace |= VME_CRCSR;
1108 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1109 *aspace |= VME_USER1;
1110 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1111 *aspace |= VME_USER2;
1112 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1113 *aspace |= VME_USER3;
1114 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1115 *aspace |= VME_USER4;
1116
1117 /* Setup 2eSST speeds */
1118 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1119 *cycle |= VME_2eSST160;
1120 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1121 *cycle |= VME_2eSST267;
1122 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1123 *cycle |= VME_2eSST320;
1124
1125 /* Setup cycle types */
1126 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
1127 *cycle |= VME_SCT;
1128 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
1129 *cycle |= VME_BLT;
1130 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
1131 *cycle |= VME_MBLT;
1132 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
1133 *cycle |= VME_2eVME;
1134 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
1135 *cycle |= VME_2eSST;
1136 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
1137 *cycle |= VME_2eSSTB;
1138
1139 if (ctl & TSI148_LCSR_OTAT_SUP)
1140 *cycle |= VME_SUPER;
1141 else
1142 *cycle |= VME_USER;
1143
1144 if (ctl & TSI148_LCSR_OTAT_PGM)
1145 *cycle |= VME_PROG;
1146 else
1147 *cycle |= VME_DATA;
1148
1149 /* Setup data width */
1150 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1151 *dwidth = VME_D16;
1152 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1153 *dwidth = VME_D32;
1154
1155 return 0;
1156 }
1157
1158
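/*
 * Get the attributes of an outbound window (locked wrapper around
 * __tsi148_master_get).
 */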
1159 static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1160 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1161 u32 *cycle, u32 *dwidth)
1162 {
1163 int retval;
1164
1165 spin_lock(&image->lock);
1166
1167 retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1168 cycle, dwidth);
1169
1170 spin_unlock(&image->lock);
1171
1172 return retval;
1173 }
1174
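/*
 * Read 'count' bytes at 'offset' from a master window into 'buf', using
 * aligned 32/16/8-bit accesses so the configured VME data width can be used.
 */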
1175 static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1176 size_t count, loff_t offset)
1177 {
1178 int retval, enabled;
1179 unsigned long long vme_base, size;
1180 u32 aspace, cycle, dwidth;
1181 struct vme_error_handler *handler = NULL;
1182 struct vme_bridge *tsi148_bridge;
1183 void __iomem *addr = image->kern_base + offset;
1184 unsigned int done = 0;
1185 unsigned int count32;
1186
1187 tsi148_bridge = image->parent;
1188
1189 spin_lock(&image->lock);
1190
1191 if (err_chk) {
1192 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
1193 &cycle, &dwidth);
1194 handler = vme_register_error_handler(tsi148_bridge, aspace,
1195 vme_base + offset, count);
1196 if (!handler) {
1197 spin_unlock(&image->lock);
1198 return -ENOMEM;
1199 }
1200 }
1201
1202 /* The following code handles VME address alignment. We cannot use
1203 * memcpy_xxx here because it may cut data transfers into 8-bit
1204 * cycles when D16 or D32 cycles are required on the VME bus.
1205 * On the other hand, the bridge itself assures that the maximum data
1206 * cycle configured for the transfer is used and splits it
1207 * automatically for non-aligned addresses, so we don't want the
1208 * overhead of needlessly forcing small transfers for the entire cycle.
1209 */
1210 if ((uintptr_t)addr & 0x1) {
1211 *(u8 *)buf = ioread8(addr);
1212 done += 1;
1213 if (done == count)
1214 goto out;
1215 }
1216 if ((uintptr_t)(addr + done) & 0x2) {
1217 if ((count - done) < 2) {
1218 *(u8 *)(buf + done) = ioread8(addr + done);
1219 done += 1;
1220 goto out;
1221 } else {
1222 *(u16 *)(buf + done) = ioread16(addr + done);
1223 done += 2;
1224 }
1225 }
1226
1227 count32 = (count - done) & ~0x3;
1228 while (done < count32) {
1229 *(u32 *)(buf + done) = ioread32(addr + done);
1230 done += 4;
1231 }
1232
1233 if ((count - done) & 0x2) {
1234 *(u16 *)(buf + done) = ioread16(addr + done);
1235 done += 2;
1236 }
1237 if ((count - done) & 0x1) {
1238 *(u8 *)(buf + done) = ioread8(addr + done);
1239 done += 1;
1240 }
1241
1242 out:
1243 retval = count;
1244
1245 if (err_chk) {
1246 if (handler->num_errors) {
1247 dev_err(image->parent->parent,
1248 "First VME read error detected an at address 0x%llx\n",
1249 handler->first_error);
1250 retval = handler->first_error - (vme_base + offset);
1251 }
1252 vme_unregister_error_handler(handler);
1253 }
1254
1255 spin_unlock(&image->lock);
1256
1257 return retval;
1258 }
1259
1260
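/*
 * Write 'count' bytes from 'buf' to a master window at 'offset', using the
 * same aligned access strategy as tsi148_master_read.
 */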
1261 static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1262 size_t count, loff_t offset)
1263 {
1264 int retval = 0, enabled;
1265 unsigned long long vme_base, size;
1266 u32 aspace, cycle, dwidth;
1267 void __iomem *addr = image->kern_base + offset;
1268 unsigned int done = 0;
1269 unsigned int count32;
1270
1271 struct vme_error_handler *handler = NULL;
1272 struct vme_bridge *tsi148_bridge;
1273 struct tsi148_driver *bridge;
1274
1275 tsi148_bridge = image->parent;
1276
1277 bridge = tsi148_bridge->driver_priv;
1278
1279 spin_lock(&image->lock);
1280
1281 if (err_chk) {
1282 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
1283 &cycle, &dwidth);
1284 handler = vme_register_error_handler(tsi148_bridge, aspace,
1285 vme_base + offset, count);
1286 if (!handler) {
1287 spin_unlock(&image->lock);
1288 return -ENOMEM;
1289 }
1290 }
1291
1292 /* Here we apply the same strategy we use in the master_read
1293 * function in order to ensure the correct cycles.
1294 */
1295 if ((uintptr_t)addr & 0x1) {
1296 iowrite8(*(u8 *)buf, addr);
1297 done += 1;
1298 if (done == count)
1299 goto out;
1300 }
1301 if ((uintptr_t)(addr + done) & 0x2) {
1302 if ((count - done) < 2) {
1303 iowrite8(*(u8 *)(buf + done), addr + done);
1304 done += 1;
1305 goto out;
1306 } else {
1307 iowrite16(*(u16 *)(buf + done), addr + done);
1308 done += 2;
1309 }
1310 }
1311
1312 count32 = (count - done) & ~0x3;
1313 while (done < count32) {
1314 iowrite32(*(u32 *)(buf + done), addr + done);
1315 done += 4;
1316 }
1317
1318 if ((count - done) & 0x2) {
1319 iowrite16(*(u16 *)(buf + done), addr + done);
1320 done += 2;
1321 }
1322 if ((count - done) & 0x1) {
1323 iowrite8(*(u8 *)(buf + done), addr + done);
1324 done += 1;
1325 }
1326
1327 out:
1328 retval = count;
1329
1330 /*
1331 * Writes are posted. We need to do a read on the VME bus to flush out
1332 * all of the writes before we check for errors. We can't guarantee
1333 * that reading the data we have just written is safe. It is believed
1334 * that there isn't any read/write re-ordering, so we can read any
1335 * location in VME space, so let's read the Device ID from the tsi148's
1336 * own registers as mapped into CR/CSR space.
1337 *
1338 * We check for saved errors in the written address range/space.
1339 */
1340
1341 if (err_chk) {
1342 ioread16(bridge->flush_image->kern_base + 0x7F000);
1343
1344 if (handler->num_errors) {
1345 dev_warn(tsi148_bridge->parent,
1346 "First VME write error detected an at address 0x%llx\n",
1347 handler->first_error);
1348 retval = handler->first_error - (vme_base + offset);
1349 }
1350 vme_unregister_error_handler(handler);
1351 }
1352
1353 spin_unlock(&image->lock);
1354
1355 return retval;
1356 }
1357
1358 /*
1359 * Perform an RMW cycle on the VME bus.
1360 *
1361 * Requires a previously configured master window, returns final value.
1362 */
1363 static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1364 unsigned int mask, unsigned int compare, unsigned int swap,
1365 loff_t offset)
1366 {
1367 unsigned long long pci_addr;
1368 unsigned int pci_addr_high, pci_addr_low;
1369 u32 tmp, result;
1370 int i;
1371 struct tsi148_driver *bridge;
1372
1373 bridge = image->parent->driver_priv;
1374
1375 /* Find the PCI address that maps to the desired VME address */
1376 i = image->number;
1377
1378 /* Locking as we can only do one of these at a time */
1379 mutex_lock(&bridge->vme_rmw);
1380
1381 /* Lock image */
1382 spin_lock(&image->lock);
1383
1384 pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1385 TSI148_LCSR_OFFSET_OTSAU);
1386 pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1387 TSI148_LCSR_OFFSET_OTSAL);
1388
1389 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1390 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1391
1392 /* Configure registers */
1393 iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1394 iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1395 iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1396 iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1397 iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1398
1399 /* Enable RMW */
1400 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1401 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1402 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1403
1404 /* Kick process off with a read to the required address. */
1405 result = ioread32be(image->kern_base + offset);
1406
1407 /* Disable RMW */
1408 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1409 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1410 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1411
1412 spin_unlock(&image->lock);
1413
1414 mutex_unlock(&bridge->vme_rmw);
1415
1416 return result;
1417 }
1418
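/*
 * Encode the VME source attributes (2eSST rate, cycle type, data width and
 * address space) into the big-endian DSAT word of a DMA descriptor.
 */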
1419 static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
1420 u32 aspace, u32 cycle, u32 dwidth)
1421 {
1422 u32 val;
1423
1424 val = be32_to_cpu(*attr);
1425
1426 /* Setup 2eSST speeds */
1427 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1428 case VME_2eSST160:
1429 val |= TSI148_LCSR_DSAT_2eSSTM_160;
1430 break;
1431 case VME_2eSST267:
1432 val |= TSI148_LCSR_DSAT_2eSSTM_267;
1433 break;
1434 case VME_2eSST320:
1435 val |= TSI148_LCSR_DSAT_2eSSTM_320;
1436 break;
1437 }
1438
1439 /* Setup cycle types */
1440 if (cycle & VME_SCT)
1441 val |= TSI148_LCSR_DSAT_TM_SCT;
1442
1443 if (cycle & VME_BLT)
1444 val |= TSI148_LCSR_DSAT_TM_BLT;
1445
1446 if (cycle & VME_MBLT)
1447 val |= TSI148_LCSR_DSAT_TM_MBLT;
1448
1449 if (cycle & VME_2eVME)
1450 val |= TSI148_LCSR_DSAT_TM_2eVME;
1451
1452 if (cycle & VME_2eSST)
1453 val |= TSI148_LCSR_DSAT_TM_2eSST;
1454
1455 if (cycle & VME_2eSSTB) {
1456 dev_err(dev, "Currently not setting Broadcast Select "
1457 "Registers\n");
1458 val |= TSI148_LCSR_DSAT_TM_2eSSTB;
1459 }
1460
1461 /* Setup data width */
1462 switch (dwidth) {
1463 case VME_D16:
1464 val |= TSI148_LCSR_DSAT_DBW_16;
1465 break;
1466 case VME_D32:
1467 val |= TSI148_LCSR_DSAT_DBW_32;
1468 break;
1469 default:
1470 dev_err(dev, "Invalid data width\n");
1471 return -EINVAL;
1472 }
1473
1474 /* Setup address space */
1475 switch (aspace) {
1476 case VME_A16:
1477 val |= TSI148_LCSR_DSAT_AMODE_A16;
1478 break;
1479 case VME_A24:
1480 val |= TSI148_LCSR_DSAT_AMODE_A24;
1481 break;
1482 case VME_A32:
1483 val |= TSI148_LCSR_DSAT_AMODE_A32;
1484 break;
1485 case VME_A64:
1486 val |= TSI148_LCSR_DSAT_AMODE_A64;
1487 break;
1488 case VME_CRCSR:
1489 val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1490 break;
1491 case VME_USER1:
1492 val |= TSI148_LCSR_DSAT_AMODE_USER1;
1493 break;
1494 case VME_USER2:
1495 val |= TSI148_LCSR_DSAT_AMODE_USER2;
1496 break;
1497 case VME_USER3:
1498 val |= TSI148_LCSR_DSAT_AMODE_USER3;
1499 break;
1500 case VME_USER4:
1501 val |= TSI148_LCSR_DSAT_AMODE_USER4;
1502 break;
1503 default:
1504 dev_err(dev, "Invalid address space\n");
1505 return -EINVAL;
1506 break;
1507 }
1508
1509 if (cycle & VME_SUPER)
1510 val |= TSI148_LCSR_DSAT_SUP;
1511 if (cycle & VME_PROG)
1512 val |= TSI148_LCSR_DSAT_PGM;
1513
1514 *attr = cpu_to_be32(val);
1515
1516 return 0;
1517 }
1518
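/*
 * Encode the VME destination attributes into the big-endian DDAT word of a
 * DMA descriptor.
 */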
1519 static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
1520 u32 aspace, u32 cycle, u32 dwidth)
1521 {
1522 u32 val;
1523
1524 val = be32_to_cpu(*attr);
1525
1526 /* Setup 2eSST speeds */
1527 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1528 case VME_2eSST160:
1529 val |= TSI148_LCSR_DDAT_2eSSTM_160;
1530 break;
1531 case VME_2eSST267:
1532 val |= TSI148_LCSR_DDAT_2eSSTM_267;
1533 break;
1534 case VME_2eSST320:
1535 val |= TSI148_LCSR_DDAT_2eSSTM_320;
1536 break;
1537 }
1538
1539 /* Setup cycle types */
1540 if (cycle & VME_SCT)
1541 val |= TSI148_LCSR_DDAT_TM_SCT;
1542
1543 if (cycle & VME_BLT)
1544 val |= TSI148_LCSR_DDAT_TM_BLT;
1545
1546 if (cycle & VME_MBLT)
1547 val |= TSI148_LCSR_DDAT_TM_MBLT;
1548
1549 if (cycle & VME_2eVME)
1550 val |= TSI148_LCSR_DDAT_TM_2eVME;
1551
1552 if (cycle & VME_2eSST)
1553 val |= TSI148_LCSR_DDAT_TM_2eSST;
1554
1555 if (cycle & VME_2eSSTB) {
1556 dev_err(dev, "Currently not setting Broadcast Select "
1557 "Registers\n");
1558 val |= TSI148_LCSR_DDAT_TM_2eSSTB;
1559 }
1560
1561 /* Setup data width */
1562 switch (dwidth) {
1563 case VME_D16:
1564 val |= TSI148_LCSR_DDAT_DBW_16;
1565 break;
1566 case VME_D32:
1567 val |= TSI148_LCSR_DDAT_DBW_32;
1568 break;
1569 default:
1570 dev_err(dev, "Invalid data width\n");
1571 return -EINVAL;
1572 }
1573
1574 /* Setup address space */
1575 switch (aspace) {
1576 case VME_A16:
1577 val |= TSI148_LCSR_DDAT_AMODE_A16;
1578 break;
1579 case VME_A24:
1580 val |= TSI148_LCSR_DDAT_AMODE_A24;
1581 break;
1582 case VME_A32:
1583 val |= TSI148_LCSR_DDAT_AMODE_A32;
1584 break;
1585 case VME_A64:
1586 val |= TSI148_LCSR_DDAT_AMODE_A64;
1587 break;
1588 case VME_CRCSR:
1589 val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1590 break;
1591 case VME_USER1:
1592 val |= TSI148_LCSR_DDAT_AMODE_USER1;
1593 break;
1594 case VME_USER2:
1595 val |= TSI148_LCSR_DDAT_AMODE_USER2;
1596 break;
1597 case VME_USER3:
1598 val |= TSI148_LCSR_DDAT_AMODE_USER3;
1599 break;
1600 case VME_USER4:
1601 val |= TSI148_LCSR_DDAT_AMODE_USER4;
1602 break;
1603 default:
1604 dev_err(dev, "Invalid address space\n");
1605 return -EINVAL;
1606 break;
1607 }
1608
1609 if (cycle & VME_SUPER)
1610 val |= TSI148_LCSR_DDAT_SUP;
1611 if (cycle & VME_PROG)
1612 val |= TSI148_LCSR_DDAT_PGM;
1613
1614 *attr = cpu_to_be32(val);
1615
1616 return 0;
1617 }
1618
1619 /*
1620 * Add a link list descriptor to the list
1621 *
1622 * Note: DMA engine expects the DMA descriptor to be big endian.
1623 */
1624 static int tsi148_dma_list_add(struct vme_dma_list *list,
1625 struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1626 {
1627 struct tsi148_dma_entry *entry, *prev;
1628 u32 address_high, address_low, val;
1629 struct vme_dma_pattern *pattern_attr;
1630 struct vme_dma_pci *pci_attr;
1631 struct vme_dma_vme *vme_attr;
1632 int retval = 0;
1633 struct vme_bridge *tsi148_bridge;
1634
1635 tsi148_bridge = list->parent->parent;
1636
1637 /* Descriptor must be aligned on 64-bit boundaries */
1638 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1639 if (!entry) {
1640 retval = -ENOMEM;
1641 goto err_mem;
1642 }
1643
1644 /* Test descriptor alignment */
1645 if ((unsigned long)&entry->descriptor & 0x7) {
1646 dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
1647 "byte boundary as required: %p\n",
1648 &entry->descriptor);
1649 retval = -EINVAL;
1650 goto err_align;
1651 }
1652
1653 /* Given we are going to fill out the structure, we probably don't
1654 * need to zero it, but better safe than sorry for now.
1655 */
1656 memset(&entry->descriptor, 0, sizeof(entry->descriptor));
1657
1658 /* Fill out source part */
1659 switch (src->type) {
1660 case VME_DMA_PATTERN:
1661 pattern_attr = src->private;
1662
1663 entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
1664
1665 val = TSI148_LCSR_DSAT_TYP_PAT;
1666
1667 /* Default behaviour is 32 bit pattern */
1668 if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
1669 val |= TSI148_LCSR_DSAT_PSZ;
1670
1671 /* It seems that the default behaviour is to increment */
1672 if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
1673 val |= TSI148_LCSR_DSAT_NIN;
1674 entry->descriptor.dsat = cpu_to_be32(val);
1675 break;
1676 case VME_DMA_PCI:
1677 pci_attr = src->private;
1678
1679 reg_split((unsigned long long)pci_attr->address, &address_high,
1680 &address_low);
1681 entry->descriptor.dsau = cpu_to_be32(address_high);
1682 entry->descriptor.dsal = cpu_to_be32(address_low);
1683 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
1684 break;
1685 case VME_DMA_VME:
1686 vme_attr = src->private;
1687
1688 reg_split((unsigned long long)vme_attr->address, &address_high,
1689 &address_low);
1690 entry->descriptor.dsau = cpu_to_be32(address_high);
1691 entry->descriptor.dsal = cpu_to_be32(address_low);
1692 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
1693
1694 retval = tsi148_dma_set_vme_src_attributes(
1695 tsi148_bridge->parent, &entry->descriptor.dsat,
1696 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1697 if (retval < 0)
1698 goto err_source;
1699 break;
1700 default:
1701 dev_err(tsi148_bridge->parent, "Invalid source type\n");
1702 retval = -EINVAL;
1703 goto err_source;
1704 break;
1705 }
1706
1707 /* Assume last link - this will be over-written by adding another */
1708 entry->descriptor.dnlau = cpu_to_be32(0);
1709 entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
1710
1711 /* Fill out destination part */
1712 switch (dest->type) {
1713 case VME_DMA_PCI:
1714 pci_attr = dest->private;
1715
1716 reg_split((unsigned long long)pci_attr->address, &address_high,
1717 &address_low);
1718 entry->descriptor.ddau = cpu_to_be32(address_high);
1719 entry->descriptor.ddal = cpu_to_be32(address_low);
1720 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
1721 break;
1722 case VME_DMA_VME:
1723 vme_attr = dest->private;
1724
1725 reg_split((unsigned long long)vme_attr->address, &address_high,
1726 &address_low);
1727 entry->descriptor.ddau = cpu_to_be32(address_high);
1728 entry->descriptor.ddal = cpu_to_be32(address_low);
1729 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
1730
1731 retval = tsi148_dma_set_vme_dest_attributes(
1732 tsi148_bridge->parent, &entry->descriptor.ddat,
1733 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1734 if (retval < 0)
1735 goto err_dest;
1736 break;
1737 default:
1738 dev_err(tsi148_bridge->parent, "Invalid destination type\n");
1739 retval = -EINVAL;
1740 goto err_dest;
1741 break;
1742 }
1743
1744 /* Fill out count */
1745 entry->descriptor.dcnt = cpu_to_be32((u32)count);
1746
1747 /* Add to list */
1748 list_add_tail(&entry->list, &list->entries);
1749
1750 entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1751 &entry->descriptor,
1752 sizeof(entry->descriptor),
1753 DMA_TO_DEVICE);
1754 if (dma_mapping_error(tsi148_bridge->parent, entry->dma_handle)) {
1755 dev_err(tsi148_bridge->parent, "DMA mapping error\n");
1756 retval = -EINVAL;
1757 goto err_dma;
1758 }
1759
1760 /* Fill out previous descriptors "Next Address" */
1761 if (entry->list.prev != &list->entries) {
1762 reg_split((unsigned long long)entry->dma_handle, &address_high,
1763 &address_low);
1764 prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1765 list);
1766 prev->descriptor.dnlau = cpu_to_be32(address_high);
1767 prev->descriptor.dnlal = cpu_to_be32(address_low);
1768
1769 }
1770
1771 return 0;
1772
1773 err_dma:
1774 list_del(&entry->list);
1775 err_dest:
1776 err_source:
1777 err_align:
1778 kfree(entry);
1779 err_mem:
1780 return retval;
1781 }
1782
1783 /*
1784 * Check to see if the provided DMA channel is busy.
1785 */
1786 static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1787 {
1788 u32 tmp;
1789 struct tsi148_driver *bridge;
1790
1791 bridge = tsi148_bridge->driver_priv;
1792
1793 tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1794 TSI148_LCSR_OFFSET_DSTA);
1795
1796 if (tmp & TSI148_LCSR_DSTA_BSY)
1797 return 0;
1798 else
1799 return 1;
1800
1801 }
1802
1803 /*
1804 * Execute a previously generated link list
1805 *
1806 * XXX Need to provide control register configuration.
1807 */
1808 static int tsi148_dma_list_exec(struct vme_dma_list *list)
1809 {
1810 struct vme_dma_resource *ctrlr;
1811 int channel, retval;
1812 struct tsi148_dma_entry *entry;
1813 u32 bus_addr_high, bus_addr_low;
1814 u32 val, dctlreg = 0;
1815 struct vme_bridge *tsi148_bridge;
1816 struct tsi148_driver *bridge;
1817
1818 ctrlr = list->parent;
1819
1820 tsi148_bridge = ctrlr->parent;
1821
1822 bridge = tsi148_bridge->driver_priv;
1823
1824 mutex_lock(&ctrlr->mtx);
1825
1826 channel = ctrlr->number;
1827
1828 if (!list_empty(&ctrlr->running)) {
1829 /*
1830 * XXX We have an active DMA transfer and currently haven't
1831 * sorted out the mechanism for "pending" DMA transfers.
1832 * Return busy.
1833 */
1834 /* Need to add to pending here */
1835 mutex_unlock(&ctrlr->mtx);
1836 return -EBUSY;
1837 } else {
1838 list_add(&list->list, &ctrlr->running);
1839 }
1840
1841 /* Get first bus address and write into registers */
1842 entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
1843 list);
1844
1845 mutex_unlock(&ctrlr->mtx);
1846
1847 reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);
1848
1849 iowrite32be(bus_addr_high, bridge->base +
1850 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1851 iowrite32be(bus_addr_low, bridge->base +
1852 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1853
1854 dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1855 TSI148_LCSR_OFFSET_DCTL);
1856
1857 /* Start the operation */
1858 iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1859 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1860
1861 retval = wait_event_interruptible(bridge->dma_queue[channel],
1862 tsi148_dma_busy(ctrlr->parent, channel));
1863
1864 if (retval) {
1865 iowrite32be(dctlreg | TSI148_LCSR_DCTL_ABT, bridge->base +
1866 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1867 /* Wait for the operation to abort */
1868 wait_event(bridge->dma_queue[channel],
1869 tsi148_dma_busy(ctrlr->parent, channel));
1870 retval = -EINTR;
1871 goto exit;
1872 }
1873
1874 /*
1875 * Read the status register; it remains valid until we kick off a
1876 * new transfer.
1877 */
1878 val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1879 TSI148_LCSR_OFFSET_DSTA);
1880
1881 if (val & TSI148_LCSR_DSTA_VBE) {
1882 dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
1883 retval = -EIO;
1884 }
1885
1886 exit:
1887 /* Remove list from running list */
1888 mutex_lock(&ctrlr->mtx);
1889 list_del(&list->list);
1890 mutex_unlock(&ctrlr->mtx);
1891
1892 return retval;
1893 }
1894
1895 /*
1896 * Clean up a previously generated link list
1897 *
1898 * Kept separate from execution - do not assume the chain cannot be reused.
1899 */
1900 static int tsi148_dma_list_empty(struct vme_dma_list *list)
1901 {
1902 struct list_head *pos, *temp;
1903 struct tsi148_dma_entry *entry;
1904
1905 struct vme_bridge *tsi148_bridge = list->parent->parent;
1906
1907 /* detach and free each entry */
1908 list_for_each_safe(pos, temp, &list->entries) {
1909 list_del(pos);
1910 entry = list_entry(pos, struct tsi148_dma_entry, list);
1911
1912 dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
1913 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1914 kfree(entry);
1915 }
1916
1917 return 0;
1918 }
1919
1920 /*
1921 * All 4 location monitors reside at the same base - this is therefore a
1922 * system-wide configuration.
1923 *
1924 * This does not enable the location monitor - that should be done when the
1925 * first callback is attached and disabled when the last callback is removed.
1926 */
1927 static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1928 u32 aspace, u32 cycle)
1929 {
1930 u32 lm_base_high, lm_base_low, lm_ctl = 0;
1931 int i;
1932 struct vme_bridge *tsi148_bridge;
1933 struct tsi148_driver *bridge;
1934
1935 tsi148_bridge = lm->parent;
1936
1937 bridge = tsi148_bridge->driver_priv;
1938
1939 mutex_lock(&lm->mtx);
1940
1941 /* If we already have a callback attached, we can't move it! */
1942 for (i = 0; i < lm->monitors; i++) {
1943 if (bridge->lm_callback[i]) {
1944 mutex_unlock(&lm->mtx);
1945 dev_err(tsi148_bridge->parent,
1946 "Location monitor callback attached, can't reset\n");
1947 return -EBUSY;
1948 }
1949 }
1950
1951 switch (aspace) {
1952 case VME_A16:
1953 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
1954 break;
1955 case VME_A24:
1956 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
1957 break;
1958 case VME_A32:
1959 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
1960 break;
1961 case VME_A64:
1962 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
1963 break;
1964 default:
1965 mutex_unlock(&lm->mtx);
1966 dev_err(tsi148_bridge->parent, "Invalid address space\n");
1967 return -EINVAL;
1968 break;
1969 }
1970
1971 if (cycle & VME_SUPER)
1972 lm_ctl |= TSI148_LCSR_LMAT_SUPR;
1973 if (cycle & VME_USER)
1974 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
1975 if (cycle & VME_PROG)
1976 lm_ctl |= TSI148_LCSR_LMAT_PGM;
1977 if (cycle & VME_DATA)
1978 lm_ctl |= TSI148_LCSR_LMAT_DATA;
1979
1980 reg_split(lm_base, &lm_base_high, &lm_base_low);
1981
1982 iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
1983 iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
1984 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
1985
1986 mutex_unlock(&lm->mtx);
1987
1988 return 0;
1989 }
1990
1991 /* Get the configuration of the location monitor and return whether it is
1992 * enabled or disabled.
1993 */
1994 static int tsi148_lm_get(struct vme_lm_resource *lm,
1995 unsigned long long *lm_base, u32 *aspace, u32 *cycle)
1996 {
1997 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
1998 struct tsi148_driver *bridge;
1999
2000 bridge = lm->parent->driver_priv;
2001
2002 mutex_lock(&lm->mtx);
2003
2004 lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
2005 lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
2006 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2007
2008 reg_join(lm_base_high, lm_base_low, lm_base);
2009
2010 if (lm_ctl & TSI148_LCSR_LMAT_EN)
2011 enabled = 1;
2012
2013 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
2014 *aspace |= VME_A16;
2015
2016 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
2017 *aspace |= VME_A24;
2018
2019 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
2020 *aspace |= VME_A32;
2021
2022 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
2023 *aspace |= VME_A64;
2024
2025
2026 if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
2027 *cycle |= VME_SUPER;
2028 if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
2029 *cycle |= VME_USER;
2030 if (lm_ctl & TSI148_LCSR_LMAT_PGM)
2031 *cycle |= VME_PROG;
2032 if (lm_ctl & TSI148_LCSR_LMAT_DATA)
2033 *cycle |= VME_DATA;
2034
2035 mutex_unlock(&lm->mtx);
2036
2037 return enabled;
2038 }
2039
2040 /*
2041 * Attach a callback to a specific location monitor.
2042 *
2043 * The callback is passed the data pointer registered when it was attached.
2044 */
2045 static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2046 void (*callback)(void *), void *data)
2047 {
2048 u32 lm_ctl, tmp;
2049 struct vme_bridge *tsi148_bridge;
2050 struct tsi148_driver *bridge;
2051
2052 tsi148_bridge = lm->parent;
2053
2054 bridge = tsi148_bridge->driver_priv;
2055
2056 mutex_lock(&lm->mtx);
2057
2058 /* Ensure that the location monitor is configured - need PGM or DATA */
2059 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2060 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2061 mutex_unlock(&lm->mtx);
2062 dev_err(tsi148_bridge->parent,
2063 "Location monitor not properly configured\n");
2064 return -EINVAL;
2065 }
2066
2067 /* Check that a callback isn't already attached */
2068 if (bridge->lm_callback[monitor]) {
2069 mutex_unlock(&lm->mtx);
2070 dev_err(tsi148_bridge->parent, "Existing callback attached\n");
2071 return -EBUSY;
2072 }
2073
2074 /* Attach callback */
2075 bridge->lm_callback[monitor] = callback;
2076 bridge->lm_data[monitor] = data;
2077
2078 /* Enable Location Monitor interrupt */
2079 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2080 tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2081 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2082
2083 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2084 tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2085 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2086
2087 /* Ensure that the global Location Monitor Enable bit is set */
2088 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2089 lm_ctl |= TSI148_LCSR_LMAT_EN;
2090 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2091 }
2092
2093 mutex_unlock(&lm->mtx);
2094
2095 return 0;
2096 }
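
/*
 * Usage sketch (illustrative only): a consumer driver normally drives the
 * location monitors through the VME core. Assuming the usual vme_lm_*
 * helpers:
 *
 *	struct vme_resource *lm_res = vme_lm_request(vdev);
 *
 *	vme_lm_set(lm_res, lm_base, VME_A24, VME_USER | VME_DATA);
 *	vme_lm_attach(lm_res, 0, my_callback, my_data);
 *	...
 *	vme_lm_detach(lm_res, 0);
 *	vme_lm_free(lm_res);
 *
 * vdev, lm_base, my_callback and my_data are placeholders provided by the
 * consumer driver; error handling is omitted.
 */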
2097
2098 /*
2099 * Detach a callback function from a specific location monitor.
2100 */
2101 static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2102 {
2103 u32 lm_en, tmp;
2104 struct tsi148_driver *bridge;
2105
2106 bridge = lm->parent->driver_priv;
2107
2108 mutex_lock(&lm->mtx);
2109
2110 /* Disable Location Monitor and ensure previous interrupts are clear */
2111 lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2112 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2113 iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2114
2115 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2116 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2117 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2118
2119 iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2120 bridge->base + TSI148_LCSR_INTC);
2121
2122 /* Detach callback */
2123 bridge->lm_callback[monitor] = NULL;
2124 bridge->lm_data[monitor] = NULL;
2125
2126 /* If all location monitors are disabled, disable the global Location Monitor */
2127 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2128 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2129 tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2130 tmp &= ~TSI148_LCSR_LMAT_EN;
2131 iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2132 }
2133
2134 mutex_unlock(&lm->mtx);
2135
2136 return 0;
2137 }
2138
2139 /*
2140 * Determine Geographical Addressing
2141 */
2142 static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2143 {
2144 u32 slot = 0;
2145 struct tsi148_driver *bridge;
2146
2147 bridge = tsi148_bridge->driver_priv;
2148
2149 if (!geoid) {
2150 slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2151 slot = slot & TSI148_LCSR_VSTAT_GA_M;
2152 } else {
2153 slot = geoid;
2154 }
2155 return (int)slot;
2156 }
2157
2158 static void *tsi148_alloc_consistent(struct device *parent, size_t size,
2159 dma_addr_t *dma)
2160 {
2161 struct pci_dev *pdev;
2162
2163 /* Find pci_dev container of dev */
2164 pdev = to_pci_dev(parent);
2165
2166 return pci_alloc_consistent(pdev, size, dma);
2167 }
2168
2169 static void tsi148_free_consistent(struct device *parent, size_t size,
2170 void *vaddr, dma_addr_t dma)
2171 {
2172 struct pci_dev *pdev;
2173
2174 /* Find pci_dev container of dev */
2175 pdev = to_pci_dev(parent);
2176
2177 pci_free_consistent(pdev, size, vaddr, dma);
2178 }
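
/*
 * Note: pci_alloc_consistent()/pci_free_consistent() above are thin
 * wrappers around the generic DMA API. On kernels where the PCI DMA compat
 * layer is unavailable, an equivalent sketch (untested here) would be:
 *
 *	return dma_alloc_coherent(parent, size, dma, GFP_KERNEL);
 *	...
 *	dma_free_coherent(parent, size, vaddr, dma);
 */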
2179
2180 /*
2181 * Configure CR/CSR space
2182 *
2183 * Access to CR/CSR space can be configured at power-up. The location of the
2184 * CR/CSR registers in the CR/CSR address space is determined by the board's
2185 * Auto-ID or Geographic address. This function ensures that the window is
2186 * enabled at an offset consistent with the board's geographic address.
2187 *
2188 * Each board has a 512kB window; the highest 4kB holds the board's
2189 * registers, leaving a fixed-length 508kB window which must be mapped onto
2190 * PCI memory.
2191 */
2192 static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2193 struct pci_dev *pdev)
2194 {
2195 u32 cbar, crat, vstat;
2196 u32 crcsr_bus_high, crcsr_bus_low;
2197 int retval;
2198 struct tsi148_driver *bridge;
2199
2200 bridge = tsi148_bridge->driver_priv;
2201
2202 /* Allocate mem for CR/CSR image */
2203 bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2204 &bridge->crcsr_bus);
2205 if (!bridge->crcsr_kernel) {
2206 dev_err(tsi148_bridge->parent,
2207 "Failed to allocate memory for CR/CSR image\n");
2208 return -ENOMEM;
2209 }
2210
2211 reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2212
2213 iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2214 iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2215
2216 /* Ensure that the CR/CSR is configured at the correct offset */
2217 cbar = ioread32be(bridge->base + TSI148_CBAR);
2218 cbar = (cbar & TSI148_CRCSR_CBAR_M) >> 3;
2219
2220 vstat = tsi148_slot_get(tsi148_bridge);
2221
2222 if (cbar != vstat) {
2223 cbar = vstat;
2224 dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
2225 iowrite32be(cbar << 3, bridge->base + TSI148_CBAR);
2226 }
2227 dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
2228
2229 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2230 if (crat & TSI148_LCSR_CRAT_EN) {
2231 dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
2232 } else {
2233 dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
2234 iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2235 bridge->base + TSI148_LCSR_CRAT);
2236 }
2237
2238 /* If we want flushed, error-checked writes, set up a window
2239 * over the CR/CSR registers. Reads from this window are used to
2240 * safely flush posted VME writes.
2241 */
2242 if (err_chk) {
2243 retval = tsi148_master_set(bridge->flush_image, 1,
2244 (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2245 VME_D16);
2246 if (retval)
2247 dev_err(tsi148_bridge->parent,
2248 "Configuring flush image failed\n");
2249 }
2250
2251 return 0;
2252
2253 }
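
/*
 * Worked example: for a board with geographic address 5, tsi148_slot_get()
 * returns 5, the CBAR field is programmed with 5 (written as 5 << 3), and
 * the board's 512kB CR/CSR window starts at 5 * 0x80000 = 0x280000. With
 * err_chk set, the flush image above therefore maps 0x80000 bytes of
 * CR/CSR space starting at offset 0x280000.
 */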
2254
2255 static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2256 struct pci_dev *pdev)
2257 {
2258 u32 crat;
2259 struct tsi148_driver *bridge;
2260
2261 bridge = tsi148_bridge->driver_priv;
2262
2263 /* Turn off CR/CSR space */
2264 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2265 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2266 bridge->base + TSI148_LCSR_CRAT);
2267
2268 /* Free image */
2269 iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2270 iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2271
2272 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
2273 bridge->crcsr_bus);
2274 }
2275
2276 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2277 {
2278 int retval, i, master_num;
2279 u32 data;
2280 struct list_head *pos = NULL, *n;
2281 struct vme_bridge *tsi148_bridge;
2282 struct tsi148_driver *tsi148_device;
2283 struct vme_master_resource *master_image;
2284 struct vme_slave_resource *slave_image;
2285 struct vme_dma_resource *dma_ctrlr;
2286 struct vme_lm_resource *lm;
2287
2288 /* If we want to support more than one of each bridge, we need to
2289 * dynamically generate this so we get one per device
2290 */
2291 tsi148_bridge = kzalloc(sizeof(*tsi148_bridge), GFP_KERNEL);
2292 if (!tsi148_bridge) {
2293 retval = -ENOMEM;
2294 goto err_struct;
2295 }
2296 vme_init_bridge(tsi148_bridge);
2297
2298 tsi148_device = kzalloc(sizeof(*tsi148_device), GFP_KERNEL);
2299 if (!tsi148_device) {
2300 retval = -ENOMEM;
2301 goto err_driver;
2302 }
2303
2304 tsi148_bridge->driver_priv = tsi148_device;
2305
2306 /* Enable the device */
2307 retval = pci_enable_device(pdev);
2308 if (retval) {
2309 dev_err(&pdev->dev, "Unable to enable device\n");
2310 goto err_enable;
2311 }
2312
2313 /* Map Registers */
2314 retval = pci_request_regions(pdev, driver_name);
2315 if (retval) {
2316 dev_err(&pdev->dev, "Unable to reserve resources\n");
2317 goto err_resource;
2318 }
2319
2320 /* map registers in BAR 0 */
2321 tsi148_device->base = ioremap(pci_resource_start(pdev, 0),
2322 4096);
2323 if (!tsi148_device->base) {
2324 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2325 retval = -EIO;
2326 goto err_remap;
2327 }
2328
2329 /* Check to see if the mapping worked out */
2330 data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2331 if (data != PCI_VENDOR_ID_TUNDRA) {
2332 dev_err(&pdev->dev, "CRG region check failed\n");
2333 retval = -EIO;
2334 goto err_test;
2335 }
2336
2337 /* Initialize wait queues & mutual exclusion flags */
2338 init_waitqueue_head(&tsi148_device->dma_queue[0]);
2339 init_waitqueue_head(&tsi148_device->dma_queue[1]);
2340 init_waitqueue_head(&tsi148_device->iack_queue);
2341 mutex_init(&tsi148_device->vme_int);
2342 mutex_init(&tsi148_device->vme_rmw);
2343
2344 tsi148_bridge->parent = &pdev->dev;
2345 strcpy(tsi148_bridge->name, driver_name);
2346
2347 /* Setup IRQ */
2348 retval = tsi148_irq_init(tsi148_bridge);
2349 if (retval != 0) {
2350 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2351 goto err_irq;
2352 }
2353
2354 /* If we are going to flush writes, we need to read from the VME bus.
2355 * We need to do this safely, thus we read the device's own CR/CSR
2356 * register. To do this we must set up a window in CR/CSR space and
2357 * hence have one less master window resource available.
2358 */
2359 master_num = TSI148_MAX_MASTER;
2360 if (err_chk) {
2361 master_num--;
2362
2363 tsi148_device->flush_image =
2364 kmalloc(sizeof(*tsi148_device->flush_image),
2365 GFP_KERNEL);
2366 if (!tsi148_device->flush_image) {
2367 retval = -ENOMEM;
2368 goto err_master;
2369 }
2370 tsi148_device->flush_image->parent = tsi148_bridge;
2371 spin_lock_init(&tsi148_device->flush_image->lock);
2372 tsi148_device->flush_image->locked = 1;
2373 tsi148_device->flush_image->number = master_num;
2374 memset(&tsi148_device->flush_image->bus_resource, 0,
2375 sizeof(tsi148_device->flush_image->bus_resource));
2376 tsi148_device->flush_image->kern_base = NULL;
2377 }
2378
2379 /* Add master windows to list */
2380 for (i = 0; i < master_num; i++) {
2381 master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
2382 if (!master_image) {
2383 retval = -ENOMEM;
2384 goto err_master;
2385 }
2386 master_image->parent = tsi148_bridge;
2387 spin_lock_init(&master_image->lock);
2388 master_image->locked = 0;
2389 master_image->number = i;
2390 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2391 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2392 VME_USER3 | VME_USER4;
2393 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2394 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2395 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2396 VME_PROG | VME_DATA;
2397 master_image->width_attr = VME_D16 | VME_D32;
2398 memset(&master_image->bus_resource, 0,
2399 sizeof(master_image->bus_resource));
2400 master_image->kern_base = NULL;
2401 list_add_tail(&master_image->list,
2402 &tsi148_bridge->master_resources);
2403 }
2404
2405 /* Add slave windows to list */
2406 for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2407 slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
2408 if (!slave_image) {
2409 retval = -ENOMEM;
2410 goto err_slave;
2411 }
2412 slave_image->parent = tsi148_bridge;
2413 mutex_init(&slave_image->mtx);
2414 slave_image->locked = 0;
2415 slave_image->number = i;
2416 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2417 VME_A64;
2418 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2419 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2420 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2421 VME_PROG | VME_DATA;
2422 list_add_tail(&slave_image->list,
2423 &tsi148_bridge->slave_resources);
2424 }
2425
2426 /* Add dma engines to list */
2427 for (i = 0; i < TSI148_MAX_DMA; i++) {
2428 dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
2429 if (!dma_ctrlr) {
2430 retval = -ENOMEM;
2431 goto err_dma;
2432 }
2433 dma_ctrlr->parent = tsi148_bridge;
2434 mutex_init(&dma_ctrlr->mtx);
2435 dma_ctrlr->locked = 0;
2436 dma_ctrlr->number = i;
2437 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2438 VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2439 VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2440 VME_DMA_PATTERN_TO_MEM;
2441 INIT_LIST_HEAD(&dma_ctrlr->pending);
2442 INIT_LIST_HEAD(&dma_ctrlr->running);
2443 list_add_tail(&dma_ctrlr->list,
2444 &tsi148_bridge->dma_resources);
2445 }
2446
2447 /* Add location monitor to list */
2448 lm = kmalloc(sizeof(*lm), GFP_KERNEL);
2449 if (!lm) {
2450 retval = -ENOMEM;
2451 goto err_lm;
2452 }
2453 lm->parent = tsi148_bridge;
2454 mutex_init(&lm->mtx);
2455 lm->locked = 0;
2456 lm->number = 1;
2457 lm->monitors = 4;
2458 list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
2459
2460 tsi148_bridge->slave_get = tsi148_slave_get;
2461 tsi148_bridge->slave_set = tsi148_slave_set;
2462 tsi148_bridge->master_get = tsi148_master_get;
2463 tsi148_bridge->master_set = tsi148_master_set;
2464 tsi148_bridge->master_read = tsi148_master_read;
2465 tsi148_bridge->master_write = tsi148_master_write;
2466 tsi148_bridge->master_rmw = tsi148_master_rmw;
2467 tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2468 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2469 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2470 tsi148_bridge->irq_set = tsi148_irq_set;
2471 tsi148_bridge->irq_generate = tsi148_irq_generate;
2472 tsi148_bridge->lm_set = tsi148_lm_set;
2473 tsi148_bridge->lm_get = tsi148_lm_get;
2474 tsi148_bridge->lm_attach = tsi148_lm_attach;
2475 tsi148_bridge->lm_detach = tsi148_lm_detach;
2476 tsi148_bridge->slot_get = tsi148_slot_get;
2477 tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
2478 tsi148_bridge->free_consistent = tsi148_free_consistent;
2479
2480 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2481 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2482 (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2483 if (!geoid)
2484 dev_info(&pdev->dev, "VME geographical address is %d\n",
2485 data & TSI148_LCSR_VSTAT_GA_M);
2486 else
2487 dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2488 geoid);
2489
2490 dev_info(&pdev->dev, "VME write flush and error checking is %s\n",
2491 err_chk ? "enabled" : "disabled");
2492
2493 retval = tsi148_crcsr_init(tsi148_bridge, pdev);
2494 if (retval) {
2495 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2496 goto err_crcsr;
2497 }
2498
2499 retval = vme_register_bridge(tsi148_bridge);
2500 if (retval != 0) {
2501 dev_err(&pdev->dev, "Chip Registration failed.\n");
2502 goto err_reg;
2503 }
2504
2505 pci_set_drvdata(pdev, tsi148_bridge);
2506
2507 /* Clear VME bus "board fail", and "power-up reset" lines */
2508 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2509 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2510 data |= TSI148_LCSR_VSTAT_CPURST;
2511 iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2512
2513 return 0;
2514
2515 err_reg:
2516 tsi148_crcsr_exit(tsi148_bridge, pdev);
2517 err_crcsr:
2518 err_lm:
2519 /* resources are stored in a linked list */
2520 list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) {
2521 lm = list_entry(pos, struct vme_lm_resource, list);
2522 list_del(pos);
2523 kfree(lm);
2524 }
2525 err_dma:
2526 /* resources are stored in a linked list */
2527 list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) {
2528 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2529 list_del(pos);
2530 kfree(dma_ctrlr);
2531 }
2532 err_slave:
2533 /* resources are stored in a linked list */
2534 list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) {
2535 slave_image = list_entry(pos, struct vme_slave_resource, list);
2536 list_del(pos);
2537 kfree(slave_image);
2538 }
2539 err_master:
2540 /* resources are stored in a linked list */
2541 list_for_each_safe(pos, n, &tsi148_bridge->master_resources) {
2542 master_image = list_entry(pos, struct vme_master_resource,
2543 list);
2544 list_del(pos);
2545 kfree(master_image);
2546 }
2547
2548 tsi148_irq_exit(tsi148_bridge, pdev);
2549 err_irq:
2550 err_test:
2551 iounmap(tsi148_device->base);
2552 err_remap:
2553 pci_release_regions(pdev);
2554 err_resource:
2555 pci_disable_device(pdev);
2556 err_enable:
2557 kfree(tsi148_device);
2558 err_driver:
2559 kfree(tsi148_bridge);
2560 err_struct:
2561 return retval;
2562
2563 }
2564
2565 static void tsi148_remove(struct pci_dev *pdev)
2566 {
2567 struct list_head *pos = NULL;
2568 struct list_head *tmplist;
2569 struct vme_master_resource *master_image;
2570 struct vme_slave_resource *slave_image;
2571 struct vme_dma_resource *dma_ctrlr;
2572 int i;
2573 struct tsi148_driver *bridge;
2574 struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2575
2576 bridge = tsi148_bridge->driver_priv;
2577
2578
2579 dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2580
2581 /*
2582 * Shutdown all inbound and outbound windows.
2583 */
2584 for (i = 0; i < 8; i++) {
2585 iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2586 TSI148_LCSR_OFFSET_ITAT);
2587 iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2588 TSI148_LCSR_OFFSET_OTAT);
2589 }
2590
2591 /*
2592 * Shutdown Location monitor.
2593 */
2594 iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
2595
2596 /*
2597 * Shutdown CRG map.
2598 */
2599 iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2600
2601 /*
2602 * Clear error status.
2603 */
2604 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2605 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2606 iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2607
2608 /*
2609 * Remove VIRQ interrupt (if any)
2610 */
2611 if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2612 iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2613
2614 /*
2615 * Map all Interrupts to PCI INTA
2616 */
2617 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2618 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2619
2620 tsi148_irq_exit(tsi148_bridge, pdev);
2621
2622 vme_unregister_bridge(tsi148_bridge);
2623
2624 tsi148_crcsr_exit(tsi148_bridge, pdev);
2625
2626 /* resources are stored in a linked list */
2627 list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
2628 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2629 list_del(pos);
2630 kfree(dma_ctrlr);
2631 }
2632
2633 /* resources are stored in a linked list */
2634 list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
2635 slave_image = list_entry(pos, struct vme_slave_resource, list);
2636 list_del(pos);
2637 kfree(slave_image);
2638 }
2639
2640 /* resources are stored in a linked list */
2641 list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
2642 master_image = list_entry(pos, struct vme_master_resource,
2643 list);
2644 list_del(pos);
2645 kfree(master_image);
2646 }
2647
2648 iounmap(bridge->base);
2649
2650 pci_release_regions(pdev);
2651
2652 pci_disable_device(pdev);
2653
2654 kfree(tsi148_bridge->driver_priv);
2655
2656 kfree(tsi148_bridge);
2657 }
2658
2659 module_pci_driver(tsi148_driver);
2660
2661 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2662 module_param(err_chk, bool, 0);
2663
2664 MODULE_PARM_DESC(geoid, "Override geographical addressing");
2665 module_param(geoid, int, 0);
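
/*
 * Example (values are illustrative): both parameters can be given at module
 * load time, e.g.:
 *
 *	modprobe vme_tsi148 err_chk=1 geoid=3
 *
 * err_chk enables the flushed, error-checked write path set up in
 * tsi148_crcsr_init(); geoid overrides the geographic address otherwise
 * read from the VSTAT register.
 */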
2666
2667 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2668 MODULE_LICENSE("GPL");
2669