Lines matching refs:dma (every line in the driver source that references dma, shown with its file line number)

83 #include <linux/dma-mapping.h>
315 struct i596_dma *dma;
370 return lp->dma_addr + ((unsigned long)v - (unsigned long)lp->dma);
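A minimal sketch of the helper behind the match at file line 370, assuming lp->dma is a single coherent descriptor block and lp->dma_addr is its device-side handle (the i596_private struct name is an assumption; the field names come from the listing):

    /* Rebase the CPU pointer's offset inside the shared descriptor block
     * onto the bus address the 82596 was given for that block. */
    static inline dma_addr_t virt_to_dma(struct i596_private *lp, volatile void *v)
    {
            return lp->dma_addr + ((unsigned long)v - (unsigned long)lp->dma);
    }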
400 static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
402 dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
403 while (--delcnt && dma->iscp.stat) {
405 dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
409 dev->name, str, SWAP16(dma->iscp.stat));
416 static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
418 dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
419 while (--delcnt && dma->scb.command) {
421 dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
426 SWAP16(dma->scb.status),
427 SWAP16(dma->scb.command));
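The two wait helpers at file lines 400 and 416 share one polling idiom; a sketch of wait_cmd(), where the udelay(10) interval, the exact message format and the -1/0 return convention are assumptions (only the sync-then-test loop and the two SWAP16 arguments are taken from the listing):

    static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma,
                               int delcnt, char *str)
    {
            /* pull the SCB back into the CPU's view before every read */
            dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
            while (--delcnt && dma->scb.command) {
                    udelay(10);                     /* assumed poll interval */
                    dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
            }
            if (!delcnt) {
                    printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
                           dev->name, str,
                           SWAP16(dma->scb.status),
                           SWAP16(dma->scb.command));
                    return -1;
            }
            return 0;
    }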
437 struct i596_dma *dma = lp->dma;
443 &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
445 &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
448 &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
449 SWAP16(dma->scb.cmd), SWAP32(dma->scb.rfd));
452 SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
453 SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
454 SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
485 dma_sync_cpu(dev, dma, sizeof(struct i596_dma));
491 struct i596_dma *dma = lp->dma;
498 for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
515 lp->rbd_head = dma->rbds;
516 rbd = dma->rbds + rx_ring_size - 1;
517 rbd->v_next = dma->rbds;
518 rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));
522 for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
529 lp->rfd_head = dma->rfds;
530 dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
531 rfd = dma->rfds;
533 rfd->v_prev = dma->rfds + rx_ring_size - 1;
534 rfd = dma->rfds + rx_ring_size - 1;
535 rfd->v_next = dma->rfds;
536 rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
539 dma_sync_dev(dev, dma, sizeof(struct i596_dma));
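The receive-ring setup at file lines 498-539 keeps a kernel pointer (v_next) and an 82596-visible bus address (b_next) for every descriptor; a sketch of how the ring is closed and then published to the device in one sync, with that field pairing as the assumption being illustrated:

    /* last RBD and last RFD wrap back to the first element */
    rbd = dma->rbds + rx_ring_size - 1;
    rbd->v_next = dma->rbds;                            /* CPU-side link    */
    rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));   /* device-side link */

    rfd = dma->rfds + rx_ring_size - 1;
    rfd->v_next = dma->rfds;
    rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));

    /* the chip starts receiving at the first RFD */
    dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));

    /* one flush makes the whole descriptor block visible to the 82596 */
    dma_sync_dev(dev, dma, sizeof(struct i596_dma));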
549 for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
563 struct i596_dma *dma = lp->dma;
569 dma->rfds[i].rbd = I596_NULL;
570 dma->rfds[i].cmd = SWAP16(CMD_FLEX);
572 dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
573 lp->rfd_head = dma->rfds;
574 dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
575 lp->rbd_head = dma->rbds;
576 dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));
578 dma_sync_dev(dev, dma, sizeof(struct i596_dma));
585 struct i596_dma *dma = lp->dma;
595 dma->scp.sysbus = SYSBUS;
596 dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
597 dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
598 dma->iscp.stat = SWAP32(ISCP_BUSY);
602 dma->scb.cmd = I596_NULL;
606 dma_sync_dev(dev, &(dma->scp), sizeof(struct i596_scp));
607 dma_sync_dev(dev, &(dma->iscp), sizeof(struct i596_iscp));
608 dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
610 mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
612 if (wait_istat(dev, dma, 1000, "initialization timed out"))
626 dma->scb.command = 0;
627 dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
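The bring-up sequence at file lines 595-627 chains the three shared structures by bus address before the chip is told where to look; a sketch, with ca() (channel attention) and the error path as assumptions beyond what the listing shows:

    dma->scp.sysbus = SYSBUS;
    dma->scp.iscp  = SWAP32(virt_to_dma(lp, &dma->iscp));   /* SCP -> ISCP */
    dma->iscp.scb  = SWAP32(virt_to_dma(lp, &dma->scb));    /* ISCP -> SCB */
    dma->iscp.stat = SWAP32(ISCP_BUSY);                     /* chip clears this */

    dma_sync_dev(dev, &dma->scp,  sizeof(struct i596_scp));
    dma_sync_dev(dev, &dma->iscp, sizeof(struct i596_iscp));
    dma_sync_dev(dev, &dma->scb,  sizeof(struct i596_scb));

    /* hand the chip the SCP bus address, then kick it */
    mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
    ca(dev);                                /* assumed channel-attention helper */
    if (wait_istat(dev, dma, 1000, "initialization timed out"))
            goto failed;                    /* assumed error path */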
631 memcpy(dma->cf_cmd.i596_config, init_setup, 14);
632 dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
633 dma_sync_dev(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
634 i596_add_cmd(dev, &dma->cf_cmd.cmd);
637 memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
638 dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
639 dma_sync_dev(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
640 i596_add_cmd(dev, &dma->sa_cmd.cmd);
643 dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
644 dma_sync_dev(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
645 i596_add_cmd(dev, &dma->tdr_cmd.cmd);
649 if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
654 dma->scb.command = SWAP16(RX_START);
655 dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
656 dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
661 if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
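Issuing RX_START at file lines 649-661 follows the SCB command handshake; a sketch, again treating ca() and the error path as assumptions:

    if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START"))
            goto failed;                            /* assumed error path */
    dma->scb.command = SWAP16(RX_START);
    dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
    dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
    ca(dev);                                        /* assumed channel attention */
    if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
            goto failed;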
808 lp->dma->scb.rfd = rfd->b_next;
861 wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
862 lp->dma->scb.cmd = I596_NULL;
863 dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
875 wait_cmd(dev, lp->dma, 100, "i596_reset timed out");
880 lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
881 dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
885 wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
899 struct i596_dma *dma = lp->dma;
919 wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
920 dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
921 dma->scb.command = SWAP16(CUC_START);
922 dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
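Queuing a command block in i596_add_cmd() (file lines 919-922) points the SCB at the command's status word and restarts the command unit; a sketch of just that tail, with ca() assumed:

    wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
    dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));   /* link the block  */
    dma->scb.command = SWAP16(CUC_START);                   /* restart the CU  */
    dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
    ca(dev);                                                /* assumed kick    */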
986 lp->dma->scb.command = SWAP16(CUC_START | RX_START);
987 dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
1016 tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
1017 tbd = lp->dma->tbds + lp->next_tx_cmd;
1096 memset(lp->dma, 0, sizeof(struct i596_dma));
1097 lp->dma->scb.command = 0;
1098 lp->dma->scb.cmd = I596_NULL;
1099 lp->dma->scb.rfd = I596_NULL;
1102 dma_sync_dev(dev, lp->dma, sizeof(struct i596_dma));
1112 "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
1113 dev->name, lp->dma, (int)sizeof(struct i596_dma),
1114 &lp->dma->scb));
1132 struct i596_dma *dma;
1136 dma = lp->dma;
1140 wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
1141 status = SWAP16(dma->scb.status);
1273 dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
1274 dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
1297 wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
1298 dma->scb.command = SWAP16(ack_cmd);
1299 dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
1307 wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
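The interrupt handler's acknowledge step at file lines 1297-1307 writes the accumulated ACK bits back through the SCB; a sketch, where ack_cmd (the OR of the status bits being acknowledged) and ca() are assumptions about the surrounding code:

    wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
    dma->scb.command = SWAP16(ack_cmd);         /* assumed: ACK bits from scb.status */
    dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
    ca(dev);                                    /* assumed channel attention */
    wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");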
1324 dev->name, SWAP16(lp->dma->scb.status)));
1328 wait_cmd(dev, lp->dma, 100, "close1 timed out");
1329 lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
1330 dma_sync_dev(dev, &lp->dma->scb, sizeof(struct i596_scb));
1334 wait_cmd(dev, lp->dma, 100, "close2 timed out");
1352 struct i596_dma *dma = lp->dma;
1363 !(dma->cf_cmd.i596_config[8] & 0x01)) {
1364 dma->cf_cmd.i596_config[8] |= 0x01;
1368 (dma->cf_cmd.i596_config[8] & 0x01)) {
1369 dma->cf_cmd.i596_config[8] &= ~0x01;
1373 (dma->cf_cmd.i596_config[11] & 0x20)) {
1374 dma->cf_cmd.i596_config[11] &= ~0x20;
1378 !(dma->cf_cmd.i596_config[11] & 0x20)) {
1379 dma->cf_cmd.i596_config[11] |= 0x20;
1383 if (dma->cf_cmd.cmd.command)
1388 dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
1389 dma_sync_dev(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
1390 i596_add_cmd(dev, &dma->cf_cmd.cmd);
1406 cmd = &dma->mc_cmd;
1421 dma_sync_dev(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
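The set_multicast_list matches at file lines 1352-1421 toggle bits in the cached 14-byte configure block and re-issue CmdConfigure; a condensed sketch of the promiscuous case, where the meaning of byte 8 bit 0 is inferred from the test in the listing and the immediate re-issue simplifies the real flow:

    if ((dev->flags & IFF_PROMISC) &&
        !(dma->cf_cmd.i596_config[8] & 0x01)) {
            dma->cf_cmd.i596_config[8] |= 0x01;             /* inferred: promiscuous bit */
            dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
            dma_sync_dev(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
            i596_add_cmd(dev, &dma->cf_cmd.cmd);            /* queue the reconfigure */
    }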