// SPDX-License-Identifier: GPL-1.0+
/*
 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
 *
 * Device driver for Microgate SyncLink ISA and PCI
 * high speed multiprotocol serial adapters.
 *
 * written by Paul Fulghum for Microgate Corporation
 * paulkf@microgate.com
 *
 * Microgate and SyncLink are trademarks of Microgate Corporation
 *
 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
 *
 * Original release 01/11/99
 *
 * This driver is primarily intended for use in synchronous
 * HDLC mode. Asynchronous mode is also provided.
 *
 * When operating in synchronous mode, each call to mgsl_write()
 * contains exactly one complete HDLC frame. Calling mgsl_put_char
 * will start assembling an HDLC frame that will not be sent until
 * mgsl_flush_chars or mgsl_write is called.
 *
 * Synchronous receive data is reported as complete frames. To accomplish
 * this, the TTY flip buffer is bypassed (too small to hold largest
 * frame and may fragment frames) and the line discipline
 * receive entry point is called directly.
 *
 * This driver has been tested with a slightly modified ppp.c driver
 * for synchronous PPP.
 *
 * 2000/02/16
 * Added interface for syncppp.c driver (an alternate synchronous PPP
 * implementation that also supports Cisco HDLC). Each device instance
 * registers as a tty device AND a network device (if dosyncppp option
 * is set for the device). The functionality is determined by which
 * device interface is opened.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */
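/*
 * Illustrative only: a minimal user-space sketch (not part of this driver)
 * of the one-frame-per-write() model described above. It assumes a SyncLink
 * tty node name of /dev/ttySL0 and uses the MGSL_PARAMS structure and the
 * MGSL_IOCGPARAMS/MGSL_IOCSPARAMS ioctls from <linux/synclink.h>; error
 * handling is omitted.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/synclink.h>
 *
 *	int send_one_frame(const unsigned char *frame, int len)
 *	{
 *		MGSL_PARAMS params;
 *		int fd = open("/dev/ttySL0", O_RDWR);	// hypothetical node name
 *
 *		ioctl(fd, MGSL_IOCGPARAMS, &params);	// read current settings
 *		params.mode = MGSL_MODE_HDLC;		// synchronous HDLC
 *		ioctl(fd, MGSL_IOCSPARAMS, &params);	// apply settings
 *
 *		write(fd, frame, len);			// exactly one HDLC frame
 *		close(fd);
 *		return 0;
 *	}
 */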
#if defined(__i386__)
#  define BREAKPOINT() asm(" int $3");
#else
#  define BREAKPOINT() { }
#endif

#define MAX_ISA_DEVICES 10
#define MAX_PCI_DEVICES 10
#define MAX_TOTAL_DEVICES 20

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/synclink.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/dma.h>
#include <linux/bitops.h>
#include <asm/types.h>
#include <linux/termios.h>
#include <linux/workqueue.h>
#include <linux/hdlc.h>
#include <linux/dma-mapping.h>

#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
#define SYNCLINK_GENERIC_HDLC 1
#else
#define SYNCLINK_GENERIC_HDLC 0
#endif

#define GET_USER(error,value,addr) error = get_user(value,addr)
#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
#define PUT_USER(error,value,addr) error = put_user(value,addr)
#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0

#include <linux/uaccess.h>

#define RCLRVALUE 0xffff

static MGSL_PARAMS default_params = {
	MGSL_MODE_HDLC,			/* unsigned long mode */
	0,				/* unsigned char loopback; */
	HDLC_FLAG_UNDERRUN_ABORT15,	/* unsigned short flags; */
	HDLC_ENCODING_NRZI_SPACE,	/* unsigned char encoding; */
	0,				/* unsigned long clock_speed; */
	0xff,				/* unsigned char addr_filter; */
	HDLC_CRC_16_CCITT,		/* unsigned short crc_type; */
	HDLC_PREAMBLE_LENGTH_8BITS,	/* unsigned char preamble_length; */
	HDLC_PREAMBLE_PATTERN_NONE,	/* unsigned char preamble; */
	9600,				/* unsigned long data_rate; */
	8,				/* unsigned char data_bits; */
	1,				/* unsigned char stop_bits; */
	ASYNC_PARITY_NONE		/* unsigned char parity; */
};

#define SHARED_MEM_ADDRESS_SIZE 0x40000
#define BUFFERLISTSIZE 4096
#define DMABUFFERSIZE 4096
#define MAXRXFRAMES 7

typedef struct _DMABUFFERENTRY
{
	u32 phys_addr;		/* 32-bit flat physical address of data buffer */
	volatile u16 count;	/* buffer size/data count */
	volatile u16 status;	/* Control/status field */
	volatile u16 rcc;	/* character count field */
	u16 reserved;		/* padding required by 16C32 */
	u32 link;		/* 32-bit flat link to next buffer entry */
	char *virt_addr;	/* virtual address of data buffer */
	u32 phys_entry;		/* physical address of this buffer entry */
	dma_addr_t dma_addr;
} DMABUFFERENTRY, *DMAPBUFFERENTRY;

/* The queue of BH actions to be performed */

#define BH_RECEIVE  1
#define BH_TRANSMIT 2
#define BH_STATUS   4

#define IO_PIN_SHUTDOWN_LIMIT 100

struct _input_signal_events {
	int ri_up;
	int ri_down;
	int dsr_up;
	int dsr_down;
	int dcd_up;
	int dcd_down;
	int cts_up;
	int cts_down;
};

/* transmit holding buffer definitions */
#define MAX_TX_HOLDING_BUFFERS 5
struct tx_holding_buffer {
	int buffer_size;
	unsigned char *buffer;
};
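/*
 * Illustrative only: a sketch (not driver code) of how the DMABUFFERENTRY
 * descriptors above can be chained through their 'link' fields so the DMA
 * controller walks them as a list. The helper name and the wrap-around of
 * the last entry back to the first are assumptions for illustration.
 *
 *	static void link_buffer_entries(DMABUFFERENTRY *list, int count)
 *	{
 *		int i;
 *
 *		for (i = 0; i < count; i++) {
 *			// point each entry at the physical address of the
 *			// next entry, wrapping the last back to the first
 *			list[i].link = list[(i + 1) % count].phys_entry;
 *		}
 *	}
 */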
/*
 * Device instance data structure
 */

struct mgsl_struct {
	int			magic;
	struct tty_port		port;
	int			line;
	int			hw_version;

	struct mgsl_icount	icount;

	int			timeout;
	int			x_char;		/* xon/xoff character */
	u16			read_status_mask;
	u16			ignore_status_mask;
	unsigned char		*xmit_buf;
	int			xmit_head;
	int			xmit_tail;
	int			xmit_cnt;

	wait_queue_head_t	status_event_wait_q;
	wait_queue_head_t	event_wait_q;
	struct timer_list	tx_timer;	/* HDLC transmit timeout timer */
	struct mgsl_struct	*next_device;	/* device list link */

	spinlock_t irq_spinlock;	/* spinlock for synchronizing with ISR */
	struct work_struct task;	/* task structure for scheduling bh */

	u32 EventMask;			/* event trigger mask */
	u32 RecordedEvents;		/* pending events */

	u32 max_frame_size;		/* as set by device config */

	u32 pending_bh;

	bool bh_running;		/* Protection from multiple */
	int isr_overflow;
	bool bh_requested;

	int dcd_chkcount;		/* check counts to prevent */
	int cts_chkcount;		/* too many IRQs if a signal */
	int dsr_chkcount;		/* is floating */
	int ri_chkcount;

	char *buffer_list;		/* virtual address of Rx & Tx buffer lists */
	u32 buffer_list_phys;
	dma_addr_t buffer_list_dma_addr;

	unsigned int rx_buffer_count;	/* count of total allocated Rx buffers */
	DMABUFFERENTRY *rx_buffer_list;	/* list of receive buffer entries */
	unsigned int current_rx_buffer;

	int num_tx_dma_buffers;		/* number of tx dma frames required */
	int tx_dma_buffers_used;
	unsigned int tx_buffer_count;	/* count of total allocated Tx buffers */
	DMABUFFERENTRY *tx_buffer_list;	/* list of transmit buffer entries */
	int start_tx_dma_buffer;	/* tx dma buffer to start tx dma operation */
	int current_tx_buffer;		/* next tx dma buffer to be loaded */

	unsigned char *intermediate_rxbuffer;

	int num_tx_holding_buffers;	/* number of tx holding buffers allocated */
	int get_tx_holding_index;	/* next tx holding buffer for adapter to load */
	int put_tx_holding_index;	/* next tx holding buffer to store user request */
	int tx_holding_count;		/* number of tx holding buffers waiting */
	struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];

	bool rx_enabled;
	bool rx_overflow;
	bool rx_rcc_underrun;

	bool tx_enabled;
	bool tx_active;
	u32 idle_mode;

	u16 cmr_value;
	u16 tcsr_value;

	char device_name[25];		/* device instance name */

	unsigned char bus;		/* expansion bus number (zero based) */
	unsigned char function;		/* PCI device number */

	unsigned int io_base;		/* base I/O address of adapter */
	unsigned int io_addr_size;	/* size of the I/O address range */
	bool io_addr_requested;		/* true if I/O address requested */

	unsigned int irq_level;		/* interrupt level */
	unsigned long irq_flags;
	bool irq_requested;		/* true if IRQ requested */

	unsigned int dma_level;		/* DMA channel */
	bool dma_requested;		/* true if dma channel requested */

	u16 mbre_bit;
	u16 loopback_bits;
	u16 usc_idle_mode;

	MGSL_PARAMS params;		/* communications parameters */

	unsigned char serial_signals;	/* current serial signal states */

	bool irq_occurred;		/* for diagnostics use */
	unsigned int init_error;	/* Initialization startup error (DIAGS) */
	int fDiagnosticsmode;		/* Driver in Diagnostic mode? (DIAGS) */
	u32 last_mem_alloc;
	unsigned char *memory_base;	/* shared memory address (PCI only) */
	u32 phys_memory_base;
	bool shared_mem_requested;

	unsigned char *lcr_base;	/* local config registers (PCI only) */
	u32 phys_lcr_base;
	u32 lcr_offset;
	bool lcr_mem_requested;

	u32 misc_ctrl_value;
	char *flag_buf;
	bool drop_rts_on_tx_done;

	bool loopmode_insert_requested;
	bool loopmode_send_done_requested;

	struct _input_signal_events input_signal_events;

	/* generic HDLC device parts */
	int netcount;
	spinlock_t netlock;

#if SYNCLINK_GENERIC_HDLC
	struct net_device *netdev;
#endif
};

#define MGSL_MAGIC 0x5401

/*
 * The size of the serial xmit buffer is 1 page, or 4096 bytes
 */
#ifndef SERIAL_XMIT_SIZE
#define SERIAL_XMIT_SIZE 4096
#endif

/*
 * These macros define the offsets used in calculating the
 * I/O address of the specified USC registers.
 */

#define DCPIN 2		/* Bit 1 of I/O address */
#define SDPIN 4		/* Bit 2 of I/O address */

#define DCAR 0		/* DMA command/address register */
#define CCAR SDPIN	/* channel command/address register */
#define DATAREG DCPIN + SDPIN	/* serial data register */
#define MSBONLY 0x41
#define LSBONLY 0x40

/*
 * These macros define the register address (ordinal number)
 * used for writing address/value pairs to the USC.
 */

#define CMR	0x02	/* Channel mode Register */
#define CCSR	0x04	/* Channel Command/status Register */
#define CCR	0x06	/* Channel Control Register */
#define PSR	0x08	/* Port status Register */
#define PCR	0x0a	/* Port Control Register */
#define TMDR	0x0c	/* Test mode Data Register */
#define TMCR	0x0e	/* Test mode Control Register */
#define CMCR	0x10	/* Clock mode Control Register */
#define HCR	0x12	/* Hardware Configuration Register */
#define IVR	0x14	/* Interrupt Vector Register */
#define IOCR	0x16	/* Input/Output Control Register */
#define ICR	0x18	/* Interrupt Control Register */
#define DCCR	0x1a	/* Daisy Chain Control Register */
#define MISR	0x1c	/* Misc Interrupt status Register */
#define SICR	0x1e	/* status Interrupt Control Register */
#define RDR	0x20	/* Receive Data Register */
#define RMR	0x22	/* Receive mode Register */
#define RCSR	0x24	/* Receive Command/status Register */
#define RICR	0x26	/* Receive Interrupt Control Register */
#define RSR	0x28	/* Receive Sync Register */
#define RCLR	0x2a	/* Receive count Limit Register */
#define RCCR	0x2c	/* Receive Character count Register */
#define TC0R	0x2e	/* Time Constant 0 Register */
#define TDR	0x30	/* Transmit Data Register */
#define TMR	0x32	/* Transmit mode Register */
#define TCSR	0x34	/* Transmit Command/status Register */
#define TICR	0x36	/* Transmit Interrupt Control Register */
#define TSR	0x38	/* Transmit Sync Register */
#define TCLR	0x3a	/* Transmit count Limit Register */
#define TCCR	0x3c	/* Transmit Character count Register */
#define TC1R	0x3e	/* Time Constant 1 Register */


/*
 * MACRO DEFINITIONS FOR DMA REGISTERS
 */

#define DCR	0x06	/* DMA Control Register (shared) */
#define DACR	0x08	/* DMA Array count Register (shared) */
#define BDCR	0x12	/* Burst/Dwell Control Register (shared) */
#define DIVR	0x14	/* DMA Interrupt Vector Register (shared) */
#define DICR	0x18	/* DMA Interrupt Control Register (shared) */
#define CDIR	0x1a	/* Clear DMA Interrupt Register (shared) */
#define SDIR	0x1c	/* Set DMA Interrupt Register (shared) */

#define TDMR	0x02	/* Transmit DMA mode Register */
#define TDIAR	0x1e	/* Transmit DMA Interrupt Arm Register */
#define TBCR	0x2a	/* Transmit Byte count Register */
#define TARL	0x2c	/* Transmit Address Register (low) */
#define TARU	0x2e	/* Transmit Address Register (high) */
#define NTBCR	0x3a	/* Next Transmit Byte count Register */
#define NTARL	0x3c	/* Next Transmit Address Register (low) */
#define NTARU	0x3e	/* Next Transmit Address Register (high) */

#define RDMR	0x82	/* Receive DMA mode Register (non-shared) */
#define RDIAR	0x9e	/* Receive DMA Interrupt Arm Register */
#define RBCR	0xaa	/* Receive Byte count Register */
#define RARL	0xac	/* Receive Address Register (low) */
#define RARU	0xae	/* Receive Address Register (high) */
#define NRBCR	0xba	/* Next Receive Byte count Register */
#define NRARL	0xbc	/* Next Receive Address Register (low) */
#define NRARU	0xbe	/* Next Receive Address Register (high) */


/*
 * MACRO DEFINITIONS FOR MODEM STATUS BITS
 */

#define MODEMSTATUS_DTR 0x80
#define MODEMSTATUS_DSR 0x40
#define MODEMSTATUS_RTS 0x20
#define MODEMSTATUS_CTS 0x10
#define MODEMSTATUS_RI  0x04
#define MODEMSTATUS_DCD 0x01


/*
 * Channel Command/Address Register (CCAR) Command Codes
 */

#define RTCmd_Null			0x0000
#define RTCmd_ResetHighestIus		0x1000
#define RTCmd_TriggerChannelLoadDma	0x2000
#define RTCmd_TriggerRxDma		0x2800
#define RTCmd_TriggerTxDma		0x3000
#define RTCmd_TriggerRxAndTxDma		0x3800
#define RTCmd_PurgeRxFifo		0x4800
#define RTCmd_PurgeTxFifo		0x5000
#define RTCmd_PurgeRxAndTxFifo		0x5800
#define RTCmd_LoadRcc			0x6800
#define RTCmd_LoadTcc			0x7000
#define RTCmd_LoadRccAndTcc		0x7800
#define RTCmd_LoadTC0			0x8800
#define RTCmd_LoadTC1			0x9000
#define RTCmd_LoadTC0AndTC1		0x9800
#define RTCmd_SerialDataLSBFirst	0xa000
#define RTCmd_SerialDataMSBFirst	0xa800
#define RTCmd_SelectBigEndian		0xb000
#define RTCmd_SelectLittleEndian	0xb800


/*
 * DMA Command/Address Register (DCAR) Command Codes
 */

#define DmaCmd_Null			0x0000
#define DmaCmd_ResetTxChannel		0x1000
#define DmaCmd_ResetRxChannel		0x1200
#define DmaCmd_StartTxChannel		0x2000
#define DmaCmd_StartRxChannel		0x2200
#define DmaCmd_ContinueTxChannel	0x3000
#define DmaCmd_ContinueRxChannel	0x3200
#define DmaCmd_PauseTxChannel		0x4000
#define DmaCmd_PauseRxChannel		0x4200
#define DmaCmd_AbortTxChannel		0x5000
#define DmaCmd_AbortRxChannel		0x5200
#define DmaCmd_InitTxChannel		0x7000
#define DmaCmd_InitRxChannel		0x7200
#define DmaCmd_ResetHighestDmaIus	0x8000
#define DmaCmd_ResetAllChannels		0x9000
#define DmaCmd_StartAllChannels		0xa000
#define DmaCmd_ContinueAllChannels	0xb000
#define DmaCmd_PauseAllChannels		0xc000
#define DmaCmd_AbortAllChannels		0xd000
#define DmaCmd_InitAllChannels		0xf000

#define TCmd_Null			0x0000
#define TCmd_ClearTxCRC			0x2000
#define TCmd_SelectTicrTtsaData		0x4000
#define TCmd_SelectTicrTxFifostatus	0x5000
#define TCmd_SelectTicrIntLevel		0x6000
#define TCmd_SelectTicrdma_level	0x7000
#define TCmd_SendFrame			0x8000
#define TCmd_SendAbort			0x9000
#define TCmd_EnableDleInsertion		0xc000
#define TCmd_DisableDleInsertion	0xd000
#define TCmd_ClearEofEom		0xe000
#define TCmd_SetEofEom			0xf000
#define RCmd_Null			0x0000
#define RCmd_ClearRxCRC			0x2000
#define RCmd_EnterHuntmode		0x3000
#define RCmd_SelectRicrRtsaData		0x4000
#define RCmd_SelectRicrRxFifostatus	0x5000
#define RCmd_SelectRicrIntLevel		0x6000
#define RCmd_SelectRicrdma_level	0x7000

/*
 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
 */

#define RECEIVE_STATUS		BIT5
#define RECEIVE_DATA		BIT4
#define TRANSMIT_STATUS		BIT3
#define TRANSMIT_DATA		BIT2
#define IO_PIN			BIT1
#define MISC			BIT0


/*
 * Receive status Bits in Receive Command/status Register RCSR
 */

#define RXSTATUS_SHORT_FRAME		BIT8
#define RXSTATUS_CODE_VIOLATION		BIT8
#define RXSTATUS_EXITED_HUNT		BIT7
#define RXSTATUS_IDLE_RECEIVED		BIT6
#define RXSTATUS_BREAK_RECEIVED		BIT5
#define RXSTATUS_ABORT_RECEIVED		BIT5
#define RXSTATUS_RXBOUND		BIT4
#define RXSTATUS_CRC_ERROR		BIT3
#define RXSTATUS_FRAMING_ERROR		BIT3
#define RXSTATUS_ABORT			BIT2
#define RXSTATUS_PARITY_ERROR		BIT2
#define RXSTATUS_OVERRUN		BIT1
#define RXSTATUS_DATA_AVAILABLE		BIT0
#define RXSTATUS_ALL			0x01f6
#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )

/*
 * Values for setting transmit idle mode in
 * Transmit Control/status Register (TCSR)
 */
#define IDLEMODE_FLAGS			0x0000
#define IDLEMODE_ALT_ONE_ZERO		0x0100
#define IDLEMODE_ZERO			0x0200
#define IDLEMODE_ONE			0x0300
#define IDLEMODE_ALT_MARK_SPACE		0x0500
#define IDLEMODE_SPACE			0x0600
#define IDLEMODE_MARK			0x0700
#define IDLEMODE_MASK			0x0700

/*
 * IUSC revision identifiers
 */
#define IUSC_SL1660		0x4d44
#define IUSC_PRE_SL1660		0x4553

/*
 * Transmit status Bits in Transmit Command/status Register (TCSR)
 */

#define TCSR_PRESERVE			0x0F00

#define TCSR_UNDERWAIT			BIT11
#define TXSTATUS_PREAMBLE_SENT		BIT7
#define TXSTATUS_IDLE_SENT		BIT6
#define TXSTATUS_ABORT_SENT		BIT5
#define TXSTATUS_EOF_SENT		BIT4
#define TXSTATUS_EOM_SENT		BIT4
#define TXSTATUS_CRC_SENT		BIT3
#define TXSTATUS_ALL_SENT		BIT2
#define TXSTATUS_UNDERRUN		BIT1
#define TXSTATUS_FIFO_EMPTY		BIT0
#define TXSTATUS_ALL			0x00fa
#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )


#define MISCSTATUS_RXC_LATCHED		BIT15
#define MISCSTATUS_RXC			BIT14
#define MISCSTATUS_TXC_LATCHED		BIT13
#define MISCSTATUS_TXC			BIT12
#define MISCSTATUS_RI_LATCHED		BIT11
#define MISCSTATUS_RI			BIT10
#define MISCSTATUS_DSR_LATCHED		BIT9
#define MISCSTATUS_DSR			BIT8
#define MISCSTATUS_DCD_LATCHED		BIT7
#define MISCSTATUS_DCD			BIT6
#define MISCSTATUS_CTS_LATCHED		BIT5
#define MISCSTATUS_CTS			BIT4
#define MISCSTATUS_RCC_UNDERRUN		BIT3
#define MISCSTATUS_DPLL_NO_SYNC		BIT2
#define MISCSTATUS_BRG1_ZERO		BIT1
#define MISCSTATUS_BRG0_ZERO		BIT0

#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))

#define SICR_RXC_ACTIVE			BIT15
#define SICR_RXC_INACTIVE		BIT14
#define SICR_RXC			(BIT15|BIT14)
#define SICR_TXC_ACTIVE			BIT13
#define SICR_TXC_INACTIVE		BIT12
#define SICR_TXC			(BIT13|BIT12)
#define SICR_RI_ACTIVE			BIT11
#define SICR_RI_INACTIVE		BIT10
#define SICR_RI				(BIT11|BIT10)
#define SICR_DSR_ACTIVE			BIT9
#define SICR_DSR_INACTIVE		BIT8
#define SICR_DSR			(BIT9|BIT8)
#define SICR_DCD_ACTIVE			BIT7
#define SICR_DCD_INACTIVE		BIT6
#define SICR_DCD			(BIT7|BIT6)
#define SICR_CTS_ACTIVE			BIT5
#define SICR_CTS_INACTIVE		BIT4
#define SICR_CTS			(BIT5|BIT4)
#define SICR_RCC_UNDERFLOW		BIT3
#define SICR_DPLL_NO_SYNC		BIT2
#define SICR_BRG1_ZERO			BIT1
#define SICR_BRG0_ZERO			BIT0

void usc_DisableMasterIrqBit( struct mgsl_struct *info );
void usc_EnableMasterIrqBit( struct mgsl_struct *info );
void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );

#define usc_EnableInterrupts( a, b ) \
	usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )

#define usc_DisableInterrupts( a, b ) \
	usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )

#define usc_EnableMasterIrqBit(a) \
	usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )

#define usc_DisableMasterIrqBit(a) \
	usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )

#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )

/*
 * Transmit status Bits in Transmit Control status Register (TCSR)
 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
 */

#define TXSTATUS_PREAMBLE_SENT	BIT7
#define TXSTATUS_IDLE_SENT	BIT6
#define TXSTATUS_ABORT_SENT	BIT5
#define TXSTATUS_EOF		BIT4
#define TXSTATUS_CRC_SENT	BIT3
#define TXSTATUS_ALL_SENT	BIT2
#define TXSTATUS_UNDERRUN	BIT1
#define TXSTATUS_FIFO_EMPTY	BIT0

#define DICR_MASTER		BIT15
#define DICR_TRANSMIT		BIT0
#define DICR_RECEIVE		BIT1

#define usc_EnableDmaInterrupts(a,b) \
	usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )

#define usc_DisableDmaInterrupts(a,b) \
	usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )

#define usc_EnableStatusIrqs(a,b) \
	usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )

#define usc_DisablestatusIrqs(a,b) \
	usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )

/* Transmit status Bits in Transmit Control status Register (TCSR) */
/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */


#define DISABLE_UNCONDITIONAL    0
#define DISABLE_END_OF_FRAME     1
#define ENABLE_UNCONDITIONAL     2
#define ENABLE_AUTO_CTS          3
#define ENABLE_AUTO_DCD          3
#define usc_EnableTransmitter(a,b) \
	usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
#define usc_EnableReceiver(a,b) \
	usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )

static u16  usc_InDmaReg( struct mgsl_struct *info, u16 Port );
static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );

static u16  usc_InReg( struct mgsl_struct *info, u16 Port );
static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
void usc_TCmd( struct mgsl_struct *info, u16 Cmd );

#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))

#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))

static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
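/*
 * Illustrative only: the SICR_* masks above pair an ACTIVE and an INACTIVE
 * edge bit per signal, so passing the combined mask to usc_EnableStatusIrqs()
 * arms status interrupts for both transitions of that signal, e.g.:
 *
 *	usc_EnableStatusIrqs(info, SICR_DCD);			// both DCD edges
 *	usc_DisablestatusIrqs(info, SICR_DCD_INACTIVE);		// keep only DCD-active
 */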
static void usc_start_receiver( struct mgsl_struct *info );
static void usc_stop_receiver( struct mgsl_struct *info );

static void usc_start_transmitter( struct mgsl_struct *info );
static void usc_stop_transmitter( struct mgsl_struct *info );
static void usc_set_txidle( struct mgsl_struct *info );
static void usc_load_txfifo( struct mgsl_struct *info );

static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
static void usc_enable_loopback( struct mgsl_struct *info, int enable );

static void usc_get_serial_signals( struct mgsl_struct *info );
static void usc_set_serial_signals( struct mgsl_struct *info );

static void usc_reset( struct mgsl_struct *info );

static void usc_set_sync_mode( struct mgsl_struct *info );
static void usc_set_sdlc_mode( struct mgsl_struct *info );
static void usc_set_async_mode( struct mgsl_struct *info );
static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );

static void usc_loopback_frame( struct mgsl_struct *info );

static void mgsl_tx_timeout(struct timer_list *t);


static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
static void usc_loopmode_insert_request( struct mgsl_struct * info );
static int usc_loopmode_active( struct mgsl_struct * info);
static void usc_loopmode_send_done( struct mgsl_struct * info );

static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);

#if SYNCLINK_GENERIC_HDLC
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
static void hdlcdev_tx_done(struct mgsl_struct *info);
static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
static int  hdlcdev_init(struct mgsl_struct *info);
static void hdlcdev_exit(struct mgsl_struct *info);
#endif

/*
 * Defines a BUS descriptor value for the PCI adapter
 * local bus address ranges.
 */

#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
(0x00400020 + \
((WrHold) << 30) + \
((WrDly)  << 28) + \
((RdDly)  << 26) + \
((Nwdd)   << 20) + \
((Nwad)   << 15) + \
((Nxda)   << 13) + \
((Nrdd)   << 11) + \
((Nrad)   <<  6) )
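/*
 * Illustrative only: how the BUS_DESCRIPTOR() fields pack into a single
 * 32-bit value. The argument values below are arbitrary examples, not the
 * settings this driver programs into the adapter.
 *
 *	BUS_DESCRIPTOR(1, 0, 0, 0, 1, 0, 0, 1)
 *	  = 0x00400020		base value
 *	  + (1 << 30)		WrHold
 *	  + (1 << 15)		Nwad
 *	  + (1 <<  6)		Nrad
 *	  = 0x40408060
 */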
static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);

/*
 * Adapter diagnostic routines
 */
static bool mgsl_register_test( struct mgsl_struct *info );
static bool mgsl_irq_test( struct mgsl_struct *info );
static bool mgsl_dma_test( struct mgsl_struct *info );
static bool mgsl_memory_test( struct mgsl_struct *info );
static int mgsl_adapter_test( struct mgsl_struct *info );

/*
 * device and resource management routines
 */
static int mgsl_claim_resources(struct mgsl_struct *info);
static void mgsl_release_resources(struct mgsl_struct *info);
static void mgsl_add_device(struct mgsl_struct *info);
static struct mgsl_struct* mgsl_allocate_device(void);

/*
 * DMA buffer manipulation functions.
 */
static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
static bool mgsl_get_rx_frame( struct mgsl_struct *info );
static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
static int num_free_tx_dma_buffers(struct mgsl_struct *info);
static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);

/*
 * DMA and Shared Memory buffer allocation and formatting
 */
static int  mgsl_allocate_dma_buffers(struct mgsl_struct *info);
static void mgsl_free_dma_buffers(struct mgsl_struct *info);
static int  mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
static int  mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);

/*
 * Bottom half interrupt handlers
 */
static void mgsl_bh_handler(struct work_struct *work);
static void mgsl_bh_receive(struct mgsl_struct *info);
static void mgsl_bh_transmit(struct mgsl_struct *info);
static void mgsl_bh_status(struct mgsl_struct *info);

/*
 * Interrupt handler routines and dispatch table.
 */
static void mgsl_isr_null( struct mgsl_struct *info );
static void mgsl_isr_transmit_data( struct mgsl_struct *info );
static void mgsl_isr_receive_data( struct mgsl_struct *info );
static void mgsl_isr_receive_status( struct mgsl_struct *info );
static void mgsl_isr_transmit_status( struct mgsl_struct *info );
static void mgsl_isr_io_pin( struct mgsl_struct *info );
static void mgsl_isr_misc( struct mgsl_struct *info );
static void mgsl_isr_receive_dma( struct mgsl_struct *info );
static void mgsl_isr_transmit_dma( struct mgsl_struct *info );

typedef void (*isr_dispatch_func)(struct mgsl_struct *);

static isr_dispatch_func UscIsrTable[7] =
{
	mgsl_isr_null,
	mgsl_isr_misc,
	mgsl_isr_io_pin,
	mgsl_isr_transmit_data,
	mgsl_isr_transmit_status,
	mgsl_isr_receive_data,
	mgsl_isr_receive_status
};
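/*
 * Illustrative only: mgsl_interrupt() below dispatches through this 7-entry
 * table using the vector read back from the IVR register, roughly:
 *
 *	UscVector = usc_InReg(info, IVR) >> 9;	// index into UscIsrTable
 *	(*UscIsrTable[UscVector])(info);
 */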
/*
 * ioctl call handlers
 */
static int tiocmget(struct tty_struct *tty);
static int tiocmset(struct tty_struct *tty,
		    unsigned int set, unsigned int clear);
static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
	__user *user_icount);
static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
static int mgsl_txenable(struct mgsl_struct * info, int enable);
static int mgsl_txabort(struct mgsl_struct * info);
static int mgsl_rxenable(struct mgsl_struct * info, int enable);
static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
static int mgsl_loopmode_send_done( struct mgsl_struct * info );

/* set non-zero on successful registration with PCI subsystem */
static bool pci_registered;

/*
 * Global linked list of SyncLink devices
 */
static struct mgsl_struct *mgsl_device_list;
static int mgsl_device_count;

/*
 * Set this param to non-zero to load eax with the
 * .text section address and breakpoint on module load.
 * This is useful for use with gdb and add-symbol-file command.
 */
static bool break_on_load;

/*
 * Driver major number, defaults to zero to get auto
 * assigned major number. May be forced as module parameter.
 */
static int ttymajor;

/*
 * Array of user specified options for ISA adapters.
 */
static int io[MAX_ISA_DEVICES];
static int irq[MAX_ISA_DEVICES];
static int dma[MAX_ISA_DEVICES];
static int debug_level;
static int maxframe[MAX_TOTAL_DEVICES];
static int txdmabufs[MAX_TOTAL_DEVICES];
static int txholdbufs[MAX_TOTAL_DEVICES];

module_param(break_on_load, bool, 0);
module_param(ttymajor, int, 0);
module_param_hw_array(io, int, ioport, NULL, 0);
module_param_hw_array(irq, int, irq, NULL, 0);
module_param_hw_array(dma, int, dma, NULL, 0);
module_param(debug_level, int, 0);
module_param_array(maxframe, int, NULL, 0);
module_param_array(txdmabufs, int, NULL, 0);
module_param_array(txholdbufs, int, NULL, 0);
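/*
 * Illustrative only: the module options above could be supplied at load time
 * roughly as follows. The resource values are hypothetical examples, not
 * recommended settings for any particular adapter.
 *
 *	modprobe synclink io=0x280 irq=10 dma=7 maxframe=4096 debug_level=1
 */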
static char *driver_name = "SyncLink serial driver";
static char *driver_version = "$Revision: 4.38 $";

static int synclink_init_one (struct pci_dev *dev,
				     const struct pci_device_id *ent);
static void synclink_remove_one (struct pci_dev *dev);

static const struct pci_device_id synclink_pci_tbl[] = {
	{ PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, }, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);

MODULE_LICENSE("GPL");

static struct pci_driver synclink_pci_driver = {
	.name		= "synclink",
	.id_table	= synclink_pci_tbl,
	.probe		= synclink_init_one,
	.remove		= synclink_remove_one,
};

static struct tty_driver *serial_driver;

/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256


static void mgsl_change_params(struct mgsl_struct *info);
static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);

/*
 * 1st function defined in .text section. Calling this function in
 * init_module() followed by a breakpoint allows a remote debugger
 * (gdb) to get the .text address for the add-symbol-file command.
 * This allows remote debugging of dynamically loadable modules.
 */
static void* mgsl_get_text_ptr(void)
{
	return mgsl_get_text_ptr;
}

static inline int mgsl_paranoia_check(struct mgsl_struct *info,
					char *name, const char *routine)
{
#ifdef MGSL_PARANOIA_CHECK
	static const char *badmagic =
		"Warning: bad magic number for mgsl struct (%s) in %s\n";
	static const char *badinfo =
		"Warning: null mgsl_struct for (%s) in %s\n";

	if (!info) {
		printk(badinfo, name, routine);
		return 1;
	}
	if (info->magic != MGSL_MAGIC) {
		printk(badmagic, name, routine);
		return 1;
	}
#else
	if (!info)
		return 1;
#endif
	return 0;
}

/*
 * line discipline callback wrappers
 *
 * The wrappers maintain line discipline references
 * while calling into the line discipline.
 *
 * ldisc_receive_buf  - pass receive data to line discipline
 */

static void ldisc_receive_buf(struct tty_struct *tty,
			      const __u8 *data, char *flags, int count)
{
	struct tty_ldisc *ld;
	if (!tty)
		return;
	ld = tty_ldisc_ref(tty);
	if (ld) {
		if (ld->ops->receive_buf)
			ld->ops->receive_buf(tty, data, flags, count);
		tty_ldisc_deref(ld);
	}
}

/* mgsl_stop()		throttle (stop) transmitter
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_stop(struct tty_struct *tty)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("mgsl_stop(%s)\n",info->device_name);

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (info->tx_enabled)
		usc_stop_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

} /* end of mgsl_stop() */

/* mgsl_start()		release (start) transmitter
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_start(struct tty_struct *tty)
{
	struct mgsl_struct *info = tty->driver_data;
	unsigned long flags;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("mgsl_start(%s)\n",info->device_name);

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (!info->tx_enabled)
		usc_start_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

} /* end of mgsl_start() */

/*
 * Bottom half work queue access functions
 */

/* mgsl_bh_action()	Return next bottom half action to perform.
 * Return Value:	BH action code or 0 if nothing to do.
 */
static int mgsl_bh_action(struct mgsl_struct *info)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&info->irq_spinlock,flags);

	if (info->pending_bh & BH_RECEIVE) {
		info->pending_bh &= ~BH_RECEIVE;
		rc = BH_RECEIVE;
	} else if (info->pending_bh & BH_TRANSMIT) {
		info->pending_bh &= ~BH_TRANSMIT;
		rc = BH_TRANSMIT;
	} else if (info->pending_bh & BH_STATUS) {
		info->pending_bh &= ~BH_STATUS;
		rc = BH_STATUS;
	}

	if (!rc) {
		/* Mark BH routine as complete */
		info->bh_running = false;
		info->bh_requested = false;
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return rc;
}

/*
 * Perform bottom half processing of work items queued by ISR.
 */
static void mgsl_bh_handler(struct work_struct *work)
{
	struct mgsl_struct *info =
		container_of(work, struct mgsl_struct, task);
	int action;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
			__FILE__,__LINE__,info->device_name);

	info->bh_running = true;

	while((action = mgsl_bh_action(info)) != 0) {

		/* Process work item */
		if ( debug_level >= DEBUG_LEVEL_BH )
			printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
				__FILE__,__LINE__,action);

		switch (action) {

		case BH_RECEIVE:
			mgsl_bh_receive(info);
			break;
		case BH_TRANSMIT:
			mgsl_bh_transmit(info);
			break;
		case BH_STATUS:
			mgsl_bh_status(info);
			break;
		default:
			/* unknown work item ID */
			printk("Unknown work item ID=%08X!\n", action);
			break;
		}
	}

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
			__FILE__,__LINE__,info->device_name);
}

static void mgsl_bh_receive(struct mgsl_struct *info)
{
	bool (*get_rx_frame)(struct mgsl_struct *info) =
		(info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_receive(%s)\n",
			__FILE__,__LINE__,info->device_name);

	do
	{
		if (info->rx_rcc_underrun) {
			unsigned long flags;
			spin_lock_irqsave(&info->irq_spinlock,flags);
			usc_start_receiver(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			return;
		}
	} while(get_rx_frame(info));
}

static void mgsl_bh_transmit(struct mgsl_struct *info)
{
	struct tty_struct *tty = info->port.tty;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
			__FILE__,__LINE__,info->device_name);

	if (tty)
		tty_wakeup(tty);

	/* if transmitter idle and loopmode_send_done_requested
	 * then start echoing RxD to TxD
	 */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if ( !info->tx_active && info->loopmode_send_done_requested )
		usc_loopmode_send_done( info );
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
}

static void mgsl_bh_status(struct mgsl_struct *info)
{
	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_status() entry on %s\n",
			__FILE__,__LINE__,info->device_name);

	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
}
/* mgsl_isr_receive_status()
 *
 *	Service a receive status interrupt. The type of status
 *	interrupt is indicated by the state of the RCSR.
 *	This is only used for HDLC mode.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_status( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, RCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
			__FILE__,__LINE__,status);

	if ( (status & RXSTATUS_ABORT_RECEIVED) &&
		info->loopmode_insert_requested &&
		usc_loopmode_active(info) )
	{
		++info->icount.rxabort;
		info->loopmode_insert_requested = false;

		/* clear CMR:13 to start echoing RxD to TxD */
		info->cmr_value &= ~BIT13;
		usc_OutReg(info, CMR, info->cmr_value);

		/* disable received abort irq (no longer required) */
		usc_OutReg(info, RICR,
			(usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
	}

	if (status & (RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED)) {
		if (status & RXSTATUS_EXITED_HUNT)
			info->icount.exithunt++;
		if (status & RXSTATUS_IDLE_RECEIVED)
			info->icount.rxidle++;
		wake_up_interruptible(&info->event_wait_q);
	}

	if (status & RXSTATUS_OVERRUN){
		info->icount.rxover++;
		usc_process_rxoverrun_sync( info );
	}

	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
	usc_UnlatchRxstatusBits( info, status );

} /* end of mgsl_isr_receive_status() */
/* mgsl_isr_transmit_status()
 *
 *	Service a transmit status interrupt
 *	HDLC mode :end of transmit frame
 *	Async mode:all data is sent
 *	transmit status is indicated by bits in the TCSR.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_status( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, TCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
			__FILE__,__LINE__,status);

	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
	usc_UnlatchTxstatusBits( info, status );

	if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
	{
		/* finished sending HDLC abort. This may leave	*/
		/* the TxFifo with data from the aborted frame	*/
		/* so purge the TxFifo. Also shutdown the DMA	*/
		/* channel in case there is data remaining in	*/
		/* the DMA buffer				*/
		usc_DmaCmd( info, DmaCmd_ResetTxChannel );
		usc_RTCmd( info, RTCmd_PurgeTxFifo );
	}

	if ( status & TXSTATUS_EOF_SENT )
		info->icount.txok++;
	else if ( status & TXSTATUS_UNDERRUN )
		info->icount.txunder++;
	else if ( status & TXSTATUS_ABORT_SENT )
		info->icount.txabort++;
	else
		info->icount.txunder++;

	info->tx_active = false;
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
	del_timer(&info->tx_timer);

	if ( info->drop_rts_on_tx_done ) {
		usc_get_serial_signals( info );
		if ( info->serial_signals & SerialSignal_RTS ) {
			info->serial_signals &= ~SerialSignal_RTS;
			usc_set_serial_signals( info );
		}
		info->drop_rts_on_tx_done = false;
	}

#if SYNCLINK_GENERIC_HDLC
	if (info->netcount)
		hdlcdev_tx_done(info);
	else
#endif
	{
		if (info->port.tty->stopped || info->port.tty->hw_stopped) {
			usc_stop_transmitter(info);
			return;
		}
		info->pending_bh |= BH_TRANSMIT;
	}

} /* end of mgsl_isr_transmit_status() */

/* mgsl_isr_io_pin()
 *
 *	Service an Input/Output pin interrupt. The type of
 *	interrupt is indicated by bits in the MISR
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_io_pin( struct mgsl_struct *info )
{
	struct mgsl_icount *icount;
	u16 status = usc_InReg( info, MISR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
			__FILE__,__LINE__,status);

	usc_ClearIrqPendingBits( info, IO_PIN );
	usc_UnlatchIostatusBits( info, status );

	if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
	              MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
		icount = &info->icount;
		/* update input line counters */
		if (status & MISCSTATUS_RI_LATCHED) {
			if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_RI);
			icount->rng++;
			if ( status & MISCSTATUS_RI )
				info->input_signal_events.ri_up++;
			else
				info->input_signal_events.ri_down++;
		}
		if (status & MISCSTATUS_DSR_LATCHED) {
			if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_DSR);
			icount->dsr++;
			if ( status & MISCSTATUS_DSR )
				info->input_signal_events.dsr_up++;
			else
				info->input_signal_events.dsr_down++;
		}
		if (status & MISCSTATUS_DCD_LATCHED) {
			if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_DCD);
			icount->dcd++;
			if (status & MISCSTATUS_DCD) {
				info->input_signal_events.dcd_up++;
			} else
				info->input_signal_events.dcd_down++;
#if SYNCLINK_GENERIC_HDLC
			if (info->netcount) {
				if (status & MISCSTATUS_DCD)
					netif_carrier_on(info->netdev);
				else
					netif_carrier_off(info->netdev);
			}
#endif
		}
		if (status & MISCSTATUS_CTS_LATCHED)
		{
			if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
				usc_DisablestatusIrqs(info,SICR_CTS);
			icount->cts++;
			if ( status & MISCSTATUS_CTS )
				info->input_signal_events.cts_up++;
			else
				info->input_signal_events.cts_down++;
		}
		wake_up_interruptible(&info->status_event_wait_q);
		wake_up_interruptible(&info->event_wait_q);
		if (tty_port_check_carrier(&info->port) &&
		     (status & MISCSTATUS_DCD_LATCHED) ) {
			if ( debug_level >= DEBUG_LEVEL_ISR )
				printk("%s CD now %s...", info->device_name,
				       (status & MISCSTATUS_DCD) ? "on" : "off");
			if (status & MISCSTATUS_DCD)
				wake_up_interruptible(&info->port.open_wait);
			else {
				if ( debug_level >= DEBUG_LEVEL_ISR )
					printk("doing serial hangup...");
				if (info->port.tty)
					tty_hangup(info->port.tty);
			}
		}

		if (tty_port_cts_enabled(&info->port) &&
		     (status & MISCSTATUS_CTS_LATCHED) ) {
			if (info->port.tty->hw_stopped) {
				if (status & MISCSTATUS_CTS) {
					if ( debug_level >= DEBUG_LEVEL_ISR )
						printk("CTS tx start...");
					info->port.tty->hw_stopped = 0;
					usc_start_transmitter(info);
					info->pending_bh |= BH_TRANSMIT;
					return;
				}
			} else {
				if (!(status & MISCSTATUS_CTS)) {
					if ( debug_level >= DEBUG_LEVEL_ISR )
						printk("CTS tx stop...");
					if (info->port.tty)
						info->port.tty->hw_stopped = 1;
					usc_stop_transmitter(info);
				}
			}
		}
	}

	info->pending_bh |= BH_STATUS;

	/* for diagnostics set IRQ flag */
	if ( status & MISCSTATUS_TXC_LATCHED ){
		usc_OutReg( info, SICR,
			(unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
		usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
		info->irq_occurred = true;
	}

} /* end of mgsl_isr_io_pin() */

/* mgsl_isr_transmit_data()
 *
 *	Service a transmit data interrupt (async mode only).
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_data( struct mgsl_struct *info )
{
	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
			__FILE__,__LINE__,info->xmit_cnt);

	usc_ClearIrqPendingBits( info, TRANSMIT_DATA );

	if (info->port.tty->stopped || info->port.tty->hw_stopped) {
		usc_stop_transmitter(info);
		return;
	}

	if ( info->xmit_cnt )
		usc_load_txfifo( info );
	else
		info->tx_active = false;

	if (info->xmit_cnt < WAKEUP_CHARS)
		info->pending_bh |= BH_TRANSMIT;

} /* end of mgsl_isr_transmit_data() */
/* mgsl_isr_receive_data()
 *
 *	Service a receive data interrupt. This occurs
 *	when operating in asynchronous interrupt transfer mode.
 *	The receive data FIFO is flushed to the receive data buffers.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_data( struct mgsl_struct *info )
{
	int Fifocount;
	u16 status;
	int work = 0;
	unsigned char DataByte;
	struct mgsl_icount *icount = &info->icount;

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_data\n",
			__FILE__,__LINE__);

	usc_ClearIrqPendingBits( info, RECEIVE_DATA );

	/* select FIFO status for RICR readback */
	usc_RCmd( info, RCmd_SelectRicrRxFifostatus );

	/* clear the Wordstatus bit so that status readback */
	/* only reflects the status of this byte */
	usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));

	/* flush the receive FIFO */

	while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
		int flag;

		/* read one byte from RxFIFO */
		outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
		      info->io_base + CCAR );
		DataByte = inb( info->io_base + CCAR );

		/* get the status of the received byte */
		status = usc_InReg(info, RCSR);
		if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
				RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) )
			usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);

		icount->rx++;

		flag = 0;
		if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
				RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) ) {
			printk("rxerr=%04X\n",status);
			/* update error statistics */
			if ( status & RXSTATUS_BREAK_RECEIVED ) {
				status &= ~(RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR);
				icount->brk++;
			} else if (status & RXSTATUS_PARITY_ERROR)
				icount->parity++;
			else if (status & RXSTATUS_FRAMING_ERROR)
				icount->frame++;
			else if (status & RXSTATUS_OVERRUN) {
				/* must issue purge fifo cmd before */
				/* 16C32 accepts more receive chars */
				usc_RTCmd(info,RTCmd_PurgeRxFifo);
				icount->overrun++;
			}

			/* discard char if tty control flags say so */
			if (status & info->ignore_status_mask)
				continue;

			status &= info->read_status_mask;

			if (status & RXSTATUS_BREAK_RECEIVED) {
				flag = TTY_BREAK;
				if (info->port.flags & ASYNC_SAK)
					do_SAK(info->port.tty);
			} else if (status & RXSTATUS_PARITY_ERROR)
				flag = TTY_PARITY;
			else if (status & RXSTATUS_FRAMING_ERROR)
				flag = TTY_FRAME;
		}	/* end of if (error) */
		tty_insert_flip_char(&info->port, DataByte, flag);
		if (status & RXSTATUS_OVERRUN) {
			/* Overrun is special, since it's
			 * reported immediately, and doesn't
			 * affect the current character
			 */
			work += tty_insert_flip_char(&info->port, 0, TTY_OVERRUN);
		}
	}

	if ( debug_level >= DEBUG_LEVEL_ISR ) {
		printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
			__FILE__,__LINE__,icount->rx,icount->brk,
			icount->parity,icount->frame,icount->overrun);
	}

	if(work)
		tty_flip_buffer_push(&info->port);
}
/* mgsl_isr_misc()
 *
 *	Service a miscellaneous interrupt source.
 *
 * Arguments:		info	pointer to device extension (instance data)
 * Return Value:	None
 */
static void mgsl_isr_misc( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, MISR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_misc status=%04X\n",
			__FILE__,__LINE__,status);

	if ((status & MISCSTATUS_RCC_UNDERRUN) &&
	    (info->params.mode == MGSL_MODE_HDLC)) {

		/* turn off receiver and rx DMA */
		usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
		usc_DmaCmd(info, DmaCmd_ResetRxChannel);
		usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
		usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
		usc_DisableInterrupts(info, RECEIVE_DATA | RECEIVE_STATUS);

		/* schedule BH handler to restart receiver */
		info->pending_bh |= BH_RECEIVE;
		info->rx_rcc_underrun = true;
	}

	usc_ClearIrqPendingBits( info, MISC );
	usc_UnlatchMiscstatusBits( info, status );

} /* end of mgsl_isr_misc() */

/* mgsl_isr_null()
 *
 *	Services undefined interrupt vectors from the
 *	USC. (hence this function SHOULD never be called)
 *
 * Arguments:		info	pointer to device extension (instance data)
 * Return Value:	None
 */
static void mgsl_isr_null( struct mgsl_struct *info )
{

} /* end of mgsl_isr_null() */

/* mgsl_isr_receive_dma()
 *
 *	Service a receive DMA channel interrupt.
 *	For this driver there are two sources of receive DMA interrupts
 *	as identified in the Receive DMA mode Register (RDMR):
 *
 *	BIT3	EOA/EOL	End of List, all receive buffers in receive
 *			buffer list have been filled (no more free
 *			buffers available). The DMA controller has
 *			shut down.
 *
 *	BIT2	EOB	End of Buffer. This interrupt occurs when a receive
 *			DMA buffer is terminated in response to completion
 *			of a good frame or a frame with errors. The status
 *			of the frame is stored in the buffer entry in the
 *			list of receive buffer entries.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_dma( struct mgsl_struct *info )
{
	u16 status;

	/* clear interrupt pending and IUS bit for Rx DMA IRQ */
	usc_OutDmaReg( info, CDIR, BIT9 | BIT1 );

	/* Read the receive DMA status to identify interrupt type. */
	/* This also clears the status bits. */
	status = usc_InDmaReg( info, RDMR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
			__FILE__,__LINE__,info->device_name,status);

	info->pending_bh |= BH_RECEIVE;

	if ( status & BIT3 ) {
		info->rx_overflow = true;
		info->icount.buf_overrun++;
	}

} /* end of mgsl_isr_receive_dma() */
/* mgsl_isr_transmit_dma()
 *
 *	This function services a transmit DMA channel interrupt.
 *
 *	For this driver there is one source of transmit DMA interrupts
 *	as identified in the Transmit DMA Mode Register (TDMR):
 *
 *	BIT2	EOB	End of Buffer. This interrupt occurs when a
 *			transmit DMA buffer has been emptied.
 *
 *	The driver maintains enough transmit DMA buffers to hold at least
 *	one max frame size transmit frame. When operating in a buffered
 *	transmit mode, there may be enough transmit DMA buffers to hold at
 *	least two or more max frame size frames. On an EOB condition,
 *	determine if there are any queued transmit buffers and copy into
 *	transmit DMA buffers if we have room.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
{
	u16 status;

	/* clear interrupt pending and IUS bit for Tx DMA IRQ */
	usc_OutDmaReg(info, CDIR, BIT8 | BIT0 );

	/* Read the transmit DMA status to identify interrupt type. */
	/* This also clears the status bits. */

	status = usc_InDmaReg( info, TDMR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
			__FILE__,__LINE__,info->device_name,status);

	if ( status & BIT2 ) {
		--info->tx_dma_buffers_used;

		/* if there are transmit frames queued,
		 *  try to load the next one
		 */
		if ( load_next_tx_holding_buffer(info) ) {
			/* if call returns non-zero value, we have
			 * at least one free tx holding buffer
			 */
			info->pending_bh |= BH_TRANSMIT;
		}
	}

} /* end of mgsl_isr_transmit_dma() */

/* mgsl_interrupt()
 *
 *	Interrupt service routine entry point.
 *
 * Arguments:
 *
 *	irq	interrupt number that caused interrupt
 *	dev_id	device ID supplied during interrupt registration
 *
 * Return Value: None
 */
static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
{
	struct mgsl_struct *info = dev_id;
	u16 UscVector;
	u16 DmaVector;

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
			__FILE__, __LINE__, info->irq_level);

	spin_lock(&info->irq_spinlock);

	for(;;) {
		/* Read the interrupt vectors from hardware. */
		UscVector = usc_InReg(info, IVR) >> 9;
		DmaVector = usc_InDmaReg(info, DIVR);

		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
				__FILE__,__LINE__,info->device_name,UscVector,DmaVector);

		if ( !UscVector && !DmaVector )
			break;

		/* Dispatch interrupt vector */
		if ( UscVector )
			(*UscIsrTable[UscVector])(info);
		else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
			mgsl_isr_transmit_dma(info);
		else
			mgsl_isr_receive_dma(info);

		if ( info->isr_overflow ) {
			printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
				__FILE__, __LINE__, info->device_name, info->irq_level);
			usc_DisableMasterIrqBit(info);
			usc_DisableDmaInterrupts(info,DICR_MASTER);
			break;
		}
	}

	/* Request bottom half processing if there's something
	 * for it to do and the bh is not already running
	 */

	if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s queueing bh task.\n",
				__FILE__,__LINE__,info->device_name);
		schedule_work(&info->task);
		info->bh_requested = true;
	}

	spin_unlock(&info->irq_spinlock);

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
			__FILE__, __LINE__, info->irq_level);

	return IRQ_HANDLED;
} /* end of mgsl_interrupt() */
1742 * 1743 * Arguments: info pointer to device instance data 1744 * Return Value: 0 if success, otherwise error code 1745 */ 1746static int startup(struct mgsl_struct * info) 1747{ 1748 int retval = 0; 1749 1750 if ( debug_level >= DEBUG_LEVEL_INFO ) 1751 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name); 1752 1753 if (tty_port_initialized(&info->port)) 1754 return 0; 1755 1756 if (!info->xmit_buf) { 1757 /* allocate a page of memory for a transmit buffer */ 1758 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL); 1759 if (!info->xmit_buf) { 1760 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n", 1761 __FILE__,__LINE__,info->device_name); 1762 return -ENOMEM; 1763 } 1764 } 1765 1766 info->pending_bh = 0; 1767 1768 memset(&info->icount, 0, sizeof(info->icount)); 1769 1770 timer_setup(&info->tx_timer, mgsl_tx_timeout, 0); 1771 1772 /* Allocate and claim adapter resources */ 1773 retval = mgsl_claim_resources(info); 1774 1775 /* perform existence check and diagnostics */ 1776 if ( !retval ) 1777 retval = mgsl_adapter_test(info); 1778 1779 if ( retval ) { 1780 if (capable(CAP_SYS_ADMIN) && info->port.tty) 1781 set_bit(TTY_IO_ERROR, &info->port.tty->flags); 1782 mgsl_release_resources(info); 1783 return retval; 1784 } 1785 1786 /* program hardware for current parameters */ 1787 mgsl_change_params(info); 1788 1789 if (info->port.tty) 1790 clear_bit(TTY_IO_ERROR, &info->port.tty->flags); 1791 1792 tty_port_set_initialized(&info->port, 1); 1793 1794 return 0; 1795} /* end of startup() */ 1796 1797/* shutdown() 1798 * 1799 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware 1800 * 1801 * Arguments: info pointer to device instance data 1802 * Return Value: None 1803 */ 1804static void shutdown(struct mgsl_struct * info) 1805{ 1806 unsigned long flags; 1807 1808 if (!tty_port_initialized(&info->port)) 1809 return; 1810 1811 if (debug_level >= DEBUG_LEVEL_INFO) 1812 printk("%s(%d):mgsl_shutdown(%s)\n", 1813 __FILE__,__LINE__, info->device_name ); 1814 1815 /* clear status wait queue because status changes */ 1816 /* can't happen after shutting down the hardware */ 1817 wake_up_interruptible(&info->status_event_wait_q); 1818 wake_up_interruptible(&info->event_wait_q); 1819 1820 del_timer_sync(&info->tx_timer); 1821 1822 if (info->xmit_buf) { 1823 free_page((unsigned long) info->xmit_buf); 1824 info->xmit_buf = NULL; 1825 } 1826 1827 spin_lock_irqsave(&info->irq_spinlock,flags); 1828 usc_DisableMasterIrqBit(info); 1829 usc_stop_receiver(info); 1830 usc_stop_transmitter(info); 1831 usc_DisableInterrupts(info,RECEIVE_DATA | RECEIVE_STATUS | 1832 TRANSMIT_DATA | TRANSMIT_STATUS | IO_PIN | MISC ); 1833 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE); 1834 1835 /* Disable DMAEN (Port 7, Bit 14) */ 1836 /* This disconnects the DMA request signal from the ISA bus */ 1837 /* on the ISA adapter. This has no effect for the PCI adapter */ 1838 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14)); 1839 1840 /* Disable INTEN (Port 6, Bit12) */ 1841 /* This disconnects the IRQ request signal to the ISA bus */ 1842 /* on the ISA adapter. 
This has no effect for the PCI adapter */ 1843 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12)); 1844 1845 if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) { 1846 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); 1847 usc_set_serial_signals(info); 1848 } 1849 1850 spin_unlock_irqrestore(&info->irq_spinlock,flags); 1851 1852 mgsl_release_resources(info); 1853 1854 if (info->port.tty) 1855 set_bit(TTY_IO_ERROR, &info->port.tty->flags); 1856 1857 tty_port_set_initialized(&info->port, 0); 1858} /* end of shutdown() */ 1859 1860static void mgsl_program_hw(struct mgsl_struct *info) 1861{ 1862 unsigned long flags; 1863 1864 spin_lock_irqsave(&info->irq_spinlock,flags); 1865 1866 usc_stop_receiver(info); 1867 usc_stop_transmitter(info); 1868 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; 1869 1870 if (info->params.mode == MGSL_MODE_HDLC || 1871 info->params.mode == MGSL_MODE_RAW || 1872 info->netcount) 1873 usc_set_sync_mode(info); 1874 else 1875 usc_set_async_mode(info); 1876 1877 usc_set_serial_signals(info); 1878 1879 info->dcd_chkcount = 0; 1880 info->cts_chkcount = 0; 1881 info->ri_chkcount = 0; 1882 info->dsr_chkcount = 0; 1883 1884 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI); 1885 usc_EnableInterrupts(info, IO_PIN); 1886 usc_get_serial_signals(info); 1887 1888 if (info->netcount || info->port.tty->termios.c_cflag & CREAD) 1889 usc_start_receiver(info); 1890 1891 spin_unlock_irqrestore(&info->irq_spinlock,flags); 1892} 1893 1894/* Reconfigure adapter based on new parameters 1895 */ 1896static void mgsl_change_params(struct mgsl_struct *info) 1897{ 1898 unsigned cflag; 1899 int bits_per_char; 1900 1901 if (!info->port.tty) 1902 return; 1903 1904 if (debug_level >= DEBUG_LEVEL_INFO) 1905 printk("%s(%d):mgsl_change_params(%s)\n", 1906 __FILE__,__LINE__, info->device_name ); 1907 1908 cflag = info->port.tty->termios.c_cflag; 1909 1910 /* if B0 rate (hangup) specified then negate RTS and DTR */ 1911 /* otherwise assert RTS and DTR */ 1912 if (cflag & CBAUD) 1913 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR; 1914 else 1915 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); 1916 1917 /* byte size and parity */ 1918 1919 switch (cflag & CSIZE) { 1920 case CS5: info->params.data_bits = 5; break; 1921 case CS6: info->params.data_bits = 6; break; 1922 case CS7: info->params.data_bits = 7; break; 1923 case CS8: info->params.data_bits = 8; break; 1924 /* Never happens, but GCC is too dumb to figure it out */ 1925 default: info->params.data_bits = 7; break; 1926 } 1927 1928 if (cflag & CSTOPB) 1929 info->params.stop_bits = 2; 1930 else 1931 info->params.stop_bits = 1; 1932 1933 info->params.parity = ASYNC_PARITY_NONE; 1934 if (cflag & PARENB) { 1935 if (cflag & PARODD) 1936 info->params.parity = ASYNC_PARITY_ODD; 1937 else 1938 info->params.parity = ASYNC_PARITY_EVEN; 1939#ifdef CMSPAR 1940 if (cflag & CMSPAR) 1941 info->params.parity = ASYNC_PARITY_SPACE; 1942#endif 1943 } 1944 1945 /* calculate number of jiffies to transmit a full 1946 * FIFO (32 bytes) at specified data rate 1947 */ 1948 bits_per_char = info->params.data_bits + 1949 info->params.stop_bits + 1; 1950 1951 /* if port data rate is set to 460800 or less then 1952 * allow tty settings to override, otherwise keep the 1953 * current data rate. 
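 *
 * Illustrative figures (not part of the original comment): with 8 data
 * bits, 1 stop bit and HZ=100, bits_per_char above works out to
 * 8 + 1 + 1 = 10, so at 9600bps the FIFO transmit time computed below
 * is (32 * 100 * 10) / 9600 = 3 jiffies (~30ms), plus the HZ/50 (20ms)
 * of slop added afterwards.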
1954 */ 1955 if (info->params.data_rate <= 460800) 1956 info->params.data_rate = tty_get_baud_rate(info->port.tty); 1957 1958 if ( info->params.data_rate ) { 1959 info->timeout = (32*HZ*bits_per_char) / 1960 info->params.data_rate; 1961 } 1962 info->timeout += HZ/50; /* Add .02 seconds of slop */ 1963 1964 tty_port_set_cts_flow(&info->port, cflag & CRTSCTS); 1965 tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL); 1966 1967 /* process tty input control flags */ 1968 1969 info->read_status_mask = RXSTATUS_OVERRUN; 1970 if (I_INPCK(info->port.tty)) 1971 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR; 1972 if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty)) 1973 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED; 1974 1975 if (I_IGNPAR(info->port.tty)) 1976 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR; 1977 if (I_IGNBRK(info->port.tty)) { 1978 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED; 1979 /* If ignoring parity and break indicators, ignore 1980 * overruns too. (For real raw support). 1981 */ 1982 if (I_IGNPAR(info->port.tty)) 1983 info->ignore_status_mask |= RXSTATUS_OVERRUN; 1984 } 1985 1986 mgsl_program_hw(info); 1987 1988} /* end of mgsl_change_params() */ 1989 1990/* mgsl_put_char() 1991 * 1992 * Add a character to the transmit buffer. 1993 * 1994 * Arguments: tty pointer to tty information structure 1995 * ch character to add to transmit buffer 1996 * 1997 * Return Value: None 1998 */ 1999static int mgsl_put_char(struct tty_struct *tty, unsigned char ch) 2000{ 2001 struct mgsl_struct *info = tty->driver_data; 2002 unsigned long flags; 2003 int ret = 0; 2004 2005 if (debug_level >= DEBUG_LEVEL_INFO) { 2006 printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n", 2007 __FILE__, __LINE__, ch, info->device_name); 2008 } 2009 2010 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char")) 2011 return 0; 2012 2013 if (!info->xmit_buf) 2014 return 0; 2015 2016 spin_lock_irqsave(&info->irq_spinlock, flags); 2017 2018 if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) { 2019 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) { 2020 info->xmit_buf[info->xmit_head++] = ch; 2021 info->xmit_head &= SERIAL_XMIT_SIZE-1; 2022 info->xmit_cnt++; 2023 ret = 1; 2024 } 2025 } 2026 spin_unlock_irqrestore(&info->irq_spinlock, flags); 2027 return ret; 2028 2029} /* end of mgsl_put_char() */ 2030 2031/* mgsl_flush_chars() 2032 * 2033 * Enable transmitter so remaining characters in the 2034 * transmit buffer are sent. 
2035 * 2036 * Arguments: tty pointer to tty information structure 2037 * Return Value: None 2038 */ 2039static void mgsl_flush_chars(struct tty_struct *tty) 2040{ 2041 struct mgsl_struct *info = tty->driver_data; 2042 unsigned long flags; 2043 2044 if ( debug_level >= DEBUG_LEVEL_INFO ) 2045 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n", 2046 __FILE__,__LINE__,info->device_name,info->xmit_cnt); 2047 2048 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars")) 2049 return; 2050 2051 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped || 2052 !info->xmit_buf) 2053 return; 2054 2055 if ( debug_level >= DEBUG_LEVEL_INFO ) 2056 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n", 2057 __FILE__,__LINE__,info->device_name ); 2058 2059 spin_lock_irqsave(&info->irq_spinlock,flags); 2060 2061 if (!info->tx_active) { 2062 if ( (info->params.mode == MGSL_MODE_HDLC || 2063 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) { 2064 /* operating in synchronous (frame oriented) mode */ 2065 /* copy data from circular xmit_buf to */ 2066 /* transmit DMA buffer. */ 2067 mgsl_load_tx_dma_buffer(info, 2068 info->xmit_buf,info->xmit_cnt); 2069 } 2070 usc_start_transmitter(info); 2071 } 2072 2073 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2074 2075} /* end of mgsl_flush_chars() */ 2076 2077/* mgsl_write() 2078 * 2079 * Send a block of data 2080 * 2081 * Arguments: 2082 * 2083 * tty pointer to tty information structure 2084 * buf pointer to buffer containing send data 2085 * count size of send data in bytes 2086 * 2087 * Return Value: number of characters written 2088 */ 2089static int mgsl_write(struct tty_struct * tty, 2090 const unsigned char *buf, int count) 2091{ 2092 int c, ret = 0; 2093 struct mgsl_struct *info = tty->driver_data; 2094 unsigned long flags; 2095 2096 if ( debug_level >= DEBUG_LEVEL_INFO ) 2097 printk( "%s(%d):mgsl_write(%s) count=%d\n", 2098 __FILE__,__LINE__,info->device_name,count); 2099 2100 if (mgsl_paranoia_check(info, tty->name, "mgsl_write")) 2101 goto cleanup; 2102 2103 if (!info->xmit_buf) 2104 goto cleanup; 2105 2106 if ( info->params.mode == MGSL_MODE_HDLC || 2107 info->params.mode == MGSL_MODE_RAW ) { 2108 /* operating in synchronous (frame oriented) mode */ 2109 if (info->tx_active) { 2110 2111 if ( info->params.mode == MGSL_MODE_HDLC ) { 2112 ret = 0; 2113 goto cleanup; 2114 } 2115 /* transmitter is actively sending data - 2116 * if we have multiple transmit dma and 2117 * holding buffers, attempt to queue this 2118 * frame for transmission at a later time. 2119 */ 2120 if (info->tx_holding_count >= info->num_tx_holding_buffers ) { 2121 /* no tx holding buffers available */ 2122 ret = 0; 2123 goto cleanup; 2124 } 2125 2126 /* queue transmit frame request */ 2127 ret = count; 2128 save_tx_buffer_request(info,buf,count); 2129 2130 /* if we have sufficient tx dma buffers, 2131 * load the next buffered tx request 2132 */ 2133 spin_lock_irqsave(&info->irq_spinlock,flags); 2134 load_next_tx_holding_buffer(info); 2135 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2136 goto cleanup; 2137 } 2138 2139 /* if operating in HDLC LoopMode and the adapter */ 2140 /* has yet to be inserted into the loop, we can't */ 2141 /* transmit */ 2142 2143 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) && 2144 !usc_loopmode_active(info) ) 2145 { 2146 ret = 0; 2147 goto cleanup; 2148 } 2149 2150 if ( info->xmit_cnt ) { 2151 /* Send accumulated from send_char() calls */ 2152 /* as frame and wait before accepting more data. 
*/ 2153 ret = 0; 2154 2155 /* copy data from circular xmit_buf to */ 2156 /* transmit DMA buffer. */ 2157 mgsl_load_tx_dma_buffer(info, 2158 info->xmit_buf,info->xmit_cnt); 2159 if ( debug_level >= DEBUG_LEVEL_INFO ) 2160 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n", 2161 __FILE__,__LINE__,info->device_name); 2162 } else { 2163 if ( debug_level >= DEBUG_LEVEL_INFO ) 2164 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n", 2165 __FILE__,__LINE__,info->device_name); 2166 ret = count; 2167 info->xmit_cnt = count; 2168 mgsl_load_tx_dma_buffer(info,buf,count); 2169 } 2170 } else { 2171 while (1) { 2172 spin_lock_irqsave(&info->irq_spinlock,flags); 2173 c = min_t(int, count, 2174 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1, 2175 SERIAL_XMIT_SIZE - info->xmit_head)); 2176 if (c <= 0) { 2177 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2178 break; 2179 } 2180 memcpy(info->xmit_buf + info->xmit_head, buf, c); 2181 info->xmit_head = ((info->xmit_head + c) & 2182 (SERIAL_XMIT_SIZE-1)); 2183 info->xmit_cnt += c; 2184 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2185 buf += c; 2186 count -= c; 2187 ret += c; 2188 } 2189 } 2190 2191 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) { 2192 spin_lock_irqsave(&info->irq_spinlock,flags); 2193 if (!info->tx_active) 2194 usc_start_transmitter(info); 2195 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2196 } 2197cleanup: 2198 if ( debug_level >= DEBUG_LEVEL_INFO ) 2199 printk( "%s(%d):mgsl_write(%s) returning=%d\n", 2200 __FILE__,__LINE__,info->device_name,ret); 2201 2202 return ret; 2203 2204} /* end of mgsl_write() */ 2205 2206/* mgsl_write_room() 2207 * 2208 * Return the count of free bytes in transmit buffer 2209 * 2210 * Arguments: tty pointer to tty info structure 2211 * Return Value: None 2212 */ 2213static int mgsl_write_room(struct tty_struct *tty) 2214{ 2215 struct mgsl_struct *info = tty->driver_data; 2216 int ret; 2217 2218 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room")) 2219 return 0; 2220 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1; 2221 if (ret < 0) 2222 ret = 0; 2223 2224 if (debug_level >= DEBUG_LEVEL_INFO) 2225 printk("%s(%d):mgsl_write_room(%s)=%d\n", 2226 __FILE__,__LINE__, info->device_name,ret ); 2227 2228 if ( info->params.mode == MGSL_MODE_HDLC || 2229 info->params.mode == MGSL_MODE_RAW ) { 2230 /* operating in synchronous (frame oriented) mode */ 2231 if ( info->tx_active ) 2232 return 0; 2233 else 2234 return HDLC_MAX_FRAME_SIZE; 2235 } 2236 2237 return ret; 2238 2239} /* end of mgsl_write_room() */ 2240 2241/* mgsl_chars_in_buffer() 2242 * 2243 * Return the count of bytes in transmit buffer 2244 * 2245 * Arguments: tty pointer to tty info structure 2246 * Return Value: None 2247 */ 2248static int mgsl_chars_in_buffer(struct tty_struct *tty) 2249{ 2250 struct mgsl_struct *info = tty->driver_data; 2251 2252 if (debug_level >= DEBUG_LEVEL_INFO) 2253 printk("%s(%d):mgsl_chars_in_buffer(%s)\n", 2254 __FILE__,__LINE__, info->device_name ); 2255 2256 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer")) 2257 return 0; 2258 2259 if (debug_level >= DEBUG_LEVEL_INFO) 2260 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n", 2261 __FILE__,__LINE__, info->device_name,info->xmit_cnt ); 2262 2263 if ( info->params.mode == MGSL_MODE_HDLC || 2264 info->params.mode == MGSL_MODE_RAW ) { 2265 /* operating in synchronous (frame oriented) mode */ 2266 if ( info->tx_active ) 2267 return info->max_frame_size; 2268 else 2269 return 0; 2270 } 2271 2272 return info->xmit_cnt; 2273} /* end of 
mgsl_chars_in_buffer() */ 2274 2275/* mgsl_flush_buffer() 2276 * 2277 * Discard all data in the send buffer 2278 * 2279 * Arguments: tty pointer to tty info structure 2280 * Return Value: None 2281 */ 2282static void mgsl_flush_buffer(struct tty_struct *tty) 2283{ 2284 struct mgsl_struct *info = tty->driver_data; 2285 unsigned long flags; 2286 2287 if (debug_level >= DEBUG_LEVEL_INFO) 2288 printk("%s(%d):mgsl_flush_buffer(%s) entry\n", 2289 __FILE__,__LINE__, info->device_name ); 2290 2291 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer")) 2292 return; 2293 2294 spin_lock_irqsave(&info->irq_spinlock,flags); 2295 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; 2296 del_timer(&info->tx_timer); 2297 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2298 2299 tty_wakeup(tty); 2300} 2301 2302/* mgsl_send_xchar() 2303 * 2304 * Send a high-priority XON/XOFF character 2305 * 2306 * Arguments: tty pointer to tty info structure 2307 * ch character to send 2308 * Return Value: None 2309 */ 2310static void mgsl_send_xchar(struct tty_struct *tty, char ch) 2311{ 2312 struct mgsl_struct *info = tty->driver_data; 2313 unsigned long flags; 2314 2315 if (debug_level >= DEBUG_LEVEL_INFO) 2316 printk("%s(%d):mgsl_send_xchar(%s,%d)\n", 2317 __FILE__,__LINE__, info->device_name, ch ); 2318 2319 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar")) 2320 return; 2321 2322 info->x_char = ch; 2323 if (ch) { 2324 /* Make sure transmit interrupts are on */ 2325 spin_lock_irqsave(&info->irq_spinlock,flags); 2326 if (!info->tx_enabled) 2327 usc_start_transmitter(info); 2328 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2329 } 2330} /* end of mgsl_send_xchar() */ 2331 2332/* mgsl_throttle() 2333 * 2334 * Signal remote device to throttle send data (our receive data) 2335 * 2336 * Arguments: tty pointer to tty info structure 2337 * Return Value: None 2338 */ 2339static void mgsl_throttle(struct tty_struct * tty) 2340{ 2341 struct mgsl_struct *info = tty->driver_data; 2342 unsigned long flags; 2343 2344 if (debug_level >= DEBUG_LEVEL_INFO) 2345 printk("%s(%d):mgsl_throttle(%s) entry\n", 2346 __FILE__,__LINE__, info->device_name ); 2347 2348 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle")) 2349 return; 2350 2351 if (I_IXOFF(tty)) 2352 mgsl_send_xchar(tty, STOP_CHAR(tty)); 2353 2354 if (C_CRTSCTS(tty)) { 2355 spin_lock_irqsave(&info->irq_spinlock,flags); 2356 info->serial_signals &= ~SerialSignal_RTS; 2357 usc_set_serial_signals(info); 2358 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2359 } 2360} /* end of mgsl_throttle() */ 2361 2362/* mgsl_unthrottle() 2363 * 2364 * Signal remote device to stop throttling send data (our receive data) 2365 * 2366 * Arguments: tty pointer to tty info structure 2367 * Return Value: None 2368 */ 2369static void mgsl_unthrottle(struct tty_struct * tty) 2370{ 2371 struct mgsl_struct *info = tty->driver_data; 2372 unsigned long flags; 2373 2374 if (debug_level >= DEBUG_LEVEL_INFO) 2375 printk("%s(%d):mgsl_unthrottle(%s) entry\n", 2376 __FILE__,__LINE__, info->device_name ); 2377 2378 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle")) 2379 return; 2380 2381 if (I_IXOFF(tty)) { 2382 if (info->x_char) 2383 info->x_char = 0; 2384 else 2385 mgsl_send_xchar(tty, START_CHAR(tty)); 2386 } 2387 2388 if (C_CRTSCTS(tty)) { 2389 spin_lock_irqsave(&info->irq_spinlock,flags); 2390 info->serial_signals |= SerialSignal_RTS; 2391 usc_set_serial_signals(info); 2392 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2393 } 2394 2395} /* end of 
mgsl_unthrottle() */ 2396 2397/* mgsl_get_stats() 2398 * 2399 * get the current serial statistics (interrupt and error counters) 2400 * 2401 * Arguments: info pointer to device instance data 2402 * user_icount pointer to buffer to hold returned stats 2403 * 2404 * Return Value: 0 if success, otherwise error code 2405 */ 2406static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount) 2407{ 2408 int err; 2409 2410 if (debug_level >= DEBUG_LEVEL_INFO) 2411 printk("%s(%d):mgsl_get_stats(%s)\n", 2412 __FILE__,__LINE__, info->device_name); 2413 2414 if (!user_icount) { 2415 memset(&info->icount, 0, sizeof(info->icount)); 2416 } else { 2417 mutex_lock(&info->port.mutex); 2418 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount)); 2419 mutex_unlock(&info->port.mutex); 2420 if (err) 2421 return -EFAULT; 2422 } 2423 2424 return 0; 2425 2426} /* end of mgsl_get_stats() */ 2427 2428/* mgsl_get_params() 2429 * 2430 * get the current serial parameters information 2431 * 2432 * Arguments: info pointer to device instance data 2433 * user_params pointer to buffer to hold returned params 2434 * 2435 * Return Value: 0 if success, otherwise error code 2436 */ 2437static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params) 2438{ 2439 int err; 2440 if (debug_level >= DEBUG_LEVEL_INFO) 2441 printk("%s(%d):mgsl_get_params(%s)\n", 2442 __FILE__,__LINE__, info->device_name); 2443 2444 mutex_lock(&info->port.mutex); 2445 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS)); 2446 mutex_unlock(&info->port.mutex); 2447 if (err) { 2448 if ( debug_level >= DEBUG_LEVEL_INFO ) 2449 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n", 2450 __FILE__,__LINE__,info->device_name); 2451 return -EFAULT; 2452 } 2453 2454 return 0; 2455 2456} /* end of mgsl_get_params() */ 2457 2458/* mgsl_set_params() 2459 * 2460 * set the serial parameters 2461 * 2462 * Arguments: 2463 * 2464 * info pointer to device instance data 2465 * new_params user buffer containing new serial params 2466 * 2467 * Return Value: 0 if success, otherwise error code 2468 */ 2469static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params) 2470{ 2471 unsigned long flags; 2472 MGSL_PARAMS tmp_params; 2473 int err; 2474 2475 if (debug_level >= DEBUG_LEVEL_INFO) 2476 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__, 2477 info->device_name ); 2478 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS)); 2479 if (err) { 2480 if ( debug_level >= DEBUG_LEVEL_INFO ) 2481 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n", 2482 __FILE__,__LINE__,info->device_name); 2483 return -EFAULT; 2484 } 2485 2486 mutex_lock(&info->port.mutex); 2487 spin_lock_irqsave(&info->irq_spinlock,flags); 2488 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); 2489 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2490 2491 mgsl_change_params(info); 2492 mutex_unlock(&info->port.mutex); 2493 2494 return 0; 2495 2496} /* end of mgsl_set_params() */ 2497 2498/* mgsl_get_txidle() 2499 * 2500 * get the current transmit idle mode 2501 * 2502 * Arguments: info pointer to device instance data 2503 * idle_mode pointer to buffer to hold returned idle mode 2504 * 2505 * Return Value: 0 if success, otherwise error code 2506 */ 2507static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode) 2508{ 2509 int err; 2510 2511 if (debug_level >= DEBUG_LEVEL_INFO) 2512 printk("%s(%d):mgsl_get_txidle(%s)=%d\n", 2513 __FILE__,__LINE__, info->device_name,
info->idle_mode); 2514 2515 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int)); 2516 if (err) { 2517 if ( debug_level >= DEBUG_LEVEL_INFO ) 2518 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n", 2519 __FILE__,__LINE__,info->device_name); 2520 return -EFAULT; 2521 } 2522 2523 return 0; 2524 2525} /* end of mgsl_get_txidle() */ 2526 2527/* mgsl_set_txidle() service ioctl to set transmit idle mode 2528 * 2529 * Arguments: info pointer to device instance data 2530 * idle_mode new idle mode 2531 * 2532 * Return Value: 0 if success, otherwise error code 2533 */ 2534static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode) 2535{ 2536 unsigned long flags; 2537 2538 if (debug_level >= DEBUG_LEVEL_INFO) 2539 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__, 2540 info->device_name, idle_mode ); 2541 2542 spin_lock_irqsave(&info->irq_spinlock,flags); 2543 info->idle_mode = idle_mode; 2544 usc_set_txidle( info ); 2545 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2546 return 0; 2547 2548} /* end of mgsl_set_txidle() */ 2549 2550/* mgsl_txenable() 2551 * 2552 * enable or disable the transmitter 2553 * 2554 * Arguments: 2555 * 2556 * info pointer to device instance data 2557 * enable 1 = enable, 0 = disable 2558 * 2559 * Return Value: 0 if success, otherwise error code 2560 */ 2561static int mgsl_txenable(struct mgsl_struct * info, int enable) 2562{ 2563 unsigned long flags; 2564 2565 if (debug_level >= DEBUG_LEVEL_INFO) 2566 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__, 2567 info->device_name, enable); 2568 2569 spin_lock_irqsave(&info->irq_spinlock,flags); 2570 if ( enable ) { 2571 if ( !info->tx_enabled ) { 2572 2573 usc_start_transmitter(info); 2574 /*-------------------------------------------------- 2575 * if HDLC/SDLC Loop mode, attempt to insert the 2576 * station in the 'loop' by setting CMR:13. 
Upon 2577 * receipt of the next GoAhead (RxAbort) sequence, 2578 * the OnLoop indicator (CCSR:7) should go active 2579 * to indicate that we are on the loop 2580 *--------------------------------------------------*/ 2581 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) 2582 usc_loopmode_insert_request( info ); 2583 } 2584 } else { 2585 if ( info->tx_enabled ) 2586 usc_stop_transmitter(info); 2587 } 2588 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2589 return 0; 2590 2591} /* end of mgsl_txenable() */ 2592 2593/* mgsl_txabort() abort transmission of the current HDLC frame 2594 * 2595 * Arguments: info pointer to device instance data 2596 * Return Value: 0 if success, otherwise error code 2597 */ 2598static int mgsl_txabort(struct mgsl_struct * info) 2599{ 2600 unsigned long flags; 2601 2602 if (debug_level >= DEBUG_LEVEL_INFO) 2603 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__, 2604 info->device_name); 2605 2606 spin_lock_irqsave(&info->irq_spinlock,flags); 2607 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC ) 2608 { 2609 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) 2610 usc_loopmode_cancel_transmit( info ); 2611 else 2612 usc_TCmd(info,TCmd_SendAbort); 2613 } 2614 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2615 return 0; 2616 2617} /* end of mgsl_txabort() */ 2618 2619/* mgsl_rxenable() enable or disable the receiver 2620 * 2621 * Arguments: info pointer to device instance data 2622 * enable 1 = enable, 0 = disable 2623 * Return Value: 0 if success, otherwise error code 2624 */ 2625static int mgsl_rxenable(struct mgsl_struct * info, int enable) 2626{ 2627 unsigned long flags; 2628 2629 if (debug_level >= DEBUG_LEVEL_INFO) 2630 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__, 2631 info->device_name, enable); 2632 2633 spin_lock_irqsave(&info->irq_spinlock,flags); 2634 if ( enable ) { 2635 if ( !info->rx_enabled ) 2636 usc_start_receiver(info); 2637 } else { 2638 if ( info->rx_enabled ) 2639 usc_stop_receiver(info); 2640 } 2641 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2642 return 0; 2643 2644} /* end of mgsl_rxenable() */ 2645 2646/* mgsl_wait_event() wait for specified event to occur 2647 * 2648 * Arguments: info pointer to device instance data 2649 * mask pointer to bitmask of events to wait for 2650 * Return Value: 0 if successful and bit mask updated with 2651 * the events triggered, 2652 * otherwise error code 2653 */ 2654static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr) 2655{ 2656 unsigned long flags; 2657 int s; 2658 int rc=0; 2659 struct mgsl_icount cprev, cnow; 2660 int events; 2661 int mask; 2662 struct _input_signal_events oldsigs, newsigs; 2663 DECLARE_WAITQUEUE(wait, current); 2664 2665 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int)); 2666 if (rc) { 2667 return -EFAULT; 2668 } 2669 2670 if (debug_level >= DEBUG_LEVEL_INFO) 2671 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__, 2672 info->device_name, mask); 2673 2674 spin_lock_irqsave(&info->irq_spinlock,flags); 2675 2676 /* return immediately if state matches requested events */ 2677 usc_get_serial_signals(info); 2678 s = info->serial_signals; 2679 events = mask & 2680 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) + 2681 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) + 2682 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) + 2683 ((s & SerialSignal_RI) ?
MgslEvent_RiActive :MgslEvent_RiInactive) ); 2684 if (events) { 2685 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2686 goto exit; 2687 } 2688 2689 /* save current irq counts */ 2690 cprev = info->icount; 2691 oldsigs = info->input_signal_events; 2692 2693 /* enable hunt and idle irqs if needed */ 2694 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) { 2695 u16 oldreg = usc_InReg(info,RICR); 2696 u16 newreg = oldreg + 2697 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) + 2698 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0); 2699 if (oldreg != newreg) 2700 usc_OutReg(info, RICR, newreg); 2701 } 2702 2703 set_current_state(TASK_INTERRUPTIBLE); 2704 add_wait_queue(&info->event_wait_q, &wait); 2705 2706 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2707 2708 2709 for(;;) { 2710 schedule(); 2711 if (signal_pending(current)) { 2712 rc = -ERESTARTSYS; 2713 break; 2714 } 2715 2716 /* get current irq counts */ 2717 spin_lock_irqsave(&info->irq_spinlock,flags); 2718 cnow = info->icount; 2719 newsigs = info->input_signal_events; 2720 set_current_state(TASK_INTERRUPTIBLE); 2721 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2722 2723 /* if no change, wait aborted for some reason */ 2724 if (newsigs.dsr_up == oldsigs.dsr_up && 2725 newsigs.dsr_down == oldsigs.dsr_down && 2726 newsigs.dcd_up == oldsigs.dcd_up && 2727 newsigs.dcd_down == oldsigs.dcd_down && 2728 newsigs.cts_up == oldsigs.cts_up && 2729 newsigs.cts_down == oldsigs.cts_down && 2730 newsigs.ri_up == oldsigs.ri_up && 2731 newsigs.ri_down == oldsigs.ri_down && 2732 cnow.exithunt == cprev.exithunt && 2733 cnow.rxidle == cprev.rxidle) { 2734 rc = -EIO; 2735 break; 2736 } 2737 2738 events = mask & 2739 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) + 2740 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) + 2741 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) + 2742 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) + 2743 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) + 2744 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) + 2745 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) + 2746 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) + 2747 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) + 2748 (cnow.rxidle != cprev.rxidle ? 
MgslEvent_IdleReceived:0) ); 2749 if (events) 2750 break; 2751 2752 cprev = cnow; 2753 oldsigs = newsigs; 2754 } 2755 2756 remove_wait_queue(&info->event_wait_q, &wait); 2757 set_current_state(TASK_RUNNING); 2758 2759 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) { 2760 spin_lock_irqsave(&info->irq_spinlock,flags); 2761 if (!waitqueue_active(&info->event_wait_q)) { 2762 /* disable enable exit hunt mode/idle rcvd IRQs */ 2763 usc_OutReg(info, RICR, usc_InReg(info,RICR) & 2764 ~(RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED)); 2765 } 2766 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2767 } 2768exit: 2769 if ( rc == 0 ) 2770 PUT_USER(rc, events, mask_ptr); 2771 2772 return rc; 2773 2774} /* end of mgsl_wait_event() */ 2775 2776static int modem_input_wait(struct mgsl_struct *info,int arg) 2777{ 2778 unsigned long flags; 2779 int rc; 2780 struct mgsl_icount cprev, cnow; 2781 DECLARE_WAITQUEUE(wait, current); 2782 2783 /* save current irq counts */ 2784 spin_lock_irqsave(&info->irq_spinlock,flags); 2785 cprev = info->icount; 2786 add_wait_queue(&info->status_event_wait_q, &wait); 2787 set_current_state(TASK_INTERRUPTIBLE); 2788 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2789 2790 for(;;) { 2791 schedule(); 2792 if (signal_pending(current)) { 2793 rc = -ERESTARTSYS; 2794 break; 2795 } 2796 2797 /* get new irq counts */ 2798 spin_lock_irqsave(&info->irq_spinlock,flags); 2799 cnow = info->icount; 2800 set_current_state(TASK_INTERRUPTIBLE); 2801 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2802 2803 /* if no change, wait aborted for some reason */ 2804 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && 2805 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) { 2806 rc = -EIO; 2807 break; 2808 } 2809 2810 /* check for change in caller specified modem input */ 2811 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) || 2812 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) || 2813 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) || 2814 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) { 2815 rc = 0; 2816 break; 2817 } 2818 2819 cprev = cnow; 2820 } 2821 remove_wait_queue(&info->status_event_wait_q, &wait); 2822 set_current_state(TASK_RUNNING); 2823 return rc; 2824} 2825 2826/* return the state of the serial control and status signals 2827 */ 2828static int tiocmget(struct tty_struct *tty) 2829{ 2830 struct mgsl_struct *info = tty->driver_data; 2831 unsigned int result; 2832 unsigned long flags; 2833 2834 spin_lock_irqsave(&info->irq_spinlock,flags); 2835 usc_get_serial_signals(info); 2836 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2837 2838 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) + 2839 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) + 2840 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) + 2841 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) + 2842 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) + 2843 ((info->serial_signals & SerialSignal_CTS) ? 
TIOCM_CTS:0); 2844 2845 if (debug_level >= DEBUG_LEVEL_INFO) 2846 printk("%s(%d):%s tiocmget() value=%08X\n", 2847 __FILE__,__LINE__, info->device_name, result ); 2848 return result; 2849} 2850 2851/* set modem control signals (DTR/RTS) 2852 */ 2853static int tiocmset(struct tty_struct *tty, 2854 unsigned int set, unsigned int clear) 2855{ 2856 struct mgsl_struct *info = tty->driver_data; 2857 unsigned long flags; 2858 2859 if (debug_level >= DEBUG_LEVEL_INFO) 2860 printk("%s(%d):%s tiocmset(%x,%x)\n", 2861 __FILE__,__LINE__,info->device_name, set, clear); 2862 2863 if (set & TIOCM_RTS) 2864 info->serial_signals |= SerialSignal_RTS; 2865 if (set & TIOCM_DTR) 2866 info->serial_signals |= SerialSignal_DTR; 2867 if (clear & TIOCM_RTS) 2868 info->serial_signals &= ~SerialSignal_RTS; 2869 if (clear & TIOCM_DTR) 2870 info->serial_signals &= ~SerialSignal_DTR; 2871 2872 spin_lock_irqsave(&info->irq_spinlock,flags); 2873 usc_set_serial_signals(info); 2874 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2875 2876 return 0; 2877} 2878 2879/* mgsl_break() Set or clear transmit break condition 2880 * 2881 * Arguments: tty pointer to tty instance data 2882 * break_state -1=set break condition, 0=clear 2883 * Return Value: error code 2884 */ 2885static int mgsl_break(struct tty_struct *tty, int break_state) 2886{ 2887 struct mgsl_struct * info = tty->driver_data; 2888 unsigned long flags; 2889 2890 if (debug_level >= DEBUG_LEVEL_INFO) 2891 printk("%s(%d):mgsl_break(%s,%d)\n", 2892 __FILE__,__LINE__, info->device_name, break_state); 2893 2894 if (mgsl_paranoia_check(info, tty->name, "mgsl_break")) 2895 return -EINVAL; 2896 2897 spin_lock_irqsave(&info->irq_spinlock,flags); 2898 if (break_state == -1) 2899 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7)); 2900 else 2901 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7)); 2902 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2903 return 0; 2904 2905} /* end of mgsl_break() */ 2906 2907/* 2908 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) 2909 * Return: write counters to the user passed counter struct 2910 * NB: both 1->0 and 0->1 transitions are counted except for 2911 * RI where only 0->1 is counted. 
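 *
 * For illustration only (not part of the original sources), user space
 * normally reaches this handler through the generic TIOCGICOUNT ioctl
 * on an open descriptor (fd below) for one of these tty devices, e.g.:
 *
 *   #include <stdio.h>
 *   #include <sys/ioctl.h>
 *   #include <linux/serial.h>
 *
 *   struct serial_icounter_struct icnt;
 *   if (ioctl(fd, TIOCGICOUNT, &icnt) == 0)
 *       printf("dcd=%d cts=%d dsr=%d rng=%d\n",
 *              icnt.dcd, icnt.cts, icnt.dsr, icnt.rng);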
2912 */ 2913static int msgl_get_icount(struct tty_struct *tty, 2914 struct serial_icounter_struct *icount) 2915 2916{ 2917 struct mgsl_struct * info = tty->driver_data; 2918 struct mgsl_icount cnow; /* kernel counter temps */ 2919 unsigned long flags; 2920 2921 spin_lock_irqsave(&info->irq_spinlock,flags); 2922 cnow = info->icount; 2923 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2924 2925 icount->cts = cnow.cts; 2926 icount->dsr = cnow.dsr; 2927 icount->rng = cnow.rng; 2928 icount->dcd = cnow.dcd; 2929 icount->rx = cnow.rx; 2930 icount->tx = cnow.tx; 2931 icount->frame = cnow.frame; 2932 icount->overrun = cnow.overrun; 2933 icount->parity = cnow.parity; 2934 icount->brk = cnow.brk; 2935 icount->buf_overrun = cnow.buf_overrun; 2936 return 0; 2937} 2938 2939/* mgsl_ioctl() Service an IOCTL request 2940 * 2941 * Arguments: 2942 * 2943 * tty pointer to tty instance data 2944 * cmd IOCTL command code 2945 * arg command argument/context 2946 * 2947 * Return Value: 0 if success, otherwise error code 2948 */ 2949static int mgsl_ioctl(struct tty_struct *tty, 2950 unsigned int cmd, unsigned long arg) 2951{ 2952 struct mgsl_struct * info = tty->driver_data; 2953 2954 if (debug_level >= DEBUG_LEVEL_INFO) 2955 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__, 2956 info->device_name, cmd ); 2957 2958 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl")) 2959 return -ENODEV; 2960 2961 if (cmd != TIOCMIWAIT) { 2962 if (tty_io_error(tty)) 2963 return -EIO; 2964 } 2965 2966 return mgsl_ioctl_common(info, cmd, arg); 2967} 2968 2969static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg) 2970{ 2971 void __user *argp = (void __user *)arg; 2972 2973 switch (cmd) { 2974 case MGSL_IOCGPARAMS: 2975 return mgsl_get_params(info, argp); 2976 case MGSL_IOCSPARAMS: 2977 return mgsl_set_params(info, argp); 2978 case MGSL_IOCGTXIDLE: 2979 return mgsl_get_txidle(info, argp); 2980 case MGSL_IOCSTXIDLE: 2981 return mgsl_set_txidle(info,(int)arg); 2982 case MGSL_IOCTXENABLE: 2983 return mgsl_txenable(info,(int)arg); 2984 case MGSL_IOCRXENABLE: 2985 return mgsl_rxenable(info,(int)arg); 2986 case MGSL_IOCTXABORT: 2987 return mgsl_txabort(info); 2988 case MGSL_IOCGSTATS: 2989 return mgsl_get_stats(info, argp); 2990 case MGSL_IOCWAITEVENT: 2991 return mgsl_wait_event(info, argp); 2992 case MGSL_IOCLOOPTXDONE: 2993 return mgsl_loopmode_send_done(info); 2994 /* Wait for modem input (DCD,RI,DSR,CTS) change 2995 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS) 2996 */ 2997 case TIOCMIWAIT: 2998 return modem_input_wait(info,(int)arg); 2999 3000 default: 3001 return -ENOIOCTLCMD; 3002 } 3003 return 0; 3004} 3005 3006/* mgsl_set_termios() 3007 * 3008 * Set new termios settings 3009 * 3010 * Arguments: 3011 * 3012 * tty pointer to tty structure 3013 * termios pointer to buffer to hold returned old termios 3014 * 3015 * Return Value: None 3016 */ 3017static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios) 3018{ 3019 struct mgsl_struct *info = tty->driver_data; 3020 unsigned long flags; 3021 3022 if (debug_level >= DEBUG_LEVEL_INFO) 3023 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__, 3024 tty->driver->name ); 3025 3026 mgsl_change_params(info); 3027 3028 /* Handle transition to B0 status */ 3029 if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) { 3030 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); 3031 spin_lock_irqsave(&info->irq_spinlock,flags); 3032 usc_set_serial_signals(info); 3033 
spin_unlock_irqrestore(&info->irq_spinlock,flags); 3034 } 3035 3036 /* Handle transition away from B0 status */ 3037 if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) { 3038 info->serial_signals |= SerialSignal_DTR; 3039 if (!C_CRTSCTS(tty) || !tty_throttled(tty)) 3040 info->serial_signals |= SerialSignal_RTS; 3041 spin_lock_irqsave(&info->irq_spinlock,flags); 3042 usc_set_serial_signals(info); 3043 spin_unlock_irqrestore(&info->irq_spinlock,flags); 3044 } 3045 3046 /* Handle turning off CRTSCTS */ 3047 if (old_termios->c_cflag & CRTSCTS && !C_CRTSCTS(tty)) { 3048 tty->hw_stopped = 0; 3049 mgsl_start(tty); 3050 } 3051 3052} /* end of mgsl_set_termios() */ 3053 3054/* mgsl_close() 3055 * 3056 * Called when port is closed. Wait for remaining data to be 3057 * sent. Disable port and free resources. 3058 * 3059 * Arguments: 3060 * 3061 * tty pointer to open tty structure 3062 * filp pointer to open file object 3063 * 3064 * Return Value: None 3065 */ 3066static void mgsl_close(struct tty_struct *tty, struct file * filp) 3067{ 3068 struct mgsl_struct * info = tty->driver_data; 3069 3070 if (mgsl_paranoia_check(info, tty->name, "mgsl_close")) 3071 return; 3072 3073 if (debug_level >= DEBUG_LEVEL_INFO) 3074 printk("%s(%d):mgsl_close(%s) entry, count=%d\n", 3075 __FILE__,__LINE__, info->device_name, info->port.count); 3076 3077 if (tty_port_close_start(&info->port, tty, filp) == 0) 3078 goto cleanup; 3079 3080 mutex_lock(&info->port.mutex); 3081 if (tty_port_initialized(&info->port)) 3082 mgsl_wait_until_sent(tty, info->timeout); 3083 mgsl_flush_buffer(tty); 3084 tty_ldisc_flush(tty); 3085 shutdown(info); 3086 mutex_unlock(&info->port.mutex); 3087 3088 tty_port_close_end(&info->port, tty); 3089 info->port.tty = NULL; 3090cleanup: 3091 if (debug_level >= DEBUG_LEVEL_INFO) 3092 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__, 3093 tty->driver->name, info->port.count); 3094 3095} /* end of mgsl_close() */ 3096 3097/* mgsl_wait_until_sent() 3098 * 3099 * Wait until the transmitter is empty. 3100 * 3101 * Arguments: 3102 * 3103 * tty pointer to tty info structure 3104 * timeout time to wait for send completion 3105 * 3106 * Return Value: None 3107 */ 3108static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout) 3109{ 3110 struct mgsl_struct * info = tty->driver_data; 3111 unsigned long orig_jiffies, char_time; 3112 3113 if (!info ) 3114 return; 3115 3116 if (debug_level >= DEBUG_LEVEL_INFO) 3117 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n", 3118 __FILE__,__LINE__, info->device_name ); 3119 3120 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent")) 3121 return; 3122 3123 if (!tty_port_initialized(&info->port)) 3124 goto exit; 3125 3126 orig_jiffies = jiffies; 3127 3128 /* Set check interval to 1/5 of estimated time to 3129 * send a character, and make it at least 1. The check 3130 * interval should also be less than the timeout. 3131 * Note: use tight timings here to satisfy the NIST-PCTS. 
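 *
 * Illustrative figures (assumed, not from the original note): with
 * HZ=1000, 10 bits per character and a 300bps data rate, info->timeout
 * is roughly 1086 jiffies for a full 32 byte FIFO, so the check
 * interval below becomes 1086 / (32 * 5) = 6 jiffies, about one fifth
 * of a single character time.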
3132 */ 3133 3134 if ( info->params.data_rate ) { 3135 char_time = info->timeout/(32 * 5); 3136 if (!char_time) 3137 char_time++; 3138 } else 3139 char_time = 1; 3140 3141 if (timeout) 3142 char_time = min_t(unsigned long, char_time, timeout); 3143 3144 if ( info->params.mode == MGSL_MODE_HDLC || 3145 info->params.mode == MGSL_MODE_RAW ) { 3146 while (info->tx_active) { 3147 msleep_interruptible(jiffies_to_msecs(char_time)); 3148 if (signal_pending(current)) 3149 break; 3150 if (timeout && time_after(jiffies, orig_jiffies + timeout)) 3151 break; 3152 } 3153 } else { 3154 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) && 3155 info->tx_enabled) { 3156 msleep_interruptible(jiffies_to_msecs(char_time)); 3157 if (signal_pending(current)) 3158 break; 3159 if (timeout && time_after(jiffies, orig_jiffies + timeout)) 3160 break; 3161 } 3162 } 3163 3164exit: 3165 if (debug_level >= DEBUG_LEVEL_INFO) 3166 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n", 3167 __FILE__,__LINE__, info->device_name ); 3168 3169} /* end of mgsl_wait_until_sent() */ 3170 3171/* mgsl_hangup() 3172 * 3173 * Called by tty_hangup() when a hangup is signaled. 3174 * This is the same as closing all open files for the port. 3175 * 3176 * Arguments: tty pointer to associated tty object 3177 * Return Value: None 3178 */ 3179static void mgsl_hangup(struct tty_struct *tty) 3180{ 3181 struct mgsl_struct * info = tty->driver_data; 3182 3183 if (debug_level >= DEBUG_LEVEL_INFO) 3184 printk("%s(%d):mgsl_hangup(%s)\n", 3185 __FILE__,__LINE__, info->device_name ); 3186 3187 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup")) 3188 return; 3189 3190 mgsl_flush_buffer(tty); 3191 shutdown(info); 3192 3193 info->port.count = 0; 3194 tty_port_set_active(&info->port, 0); 3195 info->port.tty = NULL; 3196 3197 wake_up_interruptible(&info->port.open_wait); 3198 3199} /* end of mgsl_hangup() */ 3200 3201/* 3202 * carrier_raised() 3203 * 3204 * Return true if carrier is raised 3205 */ 3206 3207static int carrier_raised(struct tty_port *port) 3208{ 3209 unsigned long flags; 3210 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port); 3211 3212 spin_lock_irqsave(&info->irq_spinlock, flags); 3213 usc_get_serial_signals(info); 3214 spin_unlock_irqrestore(&info->irq_spinlock, flags); 3215 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0; 3216} 3217 3218static void dtr_rts(struct tty_port *port, int on) 3219{ 3220 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port); 3221 unsigned long flags; 3222 3223 spin_lock_irqsave(&info->irq_spinlock,flags); 3224 if (on) 3225 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR; 3226 else 3227 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR); 3228 usc_set_serial_signals(info); 3229 spin_unlock_irqrestore(&info->irq_spinlock,flags); 3230} 3231 3232 3233/* block_til_ready() 3234 * 3235 * Block the current process until the specified port 3236 * is ready to be opened.
3237 * 3238 * Arguments: 3239 * 3240 * tty pointer to tty info structure 3241 * filp pointer to open file object 3242 * info pointer to device instance data 3243 * 3244 * Return Value: 0 if success, otherwise error code 3245 */ 3246static int block_til_ready(struct tty_struct *tty, struct file * filp, 3247 struct mgsl_struct *info) 3248{ 3249 DECLARE_WAITQUEUE(wait, current); 3250 int retval; 3251 bool do_clocal = false; 3252 unsigned long flags; 3253 int dcd; 3254 struct tty_port *port = &info->port; 3255 3256 if (debug_level >= DEBUG_LEVEL_INFO) 3257 printk("%s(%d):block_til_ready on %s\n", 3258 __FILE__,__LINE__, tty->driver->name ); 3259 3260 if (filp->f_flags & O_NONBLOCK || tty_io_error(tty)) { 3261 /* nonblock mode is set or port is not enabled */ 3262 tty_port_set_active(port, 1); 3263 return 0; 3264 } 3265 3266 if (C_CLOCAL(tty)) 3267 do_clocal = true; 3268 3269 /* Wait for carrier detect and the line to become 3270 * free (i.e., not in use by the callout). While we are in 3271 * this loop, port->count is dropped by one, so that 3272 * mgsl_close() knows when to free things. We restore it upon 3273 * exit, either normal or abnormal. 3274 */ 3275 3276 retval = 0; 3277 add_wait_queue(&port->open_wait, &wait); 3278 3279 if (debug_level >= DEBUG_LEVEL_INFO) 3280 printk("%s(%d):block_til_ready before block on %s count=%d\n", 3281 __FILE__,__LINE__, tty->driver->name, port->count ); 3282 3283 spin_lock_irqsave(&info->irq_spinlock, flags); 3284 port->count--; 3285 spin_unlock_irqrestore(&info->irq_spinlock, flags); 3286 port->blocked_open++; 3287 3288 while (1) { 3289 if (C_BAUD(tty) && tty_port_initialized(port)) 3290 tty_port_raise_dtr_rts(port); 3291 3292 set_current_state(TASK_INTERRUPTIBLE); 3293 3294 if (tty_hung_up_p(filp) || !tty_port_initialized(port)) { 3295 retval = (port->flags & ASYNC_HUP_NOTIFY) ? 
3296 -EAGAIN : -ERESTARTSYS; 3297 break; 3298 } 3299 3300 dcd = tty_port_carrier_raised(&info->port); 3301 if (do_clocal || dcd) 3302 break; 3303 3304 if (signal_pending(current)) { 3305 retval = -ERESTARTSYS; 3306 break; 3307 } 3308 3309 if (debug_level >= DEBUG_LEVEL_INFO) 3310 printk("%s(%d):block_til_ready blocking on %s count=%d\n", 3311 __FILE__,__LINE__, tty->driver->name, port->count ); 3312 3313 tty_unlock(tty); 3314 schedule(); 3315 tty_lock(tty); 3316 } 3317 3318 set_current_state(TASK_RUNNING); 3319 remove_wait_queue(&port->open_wait, &wait); 3320 3321 /* FIXME: Racy on hangup during close wait */ 3322 if (!tty_hung_up_p(filp)) 3323 port->count++; 3324 port->blocked_open--; 3325 3326 if (debug_level >= DEBUG_LEVEL_INFO) 3327 printk("%s(%d):block_til_ready after blocking on %s count=%d\n", 3328 __FILE__,__LINE__, tty->driver->name, port->count ); 3329 3330 if (!retval) 3331 tty_port_set_active(port, 1); 3332 3333 return retval; 3334 3335} /* end of block_til_ready() */ 3336 3337static int mgsl_install(struct tty_driver *driver, struct tty_struct *tty) 3338{ 3339 struct mgsl_struct *info; 3340 int line = tty->index; 3341 3342 /* verify range of specified line number */ 3343 if (line >= mgsl_device_count) { 3344 printk("%s(%d):mgsl_open with invalid line #%d.\n", 3345 __FILE__, __LINE__, line); 3346 return -ENODEV; 3347 } 3348 3349 /* find the info structure for the specified line */ 3350 info = mgsl_device_list; 3351 while (info && info->line != line) 3352 info = info->next_device; 3353 if (mgsl_paranoia_check(info, tty->name, "mgsl_open")) 3354 return -ENODEV; 3355 tty->driver_data = info; 3356 3357 return tty_port_install(&info->port, driver, tty); 3358} 3359 3360/* mgsl_open() 3361 * 3362 * Called when a port is opened. Init and enable port. 3363 * Perform serial-specific initialization for the tty structure. 3364 * 3365 * Arguments: tty pointer to tty info structure 3366 * filp associated file pointer 3367 * 3368 * Return Value: 0 if success, otherwise error code 3369 */ 3370static int mgsl_open(struct tty_struct *tty, struct file * filp) 3371{ 3372 struct mgsl_struct *info = tty->driver_data; 3373 unsigned long flags; 3374 int retval; 3375 3376 info->port.tty = tty; 3377 3378 if (debug_level >= DEBUG_LEVEL_INFO) 3379 printk("%s(%d):mgsl_open(%s), old ref count = %d\n", 3380 __FILE__,__LINE__,tty->driver->name, info->port.count); 3381 3382 info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 
1 : 0; 3383 3384 spin_lock_irqsave(&info->netlock, flags); 3385 if (info->netcount) { 3386 retval = -EBUSY; 3387 spin_unlock_irqrestore(&info->netlock, flags); 3388 goto cleanup; 3389 } 3390 info->port.count++; 3391 spin_unlock_irqrestore(&info->netlock, flags); 3392 3393 if (info->port.count == 1) { 3394 /* 1st open on this device, init hardware */ 3395 retval = startup(info); 3396 if (retval < 0) 3397 goto cleanup; 3398 } 3399 3400 retval = block_til_ready(tty, filp, info); 3401 if (retval) { 3402 if (debug_level >= DEBUG_LEVEL_INFO) 3403 printk("%s(%d):block_til_ready(%s) returned %d\n", 3404 __FILE__,__LINE__, info->device_name, retval); 3405 goto cleanup; 3406 } 3407 3408 if (debug_level >= DEBUG_LEVEL_INFO) 3409 printk("%s(%d):mgsl_open(%s) success\n", 3410 __FILE__,__LINE__, info->device_name); 3411 retval = 0; 3412 3413cleanup: 3414 if (retval) { 3415 if (tty->count == 1) 3416 info->port.tty = NULL; /* tty layer will release tty struct */ 3417 if(info->port.count) 3418 info->port.count--; 3419 } 3420 3421 return retval; 3422 3423} /* end of mgsl_open() */ 3424 3425/* 3426 * /proc fs routines.... 3427 */ 3428 3429static inline void line_info(struct seq_file *m, struct mgsl_struct *info) 3430{ 3431 char stat_buf[30]; 3432 unsigned long flags; 3433 3434 seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X", 3435 info->device_name, info->io_base, info->irq_level, 3436 info->phys_memory_base, info->phys_lcr_base); 3437 3438 /* output current serial signal states */ 3439 spin_lock_irqsave(&info->irq_spinlock,flags); 3440 usc_get_serial_signals(info); 3441 spin_unlock_irqrestore(&info->irq_spinlock,flags); 3442 3443 stat_buf[0] = 0; 3444 stat_buf[1] = 0; 3445 if (info->serial_signals & SerialSignal_RTS) 3446 strcat(stat_buf, "|RTS"); 3447 if (info->serial_signals & SerialSignal_CTS) 3448 strcat(stat_buf, "|CTS"); 3449 if (info->serial_signals & SerialSignal_DTR) 3450 strcat(stat_buf, "|DTR"); 3451 if (info->serial_signals & SerialSignal_DSR) 3452 strcat(stat_buf, "|DSR"); 3453 if (info->serial_signals & SerialSignal_DCD) 3454 strcat(stat_buf, "|CD"); 3455 if (info->serial_signals & SerialSignal_RI) 3456 strcat(stat_buf, "|RI"); 3457 3458 if (info->params.mode == MGSL_MODE_HDLC || 3459 info->params.mode == MGSL_MODE_RAW ) { 3460 seq_printf(m, " HDLC txok:%d rxok:%d", 3461 info->icount.txok, info->icount.rxok); 3462 if (info->icount.txunder) 3463 seq_printf(m, " txunder:%d", info->icount.txunder); 3464 if (info->icount.txabort) 3465 seq_printf(m, " txabort:%d", info->icount.txabort); 3466 if (info->icount.rxshort) 3467 seq_printf(m, " rxshort:%d", info->icount.rxshort); 3468 if (info->icount.rxlong) 3469 seq_printf(m, " rxlong:%d", info->icount.rxlong); 3470 if (info->icount.rxover) 3471 seq_printf(m, " rxover:%d", info->icount.rxover); 3472 if (info->icount.rxcrc) 3473 seq_printf(m, " rxcrc:%d", info->icount.rxcrc); 3474 } else { 3475 seq_printf(m, " ASYNC tx:%d rx:%d", 3476 info->icount.tx, info->icount.rx); 3477 if (info->icount.frame) 3478 seq_printf(m, " fe:%d", info->icount.frame); 3479 if (info->icount.parity) 3480 seq_printf(m, " pe:%d", info->icount.parity); 3481 if (info->icount.brk) 3482 seq_printf(m, " brk:%d", info->icount.brk); 3483 if (info->icount.overrun) 3484 seq_printf(m, " oe:%d", info->icount.overrun); 3485 } 3486 3487 /* Append serial signal status to end */ 3488 seq_printf(m, " %s\n", stat_buf+1); 3489 3490 seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n", 3491 info->tx_active,info->bh_requested,info->bh_running, 3492 info->pending_bh); 3493 3494 
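	/* Snapshot the USC serial and DMA controller registers under the
	 * irq spinlock and append them to the /proc line for this device.
	 */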
spin_lock_irqsave(&info->irq_spinlock,flags); 3495 { 3496 u16 Tcsr = usc_InReg( info, TCSR ); 3497 u16 Tdmr = usc_InDmaReg( info, TDMR ); 3498 u16 Ticr = usc_InReg( info, TICR ); 3499 u16 Rscr = usc_InReg( info, RCSR ); 3500 u16 Rdmr = usc_InDmaReg( info, RDMR ); 3501 u16 Ricr = usc_InReg( info, RICR ); 3502 u16 Icr = usc_InReg( info, ICR ); 3503 u16 Dccr = usc_InReg( info, DCCR ); 3504 u16 Tmr = usc_InReg( info, TMR ); 3505 u16 Tccr = usc_InReg( info, TCCR ); 3506 u16 Ccar = inw( info->io_base + CCAR ); 3507 seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n" 3508 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n", 3509 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar ); 3510 } 3511 spin_unlock_irqrestore(&info->irq_spinlock,flags); 3512} 3513 3514/* Called to print information about devices */ 3515static int mgsl_proc_show(struct seq_file *m, void *v) 3516{ 3517 struct mgsl_struct *info; 3518 3519 seq_printf(m, "synclink driver:%s\n", driver_version); 3520 3521 info = mgsl_device_list; 3522 while( info ) { 3523 line_info(m, info); 3524 info = info->next_device; 3525 } 3526 return 0; 3527} 3528 3529/* mgsl_allocate_dma_buffers() 3530 * 3531 * Allocate and format DMA buffers (ISA adapter) 3532 * or format shared memory buffers (PCI adapter). 3533 * 3534 * Arguments: info pointer to device instance data 3535 * Return Value: 0 if success, otherwise error 3536 */ 3537static int mgsl_allocate_dma_buffers(struct mgsl_struct *info) 3538{ 3539 unsigned short BuffersPerFrame; 3540 3541 info->last_mem_alloc = 0; 3542 3543 /* Calculate the number of DMA buffers necessary to hold the */ 3544 /* largest allowable frame size. Note: If the max frame size is */ 3545 /* not an even multiple of the DMA buffer size then we need to */ 3546 /* round the buffer count per frame up one. */ 3547 3548 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE); 3549 if ( info->max_frame_size % DMABUFFERSIZE ) 3550 BuffersPerFrame++; 3551 3552 /* 3553 * The PCI adapter has 256KBytes of shared memory to use. This is 64 3554 * PAGE_SIZE buffers. 3555 * 3556 * The first page is used for padding at this time so the buffer list 3557 * does not begin at offset 0 of the PCI adapter's shared memory. 3558 * 3559 * The 2nd page is used for the buffer list. A 4K buffer list can hold 3560 * 128 DMA_BUFFER structures at 32 bytes each. 3561 * 3562 * This leaves 62 4K pages. 3563 * 3564 * The next N pages are used for transmit frame(s). We reserve enough 3565 * 4K page blocks to hold the required number of transmit dma buffers 3566 * (num_tx_dma_buffers), each of MaxFrameSize size. 
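 *
 * Illustrative example (assumed values, not from the original text):
 * with max_frame_size = 4096 and num_tx_dma_buffers = 1, a single 4K
 * buffer holds a whole frame, so N = 1 page is reserved for transmit.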
3567 * 3568 * Of the remaining pages (62-N), determine how many can be used to 3569 * receive full MaxFrameSize inbound frames 3570 */ 3571 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame; 3572 info->rx_buffer_count = 62 - info->tx_buffer_count; 3573 3574 if ( debug_level >= DEBUG_LEVEL_INFO ) 3575 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n", 3576 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count); 3577 3578 if ( mgsl_alloc_buffer_list_memory( info ) < 0 || 3579 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 || 3580 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 || 3581 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 || 3582 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) { 3583 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__); 3584 return -ENOMEM; 3585 } 3586 3587 mgsl_reset_rx_dma_buffers( info ); 3588 mgsl_reset_tx_dma_buffers( info ); 3589 3590 return 0; 3591 3592} /* end of mgsl_allocate_dma_buffers() */ 3593 3594/* 3595 * mgsl_alloc_buffer_list_memory() 3596 * 3597 * Allocate a common DMA buffer for use as the 3598 * receive and transmit buffer lists. 3599 * 3600 * A buffer list is a set of buffer entries where each entry contains 3601 * a pointer to an actual buffer and a pointer to the next buffer entry 3602 * (plus some other info about the buffer). 3603 * 3604 * The buffer entries for a list are built to form a circular list so 3605 * that when the entire list has been traversed you start back at the 3606 * beginning. 3607 * 3608 * This function allocates memory for just the buffer entries. 3609 * The links (pointer to next entry) are filled in with the physical 3610 * address of the next entry so the adapter can navigate the list 3611 * using bus master DMA. The pointers to the actual buffers are filled 3612 * out later when the actual buffers are allocated. 3613 * 3614 * Arguments: info pointer to device instance data 3615 * Return Value: 0 if success, otherwise error 3616 */ 3617static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info ) 3618{ 3619 unsigned int i; 3620 3621 /* PCI adapter uses shared memory. */ 3622 info->buffer_list = info->memory_base + info->last_mem_alloc; 3623 info->buffer_list_phys = info->last_mem_alloc; 3624 info->last_mem_alloc += BUFFERLISTSIZE; 3625 3626 /* We got the memory for the buffer entry lists. */ 3627 /* Initialize the memory block to all zeros. */ 3628 memset( info->buffer_list, 0, BUFFERLISTSIZE ); 3629 3630 /* Save virtual address pointers to the receive and */ 3631 /* transmit buffer lists. (Receive 1st). These pointers will */ 3632 /* be used by the processor to access the lists. */ 3633 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list; 3634 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list; 3635 info->tx_buffer_list += info->rx_buffer_count; 3636 3637 /* 3638 * Build the links for the buffer entry lists such that 3639 * two circular lists are built. (Transmit and Receive). 3640 * 3641 * Note: the links are physical addresses 3642 * which are read by the adapter to determine the next 3643 * buffer entry to use. 
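 *
 * A small illustrative example (using the 32 byte entry size quoted
 * earlier, not figures taken from the original comment): with three
 * receive entries, entry 0 at list offset 0 links to offset 32, entry 1
 * links to offset 64, and entry 2 links back to offset 0, closing the
 * circle. The transmit entries form a second, independent circle
 * starting at offset rx_buffer_count * sizeof(DMABUFFERENTRY).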
3644 */ 3645 3646 for ( i = 0; i < info->rx_buffer_count; i++ ) { 3647 /* calculate and store physical address of this buffer entry */ 3648 info->rx_buffer_list[i].phys_entry = 3649 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY)); 3650 3651 /* calculate and store physical address of */ 3652 /* next entry in cirular list of entries */ 3653 3654 info->rx_buffer_list[i].link = info->buffer_list_phys; 3655 3656 if ( i < info->rx_buffer_count - 1 ) 3657 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY); 3658 } 3659 3660 for ( i = 0; i < info->tx_buffer_count; i++ ) { 3661 /* calculate and store physical address of this buffer entry */ 3662 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys + 3663 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY)); 3664 3665 /* calculate and store physical address of */ 3666 /* next entry in cirular list of entries */ 3667 3668 info->tx_buffer_list[i].link = info->buffer_list_phys + 3669 info->rx_buffer_count * sizeof(DMABUFFERENTRY); 3670 3671 if ( i < info->tx_buffer_count - 1 ) 3672 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY); 3673 } 3674 3675 return 0; 3676 3677} /* end of mgsl_alloc_buffer_list_memory() */ 3678 3679/* Free DMA buffers allocated for use as the 3680 * receive and transmit buffer lists. 3681 * Warning: 3682 * 3683 * The data transfer buffers associated with the buffer list 3684 * MUST be freed before freeing the buffer list itself because 3685 * the buffer list contains the information necessary to free 3686 * the individual buffers! 3687 */ 3688static void mgsl_free_buffer_list_memory( struct mgsl_struct *info ) 3689{ 3690 info->buffer_list = NULL; 3691 info->rx_buffer_list = NULL; 3692 info->tx_buffer_list = NULL; 3693 3694} /* end of mgsl_free_buffer_list_memory() */ 3695 3696/* 3697 * mgsl_alloc_frame_memory() 3698 * 3699 * Allocate the frame DMA buffers used by the specified buffer list. 3700 * Each DMA buffer will be one memory page in size. This is necessary 3701 * because memory can fragment enough that it may be impossible 3702 * contiguous pages. 3703 * 3704 * Arguments: 3705 * 3706 * info pointer to device instance data 3707 * BufferList pointer to list of buffer entries 3708 * Buffercount count of buffer entries in buffer list 3709 * 3710 * Return Value: 0 if success, otherwise -ENOMEM 3711 */ 3712static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount) 3713{ 3714 int i; 3715 3716 /* Allocate page sized buffers for the receive buffer list */ 3717 3718 for ( i = 0; i < Buffercount; i++ ) { 3719 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc; 3720 BufferList[i].phys_addr = info->last_mem_alloc; 3721 info->last_mem_alloc += DMABUFFERSIZE; 3722 } 3723 3724 return 0; 3725 3726} /* end of mgsl_alloc_frame_memory() */ 3727 3728/* 3729 * mgsl_free_frame_memory() 3730 * 3731 * Free the buffers associated with 3732 * each buffer entry of a buffer list. 
3733 * 3734 * Arguments: 3735 * 3736 * info pointer to device instance data 3737 * BufferList pointer to list of buffer entries 3738 * Buffercount count of buffer entries in buffer list 3739 * 3740 * Return Value: None 3741 */ 3742static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount) 3743{ 3744 int i; 3745 3746 if ( BufferList ) { 3747 for ( i = 0 ; i < Buffercount ; i++ ) { 3748 if ( BufferList[i].virt_addr ) { 3749 BufferList[i].virt_addr = NULL; 3750 } 3751 } 3752 } 3753 3754} /* end of mgsl_free_frame_memory() */ 3755 3756/* mgsl_free_dma_buffers() 3757 * 3758 * Free DMA buffers 3759 * 3760 * Arguments: info pointer to device instance data 3761 * Return Value: None 3762 */ 3763static void mgsl_free_dma_buffers( struct mgsl_struct *info ) 3764{ 3765 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count ); 3766 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count ); 3767 mgsl_free_buffer_list_memory( info ); 3768 3769} /* end of mgsl_free_dma_buffers() */ 3770 3771 3772/* 3773 * mgsl_alloc_intermediate_rxbuffer_memory() 3774 * 3775 * Allocate a buffer large enough to hold max_frame_size. This buffer 3776 * is used to pass an assembled frame to the line discipline. 3777 * 3778 * Arguments: 3779 * 3780 * info pointer to device instance data 3781 * 3782 * Return Value: 0 if success, otherwise -ENOMEM 3783 */ 3784static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info) 3785{ 3786 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA); 3787 if ( info->intermediate_rxbuffer == NULL ) 3788 return -ENOMEM; 3789 /* unused flag buffer to satisfy receive_buf calling interface */ 3790 info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL); 3791 if (!info->flag_buf) { 3792 kfree(info->intermediate_rxbuffer); 3793 info->intermediate_rxbuffer = NULL; 3794 return -ENOMEM; 3795 } 3796 return 0; 3797 3798} /* end of mgsl_alloc_intermediate_rxbuffer_memory() */ 3799 3800/* 3801 * mgsl_free_intermediate_rxbuffer_memory() 3802 * 3803 * 3804 * Arguments: 3805 * 3806 * info pointer to device instance data 3807 * 3808 * Return Value: None 3809 */ 3810static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info) 3811{ 3812 kfree(info->intermediate_rxbuffer); 3813 info->intermediate_rxbuffer = NULL; 3814 kfree(info->flag_buf); 3815 info->flag_buf = NULL; 3816 3817} /* end of mgsl_free_intermediate_rxbuffer_memory() */ 3818 3819/* 3820 * mgsl_alloc_intermediate_txbuffer_memory() 3821 * 3822 * Allocate intermdiate transmit buffer(s) large enough to hold max_frame_size. 3823 * This buffer is used to load transmit frames into the adapter's dma transfer 3824 * buffers when there is sufficient space. 
3825 * 3826 * Arguments: 3827 * 3828 * info pointer to device instance data 3829 * 3830 * Return Value: 0 if success, otherwise -ENOMEM 3831 */ 3832static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info) 3833{ 3834 int i; 3835 3836 if ( debug_level >= DEBUG_LEVEL_INFO ) 3837 printk("%s %s(%d) allocating %d tx holding buffers\n", 3838 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers); 3839 3840 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers)); 3841 3842 for ( i=0; i<info->num_tx_holding_buffers; ++i) { 3843 info->tx_holding_buffers[i].buffer = 3844 kmalloc(info->max_frame_size, GFP_KERNEL); 3845 if (info->tx_holding_buffers[i].buffer == NULL) { 3846 for (--i; i >= 0; i--) { 3847 kfree(info->tx_holding_buffers[i].buffer); 3848 info->tx_holding_buffers[i].buffer = NULL; 3849 } 3850 return -ENOMEM; 3851 } 3852 } 3853 3854 return 0; 3855 3856} /* end of mgsl_alloc_intermediate_txbuffer_memory() */ 3857 3858/* 3859 * mgsl_free_intermediate_txbuffer_memory() 3860 * 3861 * 3862 * Arguments: 3863 * 3864 * info pointer to device instance data 3865 * 3866 * Return Value: None 3867 */ 3868static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info) 3869{ 3870 int i; 3871 3872 for ( i=0; i<info->num_tx_holding_buffers; ++i ) { 3873 kfree(info->tx_holding_buffers[i].buffer); 3874 info->tx_holding_buffers[i].buffer = NULL; 3875 } 3876 3877 info->get_tx_holding_index = 0; 3878 info->put_tx_holding_index = 0; 3879 info->tx_holding_count = 0; 3880 3881} /* end of mgsl_free_intermediate_txbuffer_memory() */ 3882 3883 3884/* 3885 * load_next_tx_holding_buffer() 3886 * 3887 * attempts to load the next buffered tx request into the 3888 * tx dma buffers 3889 * 3890 * Arguments: 3891 * 3892 * info pointer to device instance data 3893 * 3894 * Return Value: true if next buffered tx request loaded 3895 * into adapter's tx dma buffer, 3896 * false otherwise 3897 */ 3898static bool load_next_tx_holding_buffer(struct mgsl_struct *info) 3899{ 3900 bool ret = false; 3901 3902 if ( info->tx_holding_count ) { 3903 /* determine if we have enough tx dma buffers 3904 * to accommodate the next tx frame 3905 */ 3906 struct tx_holding_buffer *ptx = 3907 &info->tx_holding_buffers[info->get_tx_holding_index]; 3908 int num_free = num_free_tx_dma_buffers(info); 3909 int num_needed = ptx->buffer_size / DMABUFFERSIZE; 3910 if ( ptx->buffer_size % DMABUFFERSIZE ) 3911 ++num_needed; 3912 3913 if (num_needed <= num_free) { 3914 info->xmit_cnt = ptx->buffer_size; 3915 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size); 3916 3917 --info->tx_holding_count; 3918 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers) 3919 info->get_tx_holding_index=0; 3920 3921 /* restart transmit timer */ 3922 mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000)); 3923 3924 ret = true; 3925 } 3926 } 3927 3928 return ret; 3929} 3930 3931/* 3932 * save_tx_buffer_request() 3933 * 3934 * attempt to store transmit frame request for later transmission 3935 * 3936 * Arguments: 3937 * 3938 * info pointer to device instance data 3939 * Buffer pointer to buffer containing frame to load 3940 * BufferSize size in bytes of frame in Buffer 3941 * 3942 * Return Value: 1 if able to store, 0 otherwise 3943 */ 3944static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize) 3945{ 3946 struct tx_holding_buffer *ptx; 3947 3948 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) { 3949 return 0; /* all buffers in use */ 3950 } 
3951 3952 ptx = &info->tx_holding_buffers[info->put_tx_holding_index]; 3953 ptx->buffer_size = BufferSize; 3954 memcpy( ptx->buffer, Buffer, BufferSize); 3955 3956 ++info->tx_holding_count; 3957 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers) 3958 info->put_tx_holding_index=0; 3959 3960 return 1; 3961} 3962 3963static int mgsl_claim_resources(struct mgsl_struct *info) 3964{ 3965 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) { 3966 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n", 3967 __FILE__,__LINE__,info->device_name, info->io_base); 3968 return -ENODEV; 3969 } 3970 info->io_addr_requested = true; 3971 3972 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags, 3973 info->device_name, info ) < 0 ) { 3974 printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n", 3975 __FILE__,__LINE__,info->device_name, info->irq_level ); 3976 goto errout; 3977 } 3978 info->irq_requested = true; 3979 3980 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) { 3981 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n", 3982 __FILE__,__LINE__,info->device_name, info->phys_memory_base); 3983 goto errout; 3984 } 3985 info->shared_mem_requested = true; 3986 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) { 3987 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n", 3988 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset); 3989 goto errout; 3990 } 3991 info->lcr_mem_requested = true; 3992 3993 info->memory_base = ioremap(info->phys_memory_base, 0x40000); 3994 if (!info->memory_base) { 3995 printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n", 3996 __FILE__,__LINE__,info->device_name, info->phys_memory_base ); 3997 goto errout; 3998 } 3999 4000 if ( !mgsl_memory_test(info) ) { 4001 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n", 4002 __FILE__,__LINE__,info->device_name, info->phys_memory_base ); 4003 goto errout; 4004 } 4005 4006 info->lcr_base = ioremap(info->phys_lcr_base, PAGE_SIZE); 4007 if (!info->lcr_base) { 4008 printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n", 4009 __FILE__,__LINE__,info->device_name, info->phys_lcr_base ); 4010 goto errout; 4011 } 4012 info->lcr_base += info->lcr_offset; 4013 4014 if ( mgsl_allocate_dma_buffers(info) < 0 ) { 4015 printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n", 4016 __FILE__,__LINE__,info->device_name, info->dma_level ); 4017 goto errout; 4018 } 4019 4020 return 0; 4021errout: 4022 mgsl_release_resources(info); 4023 return -ENODEV; 4024 4025} /* end of mgsl_claim_resources() */ 4026 4027static void mgsl_release_resources(struct mgsl_struct *info) 4028{ 4029 if ( debug_level >= DEBUG_LEVEL_INFO ) 4030 printk( "%s(%d):mgsl_release_resources(%s) entry\n", 4031 __FILE__,__LINE__,info->device_name ); 4032 4033 if ( info->irq_requested ) { 4034 free_irq(info->irq_level, info); 4035 info->irq_requested = false; 4036 } 4037 if ( info->dma_requested ) { 4038 disable_dma(info->dma_level); 4039 free_dma(info->dma_level); 4040 info->dma_requested = false; 4041 } 4042 mgsl_free_dma_buffers(info); 4043 mgsl_free_intermediate_rxbuffer_memory(info); 4044 mgsl_free_intermediate_txbuffer_memory(info); 4045 4046 if ( info->io_addr_requested ) { 4047 release_region(info->io_base,info->io_addr_size); 4048 info->io_addr_requested = false; 4049 } 4050 if ( info->shared_mem_requested ) { 4051 release_mem_region(info->phys_memory_base,0x40000); 4052 
info->shared_mem_requested = false; 4053 } 4054 if ( info->lcr_mem_requested ) { 4055 release_mem_region(info->phys_lcr_base + info->lcr_offset,128); 4056 info->lcr_mem_requested = false; 4057 } 4058 if (info->memory_base){ 4059 iounmap(info->memory_base); 4060 info->memory_base = NULL; 4061 } 4062 if (info->lcr_base){ 4063 iounmap(info->lcr_base - info->lcr_offset); 4064 info->lcr_base = NULL; 4065 } 4066 4067 if ( debug_level >= DEBUG_LEVEL_INFO ) 4068 printk( "%s(%d):mgsl_release_resources(%s) exit\n", 4069 __FILE__,__LINE__,info->device_name ); 4070 4071} /* end of mgsl_release_resources() */ 4072 4073/* mgsl_add_device() 4074 * 4075 * Add the specified device instance data structure to the 4076 * global linked list of devices and increment the device count. 4077 * 4078 * Arguments: info pointer to device instance data 4079 * Return Value: None 4080 */ 4081static void mgsl_add_device( struct mgsl_struct *info ) 4082{ 4083 info->next_device = NULL; 4084 info->line = mgsl_device_count; 4085 sprintf(info->device_name,"ttySL%d",info->line); 4086 4087 if (info->line < MAX_TOTAL_DEVICES) { 4088 if (maxframe[info->line]) 4089 info->max_frame_size = maxframe[info->line]; 4090 4091 if (txdmabufs[info->line]) { 4092 info->num_tx_dma_buffers = txdmabufs[info->line]; 4093 if (info->num_tx_dma_buffers < 1) 4094 info->num_tx_dma_buffers = 1; 4095 } 4096 4097 if (txholdbufs[info->line]) { 4098 info->num_tx_holding_buffers = txholdbufs[info->line]; 4099 if (info->num_tx_holding_buffers < 1) 4100 info->num_tx_holding_buffers = 1; 4101 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS) 4102 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS; 4103 } 4104 } 4105 4106 mgsl_device_count++; 4107 4108 if ( !mgsl_device_list ) 4109 mgsl_device_list = info; 4110 else { 4111 struct mgsl_struct *current_dev = mgsl_device_list; 4112 while( current_dev->next_device ) 4113 current_dev = current_dev->next_device; 4114 current_dev->next_device = info; 4115 } 4116 4117 if ( info->max_frame_size < 4096 ) 4118 info->max_frame_size = 4096; 4119 else if ( info->max_frame_size > 65535 ) 4120 info->max_frame_size = 65535; 4121 4122 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n", 4123 info->hw_version + 1, info->device_name, info->io_base, info->irq_level, 4124 info->phys_memory_base, info->phys_lcr_base, 4125 info->max_frame_size ); 4126 4127#if SYNCLINK_GENERIC_HDLC 4128 hdlcdev_init(info); 4129#endif 4130 4131} /* end of mgsl_add_device() */ 4132 4133static const struct tty_port_operations mgsl_port_ops = { 4134 .carrier_raised = carrier_raised, 4135 .dtr_rts = dtr_rts, 4136}; 4137 4138 4139/* mgsl_allocate_device() 4140 * 4141 * Allocate and initialize a device instance structure 4142 * 4143 * Arguments: none 4144 * Return Value: pointer to mgsl_struct if success, otherwise NULL 4145 */ 4146static struct mgsl_struct* mgsl_allocate_device(void) 4147{ 4148 struct mgsl_struct *info; 4149 4150 info = kzalloc(sizeof(struct mgsl_struct), 4151 GFP_KERNEL); 4152 4153 if (!info) { 4154 printk("Error can't allocate device instance data\n"); 4155 } else { 4156 tty_port_init(&info->port); 4157 info->port.ops = &mgsl_port_ops; 4158 info->magic = MGSL_MAGIC; 4159 INIT_WORK(&info->task, mgsl_bh_handler); 4160 info->max_frame_size = 4096; 4161 info->port.close_delay = 5*HZ/10; 4162 info->port.closing_wait = 30*HZ; 4163 init_waitqueue_head(&info->status_event_wait_q); 4164 init_waitqueue_head(&info->event_wait_q); 4165 spin_lock_init(&info->irq_spinlock); 4166 spin_lock_init(&info->netlock); 
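		/* Start from the compiled-in defaults; the per-line module
		 * options (maxframe[], txdmabufs[], txholdbufs[]) applied in
		 * mgsl_add_device() may later override max_frame_size and the
		 * DMA/holding buffer counts set below.
		 */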
4167 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS)); 4168 info->idle_mode = HDLC_TXIDLE_FLAGS; 4169 info->num_tx_dma_buffers = 1; 4170 info->num_tx_holding_buffers = 0; 4171 } 4172 4173 return info; 4174 4175} /* end of mgsl_allocate_device()*/ 4176 4177static const struct tty_operations mgsl_ops = { 4178 .install = mgsl_install, 4179 .open = mgsl_open, 4180 .close = mgsl_close, 4181 .write = mgsl_write, 4182 .put_char = mgsl_put_char, 4183 .flush_chars = mgsl_flush_chars, 4184 .write_room = mgsl_write_room, 4185 .chars_in_buffer = mgsl_chars_in_buffer, 4186 .flush_buffer = mgsl_flush_buffer, 4187 .ioctl = mgsl_ioctl, 4188 .throttle = mgsl_throttle, 4189 .unthrottle = mgsl_unthrottle, 4190 .send_xchar = mgsl_send_xchar, 4191 .break_ctl = mgsl_break, 4192 .wait_until_sent = mgsl_wait_until_sent, 4193 .set_termios = mgsl_set_termios, 4194 .stop = mgsl_stop, 4195 .start = mgsl_start, 4196 .hangup = mgsl_hangup, 4197 .tiocmget = tiocmget, 4198 .tiocmset = tiocmset, 4199 .get_icount = msgl_get_icount, 4200 .proc_show = mgsl_proc_show, 4201}; 4202 4203/* 4204 * perform tty device initialization 4205 */ 4206static int mgsl_init_tty(void) 4207{ 4208 int rc; 4209 4210 serial_driver = alloc_tty_driver(128); 4211 if (!serial_driver) 4212 return -ENOMEM; 4213 4214 serial_driver->driver_name = "synclink"; 4215 serial_driver->name = "ttySL"; 4216 serial_driver->major = ttymajor; 4217 serial_driver->minor_start = 64; 4218 serial_driver->type = TTY_DRIVER_TYPE_SERIAL; 4219 serial_driver->subtype = SERIAL_TYPE_NORMAL; 4220 serial_driver->init_termios = tty_std_termios; 4221 serial_driver->init_termios.c_cflag = 4222 B9600 | CS8 | CREAD | HUPCL | CLOCAL; 4223 serial_driver->init_termios.c_ispeed = 9600; 4224 serial_driver->init_termios.c_ospeed = 9600; 4225 serial_driver->flags = TTY_DRIVER_REAL_RAW; 4226 tty_set_operations(serial_driver, &mgsl_ops); 4227 if ((rc = tty_register_driver(serial_driver)) < 0) { 4228 printk("%s(%d):Couldn't register serial driver\n", 4229 __FILE__,__LINE__); 4230 put_tty_driver(serial_driver); 4231 serial_driver = NULL; 4232 return rc; 4233 } 4234 4235 printk("%s %s, tty major#%d\n", 4236 driver_name, driver_version, 4237 serial_driver->major); 4238 return 0; 4239} 4240 4241static void synclink_cleanup(void) 4242{ 4243 int rc; 4244 struct mgsl_struct *info; 4245 struct mgsl_struct *tmp; 4246 4247 printk("Unloading %s: %s\n", driver_name, driver_version); 4248 4249 if (serial_driver) { 4250 rc = tty_unregister_driver(serial_driver); 4251 if (rc) 4252 printk("%s(%d) failed to unregister tty driver err=%d\n", 4253 __FILE__,__LINE__,rc); 4254 put_tty_driver(serial_driver); 4255 } 4256 4257 info = mgsl_device_list; 4258 while(info) { 4259#if SYNCLINK_GENERIC_HDLC 4260 hdlcdev_exit(info); 4261#endif 4262 mgsl_release_resources(info); 4263 tmp = info; 4264 info = info->next_device; 4265 tty_port_destroy(&tmp->port); 4266 kfree(tmp); 4267 } 4268 4269 if (pci_registered) 4270 pci_unregister_driver(&synclink_pci_driver); 4271} 4272 4273static int __init synclink_init(void) 4274{ 4275 int rc; 4276 4277 if (break_on_load) { 4278 mgsl_get_text_ptr(); 4279 BREAKPOINT(); 4280 } 4281 4282 printk("%s %s\n", driver_name, driver_version); 4283 4284 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0) 4285 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc); 4286 else 4287 pci_registered = true; 4288 4289 if ((rc = mgsl_init_tty()) < 0) 4290 goto error; 4291 4292 return 0; 4293 4294error: 4295 synclink_cleanup(); 4296 return rc; 4297} 4298 4299static void __exit 
synclink_exit(void) 4300{ 4301 synclink_cleanup(); 4302} 4303 4304module_init(synclink_init); 4305module_exit(synclink_exit); 4306 4307/* 4308 * usc_RTCmd() 4309 * 4310 * Issue a USC Receive/Transmit command to the 4311 * Channel Command/Address Register (CCAR). 4312 * 4313 * Notes: 4314 * 4315 * The command is encoded in the most significant 5 bits <15..11> 4316 * of the CCAR value. Bits <10..7> of the CCAR must be preserved 4317 * and Bits <6..0> must be written as zeros. 4318 * 4319 * Arguments: 4320 * 4321 * info pointer to device information structure 4322 * Cmd command mask (use symbolic macros) 4323 * 4324 * Return Value: 4325 * 4326 * None 4327 */ 4328static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd ) 4329{ 4330 /* output command to CCAR in bits <15..11> */ 4331 /* preserve bits <10..7>, bits <6..0> must be zero */ 4332 4333 outw( Cmd + info->loopback_bits, info->io_base + CCAR ); 4334 4335 /* Read to flush write to CCAR */ 4336 inw( info->io_base + CCAR ); 4337 4338} /* end of usc_RTCmd() */ 4339 4340/* 4341 * usc_DmaCmd() 4342 * 4343 * Issue a DMA command to the DMA Command/Address Register (DCAR). 4344 * 4345 * Arguments: 4346 * 4347 * info pointer to device information structure 4348 * Cmd DMA command mask (usc_DmaCmd_XX Macros) 4349 * 4350 * Return Value: 4351 * 4352 * None 4353 */ 4354static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd ) 4355{ 4356 /* write command mask to DCAR */ 4357 outw( Cmd + info->mbre_bit, info->io_base ); 4358 4359 /* Read to flush write to DCAR */ 4360 inw( info->io_base ); 4361 4362} /* end of usc_DmaCmd() */ 4363 4364/* 4365 * usc_OutDmaReg() 4366 * 4367 * Write a 16-bit value to a USC DMA register 4368 * 4369 * Arguments: 4370 * 4371 * info pointer to device info structure 4372 * RegAddr register address (number) for write 4373 * RegValue 16-bit value to write to register 4374 * 4375 * Return Value: 4376 * 4377 * None 4378 * 4379 */ 4380static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue ) 4381{ 4382 /* Note: The DCAR is located at the adapter base address */ 4383 /* Note: must preserve state of BIT8 in DCAR */ 4384 4385 outw( RegAddr + info->mbre_bit, info->io_base ); 4386 outw( RegValue, info->io_base ); 4387 4388 /* Read to flush write to DCAR */ 4389 inw( info->io_base ); 4390 4391} /* end of usc_OutDmaReg() */ 4392 4393/* 4394 * usc_InDmaReg() 4395 * 4396 * Read a 16-bit value from a DMA register 4397 * 4398 * Arguments: 4399 * 4400 * info pointer to device info structure 4401 * RegAddr register address (number) to read from 4402 * 4403 * Return Value: 4404 * 4405 * The 16-bit value read from register 4406 * 4407 */ 4408static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr ) 4409{ 4410 /* Note: The DCAR is located at the adapter base address */ 4411 /* Note: must preserve state of BIT8 in DCAR */ 4412 4413 outw( RegAddr + info->mbre_bit, info->io_base ); 4414 return inw( info->io_base ); 4415 4416} /* end of usc_InDmaReg() */ 4417 4418/* 4419 * 4420 * usc_OutReg() 4421 * 4422 * Write a 16-bit value to a USC serial channel register 4423 * 4424 * Arguments: 4425 * 4426 * info pointer to device info structure 4427 * RegAddr register address (number) to write to 4428 * RegValue 16-bit value to write to register 4429 * 4430 * Return Value: 4431 * 4432 * None 4433 * 4434 */ 4435static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue ) 4436{ 4437 outw( RegAddr + info->loopback_bits, info->io_base + CCAR ); 4438 outw( RegValue, info->io_base + CCAR ); 4439 4440 /* Read to flush 
write to CCAR */ 4441 inw( info->io_base + CCAR ); 4442 4443} /* end of usc_OutReg() */ 4444 4445/* 4446 * usc_InReg() 4447 * 4448 * Reads a 16-bit value from a USC serial channel register 4449 * 4450 * Arguments: 4451 * 4452 * info pointer to device extension 4453 * RegAddr register address (number) to read from 4454 * 4455 * Return Value: 4456 * 4457 * 16-bit value read from register 4458 */ 4459static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr ) 4460{ 4461 outw( RegAddr + info->loopback_bits, info->io_base + CCAR ); 4462 return inw( info->io_base + CCAR ); 4463 4464} /* end of usc_InReg() */ 4465 4466/* usc_set_sdlc_mode() 4467 * 4468 * Set up the adapter for SDLC DMA communications. 4469 * 4470 * Arguments: info pointer to device instance data 4471 * Return Value: NONE 4472 */ 4473static void usc_set_sdlc_mode( struct mgsl_struct *info ) 4474{ 4475 u16 RegValue; 4476 bool PreSL1660; 4477 4478 /* 4479 * determine if the IUSC on the adapter is pre-SL1660. If 4480 * not, take advantage of the UnderWait feature of more 4481 * modern chips. If an underrun occurs and this bit is set, 4482 * the transmitter will idle the programmed idle pattern 4483 * until the driver has time to service the underrun. Otherwise, 4484 * the dma controller may get the cycles previously requested 4485 * and begin transmitting queued tx data. 4486 */ 4487 usc_OutReg(info,TMCR,0x1f); 4488 RegValue=usc_InReg(info,TMDR); 4489 PreSL1660 = (RegValue == IUSC_PRE_SL1660); 4490 4491 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) 4492 { 4493 /* 4494 ** Channel Mode Register (CMR) 4495 ** 4496 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun 4497 ** <13> 0 0 = Transmit Disabled (initially) 4498 ** <12> 0 1 = Consecutive Idles share common 0 4499 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop 4500 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling 4501 ** <3..0> 0110 Receiver Mode = HDLC/SDLC 4502 ** 4503 ** 1000 1110 0000 0110 = 0x8e06 4504 */ 4505 RegValue = 0x8e06; 4506 4507 /*-------------------------------------------------- 4508 * ignore user options for UnderRun Actions and 4509 * preambles 4510 *--------------------------------------------------*/ 4511 } 4512 else 4513 { 4514 /* Channel mode Register (CMR) 4515 * 4516 * <15..14> 00 Tx Sub modes, Underrun Action 4517 * <13> 0 1 = Send Preamble before opening flag 4518 * <12> 0 1 = Consecutive Idles share common 0 4519 * <11..8> 0110 Transmitter mode = HDLC/SDLC 4520 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling 4521 * <3..0> 0110 Receiver mode = HDLC/SDLC 4522 * 4523 * 0000 0110 0000 0110 = 0x0606 4524 */ 4525 if (info->params.mode == MGSL_MODE_RAW) { 4526 RegValue = 0x0001; /* Set Receive mode = external sync */ 4527 4528 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */ 4529 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12)); 4530 4531 /* 4532 * TxSubMode: 4533 * CMR <15> 0 Don't send CRC on Tx Underrun 4534 * CMR <14> x undefined 4535 * CMR <13> 0 Send preamble before openning sync 4536 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength 4537 * 4538 * TxMode: 4539 * CMR <11-8) 0100 MonoSync 4540 * 4541 * 0x00 0100 xxxx xxxx 04xx 4542 */ 4543 RegValue |= 0x0400; 4544 } 4545 else { 4546 4547 RegValue = 0x0606; 4548 4549 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 ) 4550 RegValue |= BIT14; 4551 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG ) 4552 RegValue |= BIT15; 4553 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC ) 4554 RegValue |= BIT15 | BIT14; 4555 } 4556 4557 if ( 
info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE ) 4558 RegValue |= BIT13; 4559 } 4560 4561 if ( info->params.mode == MGSL_MODE_HDLC && 4562 (info->params.flags & HDLC_FLAG_SHARE_ZERO) ) 4563 RegValue |= BIT12; 4564 4565 if ( info->params.addr_filter != 0xff ) 4566 { 4567 /* set up receive address filtering */ 4568 usc_OutReg( info, RSR, info->params.addr_filter ); 4569 RegValue |= BIT4; 4570 } 4571 4572 usc_OutReg( info, CMR, RegValue ); 4573 info->cmr_value = RegValue; 4574 4575 /* Receiver mode Register (RMR) 4576 * 4577 * <15..13> 000 encoding 4578 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1) 4579 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC) 4580 * <9> 0 1 = Include Receive chars in CRC 4581 * <8> 1 1 = Use Abort/PE bit as abort indicator 4582 * <7..6> 00 Even parity 4583 * <5> 0 parity disabled 4584 * <4..2> 000 Receive Char Length = 8 bits 4585 * <1..0> 00 Disable Receiver 4586 * 4587 * 0000 0101 0000 0000 = 0x0500 4588 */ 4589 4590 RegValue = 0x0500; 4591 4592 switch ( info->params.encoding ) { 4593 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break; 4594 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break; 4595 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 | BIT13; break; 4596 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break; 4597 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 | BIT13; break; 4598 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14; break; 4599 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break; 4600 } 4601 4602 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT ) 4603 RegValue |= BIT9; 4604 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT ) 4605 RegValue |= ( BIT12 | BIT10 | BIT9 ); 4606 4607 usc_OutReg( info, RMR, RegValue ); 4608 4609 /* Set the Receive count Limit Register (RCLR) to 0xffff. */ 4610 /* When an opening flag of an SDLC frame is recognized the */ 4611 /* Receive Character count (RCC) is loaded with the value in */ 4612 /* RCLR. The RCC is decremented for each received byte. The */ 4613 /* value of RCC is stored after the closing flag of the frame */ 4614 /* allowing the frame size to be computed. */ 4615 4616 usc_OutReg( info, RCLR, RCLRVALUE ); 4617 4618 usc_RCmd( info, RCmd_SelectRicrdma_level ); 4619 4620 /* Receive Interrupt Control Register (RICR) 4621 * 4622 * <15..8> ? RxFIFO DMA Request Level 4623 * <7> 0 Exited Hunt IA (Interrupt Arm) 4624 * <6> 0 Idle Received IA 4625 * <5> 0 Break/Abort IA 4626 * <4> 0 Rx Bound IA 4627 * <3> 1 Queued status reflects oldest 2 bytes in FIFO 4628 * <2> 0 Abort/PE IA 4629 * <1> 1 Rx Overrun IA 4630 * <0> 0 Select TC0 value for readback 4631 * 4632 * 0000 0000 0000 1010 = 0x000a 4633 */ 4634 4635 /* Carry over the Exit Hunt and Idle Received bits */ 4636 /* in case they have been armed by usc_ArmEvents.
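 * (These are bits <7..6> of RICR; the 0xc0 mask applied in the read
 * below keeps just those two interrupt-arm bits while the rest of the
 * register is rewritten.)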
*/ 4637 4638 RegValue = usc_InReg( info, RICR ) & 0xc0; 4639 4640 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) ); 4641 4642 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */ 4643 4644 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); 4645 usc_ClearIrqPendingBits( info, RECEIVE_STATUS ); 4646 4647 /* Transmit mode Register (TMR) 4648 * 4649 * <15..13> 000 encoding 4650 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1) 4651 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC) 4652 * <9> 0 1 = Tx CRC Enabled 4653 * <8> 0 1 = Append CRC to end of transmit frame 4654 * <7..6> 00 Transmit parity Even 4655 * <5> 0 Transmit parity Disabled 4656 * <4..2> 000 Tx Char Length = 8 bits 4657 * <1..0> 00 Disable Transmitter 4658 * 4659 * 0000 0100 0000 0000 = 0x0400 4660 */ 4661 4662 RegValue = 0x0400; 4663 4664 switch ( info->params.encoding ) { 4665 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break; 4666 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break; 4667 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 | BIT13; break; 4668 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break; 4669 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 | BIT13; break; 4670 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14; break; 4671 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break; 4672 } 4673 4674 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT ) 4675 RegValue |= BIT9 | BIT8; 4676 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT ) 4677 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8); 4678 4679 usc_OutReg( info, TMR, RegValue ); 4680 4681 usc_set_txidle( info ); 4682 4683 4684 usc_TCmd( info, TCmd_SelectTicrdma_level ); 4685 4686 /* Transmit Interrupt Control Register (TICR) 4687 * 4688 * <15..8> ? 
Transmit FIFO DMA Level 4689 * <7> 0 Present IA (Interrupt Arm) 4690 * <6> 0 Idle Sent IA 4691 * <5> 1 Abort Sent IA 4692 * <4> 1 EOF/EOM Sent IA 4693 * <3> 0 CRC Sent IA 4694 * <2> 1 1 = Wait for SW Trigger to Start Frame 4695 * <1> 1 Tx Underrun IA 4696 * <0> 0 TC0 constant on read back 4697 * 4698 * 0000 0000 0011 0110 = 0x0036 4699 */ 4700 4701 usc_OutReg( info, TICR, 0x0736 ); 4702 4703 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL ); 4704 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS ); 4705 4706 /* 4707 ** Transmit Command/Status Register (TCSR) 4708 ** 4709 ** <15..12> 0000 TCmd 4710 ** <11> 0/1 UnderWait 4711 ** <10..08> 000 TxIdle 4712 ** <7> x PreSent 4713 ** <6> x IdleSent 4714 ** <5> x AbortSent 4715 ** <4> x EOF/EOM Sent 4716 ** <3> x CRC Sent 4717 ** <2> x All Sent 4718 ** <1> x TxUnder 4719 ** <0> x TxEmpty 4720 ** 4721 ** 0000 0000 0000 0000 = 0x0000 4722 */ 4723 info->tcsr_value = 0; 4724 4725 if ( !PreSL1660 ) 4726 info->tcsr_value |= TCSR_UNDERWAIT; 4727 4728 usc_OutReg( info, TCSR, info->tcsr_value ); 4729 4730 /* Clock mode Control Register (CMCR) 4731 * 4732 * <15..14> 00 counter 1 Source = Disabled 4733 * <13..12> 00 counter 0 Source = Disabled 4734 * <11..10> 11 BRG1 Input is TxC Pin 4735 * <9..8> 11 BRG0 Input is TxC Pin 4736 * <7..6> 01 DPLL Input is BRG1 Output 4737 * <5..3> XXX TxCLK comes from Port 0 4738 * <2..0> XXX RxCLK comes from Port 1 4739 * 4740 * 0000 1111 0111 0111 = 0x0f77 4741 */ 4742 4743 RegValue = 0x0f40; 4744 4745 if ( info->params.flags & HDLC_FLAG_RXC_DPLL ) 4746 RegValue |= 0x0003; /* RxCLK from DPLL */ 4747 else if ( info->params.flags & HDLC_FLAG_RXC_BRG ) 4748 RegValue |= 0x0004; /* RxCLK from BRG0 */ 4749 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN) 4750 RegValue |= 0x0006; /* RxCLK from TXC Input */ 4751 else 4752 RegValue |= 0x0007; /* RxCLK from Port1 */ 4753 4754 if ( info->params.flags & HDLC_FLAG_TXC_DPLL ) 4755 RegValue |= 0x0018; /* TxCLK from DPLL */ 4756 else if ( info->params.flags & HDLC_FLAG_TXC_BRG ) 4757 RegValue |= 0x0020; /* TxCLK from BRG0 */ 4758 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN) 4759 RegValue |= 0x0038; /* RxCLK from TXC Input */ 4760 else 4761 RegValue |= 0x0030; /* TxCLK from Port0 */ 4762 4763 usc_OutReg( info, CMCR, RegValue ); 4764 4765 4766 /* Hardware Configuration Register (HCR) 4767 * 4768 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4 4769 * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div 4770 * <12> 0 CVOK:0=report code violation in biphase 4771 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4 4772 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level 4773 * <7..6> 00 reserved 4774 * <5> 0 BRG1 mode:0=continuous,1=single cycle 4775 * <4> X BRG1 Enable 4776 * <3..2> 00 reserved 4777 * <1> 0 BRG0 mode:0=continuous,1=single cycle 4778 * <0> 0 BRG0 Enable 4779 */ 4780 4781 RegValue = 0x0000; 4782 4783 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL | HDLC_FLAG_TXC_DPLL) ) { 4784 u32 XtalSpeed; 4785 u32 DpllDivisor; 4786 u16 Tc; 4787 4788 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */ 4789 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. 
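 * For example, with the 11.0592 MHz reference used below, a
 * clock_speed of 9600 and the default DPLL divisor of 32 yield
 * Tc = (11059200/32)/9600 - 1 = 35.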
*/ 4790 4791 XtalSpeed = 11059200; 4792 4793 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) { 4794 DpllDivisor = 16; 4795 RegValue |= BIT10; 4796 } 4797 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) { 4798 DpllDivisor = 8; 4799 RegValue |= BIT11; 4800 } 4801 else 4802 DpllDivisor = 32; 4803 4804 /* Tc = (Xtal/Speed) - 1 */ 4805 /* If twice the remainder of (Xtal/Speed) is greater than Speed */ 4806 /* then rounding up gives a more precise time constant. Instead */ 4807 /* of rounding up and then subtracting 1 we just don't subtract */ 4808 /* the one in this case. */ 4809 4810 /*-------------------------------------------------- 4811 * ejz: for DPLL mode, application should use the 4812 * same clock speed as the partner system, even 4813 * though clocking is derived from the input RxData. 4814 * In case the user uses a 0 for the clock speed, 4815 * default to 0xffffffff and don't try to divide by 4816 * zero 4817 *--------------------------------------------------*/ 4818 if ( info->params.clock_speed ) 4819 { 4820 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed); 4821 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2) 4822 / info->params.clock_speed) ) 4823 Tc--; 4824 } 4825 else 4826 Tc = -1; 4827 4828 4829 /* Write 16-bit Time Constant for BRG1 */ 4830 usc_OutReg( info, TC1R, Tc ); 4831 4832 RegValue |= BIT4; /* enable BRG1 */ 4833 4834 switch ( info->params.encoding ) { 4835 case HDLC_ENCODING_NRZ: 4836 case HDLC_ENCODING_NRZB: 4837 case HDLC_ENCODING_NRZI_MARK: 4838 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break; 4839 case HDLC_ENCODING_BIPHASE_MARK: 4840 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break; 4841 case HDLC_ENCODING_BIPHASE_LEVEL: 4842 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 | BIT8; break; 4843 } 4844 } 4845 4846 usc_OutReg( info, HCR, RegValue ); 4847 4848 4849 /* Channel Control/status Register (CCSR) 4850 * 4851 * <15> X RCC FIFO Overflow status (RO) 4852 * <14> X RCC FIFO Not Empty status (RO) 4853 * <13> 0 1 = Clear RCC FIFO (WO) 4854 * <12> X DPLL Sync (RW) 4855 * <11> X DPLL 2 Missed Clocks status (RO) 4856 * <10> X DPLL 1 Missed Clock status (RO) 4857 * <9..8> 00 DPLL Resync on rising and falling edges (RW) 4858 * <7> X SDLC Loop On status (RO) 4859 * <6> X SDLC Loop Send status (RO) 4860 * <5> 1 Bypass counters for TxClk and RxClk (RW) 4861 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW) 4862 * <1..0> 00 reserved 4863 * 4864 * 0000 0000 0010 0000 = 0x0020 4865 */ 4866 4867 usc_OutReg( info, CCSR, 0x1020 ); 4868 4869 4870 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) { 4871 usc_OutReg( info, SICR, 4872 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) ); 4873 } 4874 4875 4876 /* enable Master Interrupt Enable bit (MIE) */ 4877 usc_EnableMasterIrqBit( info ); 4878 4879 usc_ClearIrqPendingBits( info, RECEIVE_STATUS | RECEIVE_DATA | 4880 TRANSMIT_STATUS | TRANSMIT_DATA | MISC); 4881 4882 /* arm RCC underflow interrupt */ 4883 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3)); 4884 usc_EnableInterrupts(info, MISC); 4885 4886 info->mbre_bit = 0; 4887 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */ 4888 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */ 4889 info->mbre_bit = BIT8; 4890 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */ 4891 4892 /* DMA Control Register (DCR) 4893 * 4894 * <15..14> 10 Priority mode = Alternating Tx/Rx 4895 * 01 Rx has priority 4896 * 00 Tx has priority 4897 * 4898 * <13> 1 Enable Priority Preempt per 
DCR<15..14> 4899 * (WARNING DCR<11..10> must be 00 when this is 1) 4900 * 0 Choose activate channel per DCR<11..10> 4901 * 4902 * <12> 0 Little Endian for Array/List 4903 * <11..10> 00 Both Channels can use each bus grant 4904 * <9..6> 0000 reserved 4905 * <5> 0 7 CLK - Minimum Bus Re-request Interval 4906 * <4> 0 1 = drive D/C and S/D pins 4907 * <3> 1 1 = Add one wait state to all DMA cycles. 4908 * <2> 0 1 = Strobe /UAS on every transfer. 4909 * <1..0> 11 Addr incrementing only affects LS24 bits 4910 * 4911 * 0110 0000 0000 1011 = 0x600b 4912 */ 4913 4914 /* PCI adapter does not need DMA wait state */ 4915 usc_OutDmaReg( info, DCR, 0xa00b ); 4916 4917 /* Receive DMA mode Register (RDMR) 4918 * 4919 * <15..14> 11 DMA mode = Linked List Buffer mode 4920 * <13> 1 RSBinA/L = store Rx status Block in Arrary/List entry 4921 * <12> 1 Clear count of List Entry after fetching 4922 * <11..10> 00 Address mode = Increment 4923 * <9> 1 Terminate Buffer on RxBound 4924 * <8> 0 Bus Width = 16bits 4925 * <7..0> ? status Bits (write as 0s) 4926 * 4927 * 1111 0010 0000 0000 = 0xf200 4928 */ 4929 4930 usc_OutDmaReg( info, RDMR, 0xf200 ); 4931 4932 4933 /* Transmit DMA mode Register (TDMR) 4934 * 4935 * <15..14> 11 DMA mode = Linked List Buffer mode 4936 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry 4937 * <12> 1 Clear count of List Entry after fetching 4938 * <11..10> 00 Address mode = Increment 4939 * <9> 1 Terminate Buffer on end of frame 4940 * <8> 0 Bus Width = 16bits 4941 * <7..0> ? status Bits (Read Only so write as 0) 4942 * 4943 * 1111 0010 0000 0000 = 0xf200 4944 */ 4945 4946 usc_OutDmaReg( info, TDMR, 0xf200 ); 4947 4948 4949 /* DMA Interrupt Control Register (DICR) 4950 * 4951 * <15> 1 DMA Interrupt Enable 4952 * <14> 0 1 = Disable IEO from USC 4953 * <13> 0 1 = Don't provide vector during IntAck 4954 * <12> 1 1 = Include status in Vector 4955 * <10..2> 0 reserved, Must be 0s 4956 * <1> 0 1 = Rx DMA Interrupt Enabled 4957 * <0> 0 1 = Tx DMA Interrupt Enabled 4958 * 4959 * 1001 0000 0000 0000 = 0x9000 4960 */ 4961 4962 usc_OutDmaReg( info, DICR, 0x9000 ); 4963 4964 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */ 4965 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */ 4966 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */ 4967 4968 /* Channel Control Register (CCR) 4969 * 4970 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs) 4971 * <13> 0 Trigger Tx on SW Command Disabled 4972 * <12> 0 Flag Preamble Disabled 4973 * <11..10> 00 Preamble Length 4974 * <9..8> 00 Preamble Pattern 4975 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs) 4976 * <5> 0 Trigger Rx on SW Command Disabled 4977 * <4..0> 0 reserved 4978 * 4979 * 1000 0000 1000 0000 = 0x8080 4980 */ 4981 4982 RegValue = 0x8080; 4983 4984 switch ( info->params.preamble_length ) { 4985 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break; 4986 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break; 4987 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 | BIT10; break; 4988 } 4989 4990 switch ( info->params.preamble ) { 4991 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 | BIT12; break; 4992 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break; 4993 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break; 4994 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 | BIT8; break; 4995 } 4996 4997 usc_OutReg( info, CCR, RegValue ); 4998 4999 5000 /* 5001 * Burst/Dwell Control Register 5002 * 5003 * <15..8> 0x20 Maximum number of transfers per bus grant 5004 * 
<7..0> 0x00 Maximum number of clock cycles per bus grant 5005 */ 5006 5007 /* don't limit bus occupancy on PCI adapter */ 5008 usc_OutDmaReg( info, BDCR, 0x0000 ); 5009 5010 usc_stop_transmitter(info); 5011 usc_stop_receiver(info); 5012 5013} /* end of usc_set_sdlc_mode() */ 5014 5015/* usc_enable_loopback() 5016 * 5017 * Set the 16C32 for internal loopback mode. 5018 * The TxCLK and RxCLK signals are generated from the BRG0 and 5019 * the TxD is looped back to the RxD internally. 5020 * 5021 * Arguments: info pointer to device instance data 5022 * enable 1 = enable loopback, 0 = disable 5023 * Return Value: None 5024 */ 5025static void usc_enable_loopback(struct mgsl_struct *info, int enable) 5026{ 5027 if (enable) { 5028 /* blank external TXD output */ 5029 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7 | BIT6)); 5030 5031 /* Clock mode Control Register (CMCR) 5032 * 5033 * <15..14> 00 counter 1 Disabled 5034 * <13..12> 00 counter 0 Disabled 5035 * <11..10> 11 BRG1 Input is TxC Pin 5036 * <9..8> 11 BRG0 Input is TxC Pin 5037 * <7..6> 01 DPLL Input is BRG1 Output 5038 * <5..3> 100 TxCLK comes from BRG0 5039 * <2..0> 100 RxCLK comes from BRG0 5040 * 5041 * 0000 1111 0110 0100 = 0x0f64 5042 */ 5043 5044 usc_OutReg( info, CMCR, 0x0f64 ); 5045 5046 /* Write 16-bit Time Constant for BRG0 */ 5047 /* use clock speed if available, otherwise use 8 for diagnostics */ 5048 if (info->params.clock_speed) { 5049 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1)); 5050 } else 5051 usc_OutReg(info, TC0R, (u16)8); 5052 5053 /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0 5054 mode = Continuous Set Bit 0 to enable BRG0. */ 5055 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) ); 5056 5057 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */ 5058 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004)); 5059 5060 /* set Internal Data loopback mode */ 5061 info->loopback_bits = 0x300; 5062 outw( 0x0300, info->io_base + CCAR ); 5063 } else { 5064 /* enable external TXD output */ 5065 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7 | BIT6)); 5066 5067 /* clear Internal Data loopback mode */ 5068 info->loopback_bits = 0; 5069 outw( 0,info->io_base + CCAR ); 5070 } 5071 5072} /* end of usc_enable_loopback() */ 5073 5074/* usc_enable_aux_clock() 5075 * 5076 * Enabled the AUX clock output at the specified frequency. 5077 * 5078 * Arguments: 5079 * 5080 * info pointer to device extension 5081 * data_rate data rate of clock in bits per second 5082 * A data rate of 0 disables the AUX clock. 5083 * 5084 * Return Value: None 5085 */ 5086static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate ) 5087{ 5088 u32 XtalSpeed; 5089 u16 Tc; 5090 5091 if ( data_rate ) { 5092 XtalSpeed = 11059200; 5093 5094 5095 /* Tc = (Xtal/Speed) - 1 */ 5096 /* If twice the remainder of (Xtal/Speed) is greater than Speed */ 5097 /* then rounding up gives a more precise time constant. Instead */ 5098 /* of rounding up and then subtracting 1 we just don't subtract */ 5099 /* the one in this case. */ 5100 5101 5102 Tc = (u16)(XtalSpeed/data_rate); 5103 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) ) 5104 Tc--; 5105 5106 /* Write 16-bit Time Constant for BRG0 */ 5107 usc_OutReg( info, TC0R, Tc ); 5108 5109 /* 5110 * Hardware Configuration Register (HCR) 5111 * Clear Bit 1, BRG0 mode = Continuous 5112 * Set Bit 0 to enable BRG0. 
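	 * (TC0R was loaded above; e.g. a 9600 bps aux clock with the
	 * 11.0592 MHz reference uses Tc = 11059200/9600 - 1 = 1151.)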
5113 */ 5114 5115 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) ); 5116 5117 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */ 5118 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) ); 5119 } else { 5120 /* data rate == 0 so turn off BRG0 */ 5121 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) ); 5122 } 5123 5124} /* end of usc_enable_aux_clock() */ 5125 5126/* 5127 * 5128 * usc_process_rxoverrun_sync() 5129 * 5130 * This function processes a receive overrun by resetting the 5131 * receive DMA buffers and issuing a Purge Rx FIFO command 5132 * to allow the receiver to continue receiving. 5133 * 5134 * Arguments: 5135 * 5136 * info pointer to device extension 5137 * 5138 * Return Value: None 5139 */ 5140static void usc_process_rxoverrun_sync( struct mgsl_struct *info ) 5141{ 5142 int start_index; 5143 int end_index; 5144 int frame_start_index; 5145 bool start_of_frame_found = false; 5146 bool end_of_frame_found = false; 5147 bool reprogram_dma = false; 5148 5149 DMABUFFERENTRY *buffer_list = info->rx_buffer_list; 5150 u32 phys_addr; 5151 5152 usc_DmaCmd( info, DmaCmd_PauseRxChannel ); 5153 usc_RCmd( info, RCmd_EnterHuntmode ); 5154 usc_RTCmd( info, RTCmd_PurgeRxFifo ); 5155 5156 /* CurrentRxBuffer points to the 1st buffer of the next */ 5157 /* possibly available receive frame. */ 5158 5159 frame_start_index = start_index = end_index = info->current_rx_buffer; 5160 5161 /* Search for an unfinished string of buffers. This means */ 5162 /* that a receive frame started (at least one buffer with */ 5163 /* count set to zero) but there is no terminiting buffer */ 5164 /* (status set to non-zero). */ 5165 5166 while( !buffer_list[end_index].count ) 5167 { 5168 /* Count field has been reset to zero by 16C32. */ 5169 /* This buffer is currently in use. */ 5170 5171 if ( !start_of_frame_found ) 5172 { 5173 start_of_frame_found = true; 5174 frame_start_index = end_index; 5175 end_of_frame_found = false; 5176 } 5177 5178 if ( buffer_list[end_index].status ) 5179 { 5180 /* Status field has been set by 16C32. */ 5181 /* This is the last buffer of a received frame. */ 5182 5183 /* We want to leave the buffers for this frame intact. */ 5184 /* Move on to next possible frame. */ 5185 5186 start_of_frame_found = false; 5187 end_of_frame_found = true; 5188 } 5189 5190 /* advance to next buffer entry in linked list */ 5191 end_index++; 5192 if ( end_index == info->rx_buffer_count ) 5193 end_index = 0; 5194 5195 if ( start_index == end_index ) 5196 { 5197 /* The entire list has been searched with all Counts == 0 and */ 5198 /* all Status == 0. The receive buffers are */ 5199 /* completely screwed, reset all receive buffers! */ 5200 mgsl_reset_rx_dma_buffers( info ); 5201 frame_start_index = 0; 5202 start_of_frame_found = false; 5203 reprogram_dma = true; 5204 break; 5205 } 5206 } 5207 5208 if ( start_of_frame_found && !end_of_frame_found ) 5209 { 5210 /* There is an unfinished string of receive DMA buffers */ 5211 /* as a result of the receiver overrun. */ 5212 5213 /* Reset the buffers for the unfinished frame */ 5214 /* and reprogram the receive DMA controller to start */ 5215 /* at the 1st buffer of unfinished frame. */ 5216 5217 start_index = frame_start_index; 5218 5219 do 5220 { 5221 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE; 5222 5223 /* Adjust index for wrap around. 
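 * (The unsigned long store above resets the 16-bit count field to
 * DMABUFFERSIZE and clears the adjacent status field in one write,
 * returning the entry to its empty state.)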
*/ 5224 if ( start_index == info->rx_buffer_count ) 5225 start_index = 0; 5226 5227 } while( start_index != end_index ); 5228 5229 reprogram_dma = true; 5230 } 5231 5232 if ( reprogram_dma ) 5233 { 5234 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL); 5235 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS); 5236 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS); 5237 5238 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL); 5239 5240 /* This empties the receive FIFO and loads the RCC with RCLR */ 5241 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) ); 5242 5243 /* program 16C32 with physical address of 1st DMA buffer entry */ 5244 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry; 5245 usc_OutDmaReg( info, NRARL, (u16)phys_addr ); 5246 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) ); 5247 5248 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); 5249 usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS ); 5250 usc_EnableInterrupts( info, RECEIVE_STATUS ); 5251 5252 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */ 5253 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */ 5254 5255 usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 ); 5256 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) ); 5257 usc_DmaCmd( info, DmaCmd_InitRxChannel ); 5258 if ( info->params.flags & HDLC_FLAG_AUTO_DCD ) 5259 usc_EnableReceiver(info,ENABLE_AUTO_DCD); 5260 else 5261 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL); 5262 } 5263 else 5264 { 5265 /* This empties the receive FIFO and loads the RCC with RCLR */ 5266 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) ); 5267 usc_RTCmd( info, RTCmd_PurgeRxFifo ); 5268 } 5269 5270} /* end of usc_process_rxoverrun_sync() */ 5271 5272/* usc_stop_receiver() 5273 * 5274 * Disable USC receiver 5275 * 5276 * Arguments: info pointer to device instance data 5277 * Return Value: None 5278 */ 5279static void usc_stop_receiver( struct mgsl_struct *info ) 5280{ 5281 if (debug_level >= DEBUG_LEVEL_ISR) 5282 printk("%s(%d):usc_stop_receiver(%s)\n", 5283 __FILE__,__LINE__, info->device_name ); 5284 5285 /* Disable receive DMA channel. 
*/ 5286 /* This also disables receive DMA channel interrupts */ 5287 usc_DmaCmd( info, DmaCmd_ResetRxChannel ); 5288 5289 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); 5290 usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS ); 5291 usc_DisableInterrupts( info, RECEIVE_DATA | RECEIVE_STATUS ); 5292 5293 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL); 5294 5295 /* This empties the receive FIFO and loads the RCC with RCLR */ 5296 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) ); 5297 usc_RTCmd( info, RTCmd_PurgeRxFifo ); 5298 5299 info->rx_enabled = false; 5300 info->rx_overflow = false; 5301 info->rx_rcc_underrun = false; 5302 5303} /* end of stop_receiver() */ 5304 5305/* usc_start_receiver() 5306 * 5307 * Enable the USC receiver 5308 * 5309 * Arguments: info pointer to device instance data 5310 * Return Value: None 5311 */ 5312static void usc_start_receiver( struct mgsl_struct *info ) 5313{ 5314 u32 phys_addr; 5315 5316 if (debug_level >= DEBUG_LEVEL_ISR) 5317 printk("%s(%d):usc_start_receiver(%s)\n", 5318 __FILE__,__LINE__, info->device_name ); 5319 5320 mgsl_reset_rx_dma_buffers( info ); 5321 usc_stop_receiver( info ); 5322 5323 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) ); 5324 usc_RTCmd( info, RTCmd_PurgeRxFifo ); 5325 5326 if ( info->params.mode == MGSL_MODE_HDLC || 5327 info->params.mode == MGSL_MODE_RAW ) { 5328 /* DMA mode Transfers */ 5329 /* Program the DMA controller. */ 5330 /* Enable the DMA controller end of buffer interrupt. */ 5331 5332 /* program 16C32 with physical address of 1st DMA buffer entry */ 5333 phys_addr = info->rx_buffer_list[0].phys_entry; 5334 usc_OutDmaReg( info, NRARL, (u16)phys_addr ); 5335 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) ); 5336 5337 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); 5338 usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS ); 5339 usc_EnableInterrupts( info, RECEIVE_STATUS ); 5340 5341 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */ 5342 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */ 5343 5344 usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 ); 5345 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) ); 5346 usc_DmaCmd( info, DmaCmd_InitRxChannel ); 5347 if ( info->params.flags & HDLC_FLAG_AUTO_DCD ) 5348 usc_EnableReceiver(info,ENABLE_AUTO_DCD); 5349 else 5350 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL); 5351 } else { 5352 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL); 5353 usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS); 5354 usc_EnableInterrupts(info, RECEIVE_DATA); 5355 5356 usc_RTCmd( info, RTCmd_PurgeRxFifo ); 5357 usc_RCmd( info, RCmd_EnterHuntmode ); 5358 5359 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL); 5360 } 5361 5362 usc_OutReg( info, CCSR, 0x1020 ); 5363 5364 info->rx_enabled = true; 5365 5366} /* end of usc_start_receiver() */ 5367 5368/* usc_start_transmitter() 5369 * 5370 * Enable the USC transmitter and send a transmit frame if 5371 * one is loaded in the DMA buffers. 
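 * If the HDLC_FLAG_AUTO_RTS option is set, RTS is asserted here before
 * transmission and dropped again when the frame completes
 * (drop_rts_on_tx_done).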
5372 * 5373 * Arguments: info pointer to device instance data 5374 * Return Value: None 5375 */ 5376static void usc_start_transmitter( struct mgsl_struct *info ) 5377{ 5378 u32 phys_addr; 5379 unsigned int FrameSize; 5380 5381 if (debug_level >= DEBUG_LEVEL_ISR) 5382 printk("%s(%d):usc_start_transmitter(%s)\n", 5383 __FILE__,__LINE__, info->device_name ); 5384 5385 if ( info->xmit_cnt ) { 5386 5387 /* If auto RTS enabled and RTS is inactive, then assert */ 5388 /* RTS and set a flag indicating that the driver should */ 5389 /* negate RTS when the transmission completes. */ 5390 5391 info->drop_rts_on_tx_done = false; 5392 5393 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) { 5394 usc_get_serial_signals( info ); 5395 if ( !(info->serial_signals & SerialSignal_RTS) ) { 5396 info->serial_signals |= SerialSignal_RTS; 5397 usc_set_serial_signals( info ); 5398 info->drop_rts_on_tx_done = true; 5399 } 5400 } 5401 5402 5403 if ( info->params.mode == MGSL_MODE_ASYNC ) { 5404 if ( !info->tx_active ) { 5405 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL); 5406 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA); 5407 usc_EnableInterrupts(info, TRANSMIT_DATA); 5408 usc_load_txfifo(info); 5409 } 5410 } else { 5411 /* Disable transmit DMA controller while programming. */ 5412 usc_DmaCmd( info, DmaCmd_ResetTxChannel ); 5413 5414 /* Transmit DMA buffer is loaded, so program USC */ 5415 /* to send the frame contained in the buffers. */ 5416 5417 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc; 5418 5419 /* if operating in Raw sync mode, reset the rcc component 5420 * of the tx dma buffer entry, otherwise, the serial controller 5421 * will send a closing sync char after this count. 5422 */ 5423 if ( info->params.mode == MGSL_MODE_RAW ) 5424 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0; 5425 5426 /* Program the Transmit Character Length Register (TCLR) */ 5427 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */ 5428 usc_OutReg( info, TCLR, (u16)FrameSize ); 5429 5430 usc_RTCmd( info, RTCmd_PurgeTxFifo ); 5431 5432 /* Program the address of the 1st DMA Buffer Entry in linked list */ 5433 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry; 5434 usc_OutDmaReg( info, NTARL, (u16)phys_addr ); 5435 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) ); 5436 5437 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL ); 5438 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS ); 5439 usc_EnableInterrupts( info, TRANSMIT_STATUS ); 5440 5441 if ( info->params.mode == MGSL_MODE_RAW && 5442 info->num_tx_dma_buffers > 1 ) { 5443 /* When running external sync mode, attempt to 'stream' transmit */ 5444 /* by filling tx dma buffers as they become available. To do this */ 5445 /* we need to enable Tx DMA EOB Status interrupts : */ 5446 /* */ 5447 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */ 5448 /* 2. 
Enable Transmit DMA Interrupts (BIT0 of DICR) */ 5449 5450 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 ); 5451 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) ); 5452 } 5453 5454 /* Initialize Transmit DMA Channel */ 5455 usc_DmaCmd( info, DmaCmd_InitTxChannel ); 5456 5457 usc_TCmd( info, TCmd_SendFrame ); 5458 5459 mod_timer(&info->tx_timer, jiffies + 5460 msecs_to_jiffies(5000)); 5461 } 5462 info->tx_active = true; 5463 } 5464 5465 if ( !info->tx_enabled ) { 5466 info->tx_enabled = true; 5467 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) 5468 usc_EnableTransmitter(info,ENABLE_AUTO_CTS); 5469 else 5470 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL); 5471 } 5472 5473} /* end of usc_start_transmitter() */ 5474 5475/* usc_stop_transmitter() 5476 * 5477 * Stops the transmitter and DMA 5478 * 5479 * Arguments: info pointer to device isntance data 5480 * Return Value: None 5481 */ 5482static void usc_stop_transmitter( struct mgsl_struct *info ) 5483{ 5484 if (debug_level >= DEBUG_LEVEL_ISR) 5485 printk("%s(%d):usc_stop_transmitter(%s)\n", 5486 __FILE__,__LINE__, info->device_name ); 5487 5488 del_timer(&info->tx_timer); 5489 5490 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL ); 5491 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA ); 5492 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA ); 5493 5494 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL); 5495 usc_DmaCmd( info, DmaCmd_ResetTxChannel ); 5496 usc_RTCmd( info, RTCmd_PurgeTxFifo ); 5497 5498 info->tx_enabled = false; 5499 info->tx_active = false; 5500 5501} /* end of usc_stop_transmitter() */ 5502 5503/* usc_load_txfifo() 5504 * 5505 * Fill the transmit FIFO until the FIFO is full or 5506 * there is no more data to load. 5507 * 5508 * Arguments: info pointer to device extension (instance data) 5509 * Return Value: None 5510 */ 5511static void usc_load_txfifo( struct mgsl_struct *info ) 5512{ 5513 int Fifocount; 5514 u8 TwoBytes[2]; 5515 5516 if ( !info->xmit_cnt && !info->x_char ) 5517 return; 5518 5519 /* Select transmit FIFO status readback in TICR */ 5520 usc_TCmd( info, TCmd_SelectTicrTxFifostatus ); 5521 5522 /* load the Transmit FIFO until FIFOs full or all data sent */ 5523 5524 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) { 5525 /* there is more space in the transmit FIFO and */ 5526 /* there is more data in transmit buffer */ 5527 5528 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) { 5529 /* write a 16-bit word from transmit buffer to 16C32 */ 5530 5531 TwoBytes[0] = info->xmit_buf[info->xmit_tail++]; 5532 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1); 5533 TwoBytes[1] = info->xmit_buf[info->xmit_tail++]; 5534 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1); 5535 5536 outw( *((u16 *)TwoBytes), info->io_base + DATAREG); 5537 5538 info->xmit_cnt -= 2; 5539 info->icount.tx += 2; 5540 } else { 5541 /* only 1 byte left to transmit or 1 FIFO slot left */ 5542 5543 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY), 5544 info->io_base + CCAR ); 5545 5546 if (info->x_char) { 5547 /* transmit pending high priority char */ 5548 outw( info->x_char,info->io_base + CCAR ); 5549 info->x_char = 0; 5550 } else { 5551 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR ); 5552 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1); 5553 info->xmit_cnt--; 5554 } 5555 info->icount.tx++; 5556 } 5557 } 5558 5559} /* end of usc_load_txfifo() */ 5560 5561/* usc_reset() 5562 * 5563 * Reset the adapter to a known state and prepare it for 
further use. 5564 * 5565 * Arguments: info pointer to device instance data 5566 * Return Value: None 5567 */ 5568static void usc_reset( struct mgsl_struct *info ) 5569{ 5570 int i; 5571 u32 readval; 5572 5573 /* Set BIT30 of Misc Control Register */ 5574 /* (Local Control Register 0x50) to force reset of USC. */ 5575 5576 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50); 5577 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28); 5578 5579 info->misc_ctrl_value |= BIT30; 5580 *MiscCtrl = info->misc_ctrl_value; 5581 5582 /* 5583 * Force at least 170ns delay before clearing reset bit. Each read from 5584 * LCR takes at least 30ns so 10 times for 300ns to be safe. 5585 */ 5586 for(i=0;i<10;i++) 5587 readval = *MiscCtrl; 5588 5589 info->misc_ctrl_value &= ~BIT30; 5590 *MiscCtrl = info->misc_ctrl_value; 5591 5592 *LCR0BRDR = BUS_DESCRIPTOR( 5593 1, // Write Strobe Hold (0-3) 5594 2, // Write Strobe Delay (0-3) 5595 2, // Read Strobe Delay (0-3) 5596 0, // NWDD (Write data-data) (0-3) 5597 4, // NWAD (Write Addr-data) (0-31) 5598 0, // NXDA (Read/Write Data-Addr) (0-3) 5599 0, // NRDD (Read Data-Data) (0-3) 5600 5 // NRAD (Read Addr-Data) (0-31) 5601 ); 5602 5603 info->mbre_bit = 0; 5604 info->loopback_bits = 0; 5605 info->usc_idle_mode = 0; 5606 5607 /* 5608 * Program the Bus Configuration Register (BCR) 5609 * 5610 * <15> 0 Don't use separate address 5611 * <14..6> 0 reserved 5612 * <5..4> 00 IAckmode = Default, don't care 5613 * <3> 1 Bus Request Totem Pole output 5614 * <2> 1 Use 16 Bit data bus 5615 * <1> 0 IRQ Totem Pole output 5616 * <0> 0 Don't Shift Right Addr 5617 * 5618 * 0000 0000 0000 1100 = 0x000c 5619 * 5620 * By writing to io_base + SDPIN the Wait/Ack pin is 5621 * programmed to work as a Wait pin. 5622 */ 5623 5624 outw( 0x000c,info->io_base + SDPIN ); 5625 5626 5627 outw( 0,info->io_base ); 5628 outw( 0,info->io_base + CCAR ); 5629 5630 /* select little endian byte ordering */ 5631 usc_RTCmd( info, RTCmd_SelectLittleEndian ); 5632 5633 5634 /* Port Control Register (PCR) 5635 * 5636 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled) 5637 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled) 5638 * <11..10> 00 Port 5 is Input (No Connect, Don't Care) 5639 * <9..8> 00 Port 4 is Input (No Connect, Don't Care) 5640 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled ) 5641 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled ) 5642 * <3..2> 01 Port 1 is Input (Dedicated RxC) 5643 * <1..0> 01 Port 0 is Input (Dedicated TxC) 5644 * 5645 * 1111 0000 1111 0101 = 0xf0f5 5646 */ 5647 5648 usc_OutReg( info, PCR, 0xf0f5 ); 5649 5650 5651 /* 5652 * Input/Output Control Register 5653 * 5654 * <15..14> 00 CTS is active low input 5655 * <13..12> 00 DCD is active low input 5656 * <11..10> 00 TxREQ pin is input (DSR) 5657 * <9..8> 00 RxREQ pin is input (RI) 5658 * <7..6> 00 TxD is output (Transmit Data) 5659 * <5..3> 000 TxC Pin in Input (14.7456MHz Clock) 5660 * <2..0> 100 RxC is Output (drive with BRG0) 5661 * 5662 * 0000 0000 0000 0100 = 0x0004 5663 */ 5664 5665 usc_OutReg( info, IOCR, 0x0004 ); 5666 5667} /* end of usc_reset() */ 5668 5669/* usc_set_async_mode() 5670 * 5671 * Program adapter for asynchronous communications. 
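 *
 * Illustrative aside (not part of the original comments): the CMR, RMR
 * and TMR values written below are derived from info->params. For the
 * common 8-N-1 case every derived field stays at zero; a 7-E-2 setup,
 * for example, instead yields
 *
 *   CMR RegValue     = BIT14;                  // stop_bits != 1
 *   RMR/TMR RegValue = BIT4 | BIT3 | BIT2      // data_bits != 8
 *                    | BIT5 | BIT6;            // parity enabled, even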
5672 * 5673 * Arguments: info pointer to device instance data 5674 * Return Value: None 5675 */ 5676static void usc_set_async_mode( struct mgsl_struct *info ) 5677{ 5678 u16 RegValue; 5679 5680 /* disable interrupts while programming USC */ 5681 usc_DisableMasterIrqBit( info ); 5682 5683 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */ 5684 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */ 5685 5686 usc_loopback_frame( info ); 5687 5688 /* Channel mode Register (CMR) 5689 * 5690 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit 5691 * <13..12> 00 00 = 16X Clock 5692 * <11..8> 0000 Transmitter mode = Asynchronous 5693 * <7..6> 00 reserved? 5694 * <5..4> 00 Rx Sub modes, 00 = 16X Clock 5695 * <3..0> 0000 Receiver mode = Asynchronous 5696 * 5697 * 0000 0000 0000 0000 = 0x0 5698 */ 5699 5700 RegValue = 0; 5701 if ( info->params.stop_bits != 1 ) 5702 RegValue |= BIT14; 5703 usc_OutReg( info, CMR, RegValue ); 5704 5705 5706 /* Receiver mode Register (RMR) 5707 * 5708 * <15..13> 000 encoding = None 5709 * <12..08> 00000 reserved (Sync Only) 5710 * <7..6> 00 Even parity 5711 * <5> 0 parity disabled 5712 * <4..2> 000 Receive Char Length = 8 bits 5713 * <1..0> 00 Disable Receiver 5714 * 5715 * 0000 0000 0000 0000 = 0x0 5716 */ 5717 5718 RegValue = 0; 5719 5720 if ( info->params.data_bits != 8 ) 5721 RegValue |= BIT4 | BIT3 | BIT2; 5722 5723 if ( info->params.parity != ASYNC_PARITY_NONE ) { 5724 RegValue |= BIT5; 5725 if ( info->params.parity != ASYNC_PARITY_ODD ) 5726 RegValue |= BIT6; 5727 } 5728 5729 usc_OutReg( info, RMR, RegValue ); 5730 5731 5732 /* Set IRQ trigger level */ 5733 5734 usc_RCmd( info, RCmd_SelectRicrIntLevel ); 5735 5736 5737 /* Receive Interrupt Control Register (RICR) 5738 * 5739 * <15..8> ? RxFIFO IRQ Request Level 5740 * 5741 * Note: For async mode the receive FIFO level must be set 5742 * to 0 to avoid the situation where the FIFO contains fewer bytes 5743 * than the trigger level and no more data is expected. 5744 * 5745 * <7> 0 Exited Hunt IA (Interrupt Arm) 5746 * <6> 0 Idle Received IA 5747 * <5> 0 Break/Abort IA 5748 * <4> 0 Rx Bound IA 5749 * <3> 0 Queued status reflects oldest byte in FIFO 5750 * <2> 0 Abort/PE IA 5751 * <1> 0 Rx Overrun IA 5752 * <0> 0 Select TC0 value for readback 5753 * 5754 * 0000 0000 0100 0000 = 0x0000 + (FIFOLEVEL in MSB) 5755 */ 5756 5757 usc_OutReg( info, RICR, 0x0000 ); 5758 5759 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); 5760 usc_ClearIrqPendingBits( info, RECEIVE_STATUS ); 5761 5762 5763 /* Transmit mode Register (TMR) 5764 * 5765 * <15..13> 000 encoding = None 5766 * <12..08> 00000 reserved (Sync Only) 5767 * <7..6> 00 Transmit parity Even 5768 * <5> 0 Transmit parity Disabled 5769 * <4..2> 000 Tx Char Length = 8 bits 5770 * <1..0> 00 Disable Transmitter 5771 * 5772 * 0000 0000 0000 0000 = 0x0 5773 */ 5774 5775 RegValue = 0; 5776 5777 if ( info->params.data_bits != 8 ) 5778 RegValue |= BIT4 | BIT3 | BIT2; 5779 5780 if ( info->params.parity != ASYNC_PARITY_NONE ) { 5781 RegValue |= BIT5; 5782 if ( info->params.parity != ASYNC_PARITY_ODD ) 5783 RegValue |= BIT6; 5784 } 5785 5786 usc_OutReg( info, TMR, RegValue ); 5787 5788 usc_set_txidle( info ); 5789 5790 5791 /* Set IRQ trigger level */ 5792 5793 usc_TCmd( info, TCmd_SelectTicrIntLevel ); 5794 5795 5796 /* Transmit Interrupt Control Register (TICR) 5797 * 5798 * <15..8> ? 
Transmit FIFO IRQ Level 5799 * <7> 0 Present IA (Interrupt Arm) 5800 * <6> 1 Idle Sent IA 5801 * <5> 0 Abort Sent IA 5802 * <4> 0 EOF/EOM Sent IA 5803 * <3> 0 CRC Sent IA 5804 * <2> 0 1 = Wait for SW Trigger to Start Frame 5805 * <1> 0 Tx Underrun IA 5806 * <0> 0 TC0 constant on read back 5807 * 5808 * 0000 0000 0100 0000 = 0x0040 5809 */ 5810 5811 usc_OutReg( info, TICR, 0x1f40 ); 5812 5813 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL ); 5814 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS ); 5815 5816 usc_enable_async_clock( info, info->params.data_rate ); 5817 5818 5819 /* Channel Control/status Register (CCSR) 5820 * 5821 * <15> X RCC FIFO Overflow status (RO) 5822 * <14> X RCC FIFO Not Empty status (RO) 5823 * <13> 0 1 = Clear RCC FIFO (WO) 5824 * <12> X DPLL in Sync status (RO) 5825 * <11> X DPLL 2 Missed Clocks status (RO) 5826 * <10> X DPLL 1 Missed Clock status (RO) 5827 * <9..8> 00 DPLL Resync on rising and falling edges (RW) 5828 * <7> X SDLC Loop On status (RO) 5829 * <6> X SDLC Loop Send status (RO) 5830 * <5> 1 Bypass counters for TxClk and RxClk (RW) 5831 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW) 5832 * <1..0> 00 reserved 5833 * 5834 * 0000 0000 0010 0000 = 0x0020 5835 */ 5836 5837 usc_OutReg( info, CCSR, 0x0020 ); 5838 5839 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA + 5840 RECEIVE_DATA + RECEIVE_STATUS ); 5841 5842 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA + 5843 RECEIVE_DATA + RECEIVE_STATUS ); 5844 5845 usc_EnableMasterIrqBit( info ); 5846 5847 if (info->params.loopback) { 5848 info->loopback_bits = 0x300; 5849 outw(0x0300, info->io_base + CCAR); 5850 } 5851 5852} /* end of usc_set_async_mode() */ 5853 5854/* usc_loopback_frame() 5855 * 5856 * Loop back a small (2 byte) dummy SDLC frame. 5857 * Interrupts and DMA are NOT used. The purpose of this is to 5858 * clear any 'stale' status info left over from running in async mode. 5859 * 5860 * The 16C32 shows the strange behaviour of marking the 1st 5861 * received SDLC frame with a CRC error even when there is no 5862 * CRC error. To get around this a small dummy frame of 2 bytes 5863 * is looped back when switching from async to sync mode.
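 *
 * Illustrative aside (not part of the original comments): the dummy
 * frame is produced entirely from the FIFO, with no DMA involved:
 *
 *   usc_OutReg( info, TCLR, 2 );           // frame length = 2 bytes
 *   outw( 0, info->io_base + DATAREG );    // the two dummy data bytes
 *   usc_TCmd( info, TCmd_SendFrame );      // start transmission
 *
 * after which the code simply polls RCSR until the looped-back frame
 * (or an ending condition) is reported by the receiver.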
5864 * 5865 * Arguments: info pointer to device instance data 5866 * Return Value: None 5867 */ 5868static void usc_loopback_frame( struct mgsl_struct *info ) 5869{ 5870 int i; 5871 unsigned long oldmode = info->params.mode; 5872 5873 info->params.mode = MGSL_MODE_HDLC; 5874 5875 usc_DisableMasterIrqBit( info ); 5876 5877 usc_set_sdlc_mode( info ); 5878 usc_enable_loopback( info, 1 ); 5879 5880 /* Write 16-bit Time Constant for BRG0 */ 5881 usc_OutReg( info, TC0R, 0 ); 5882 5883 /* Channel Control Register (CCR) 5884 * 5885 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs) 5886 * <13> 0 Trigger Tx on SW Command Disabled 5887 * <12> 0 Flag Preamble Disabled 5888 * <11..10> 00 Preamble Length = 8-Bits 5889 * <9..8> 01 Preamble Pattern = flags 5890 * <7..6> 10 Don't use 32-bit Rx status Blocks (RSBs) 5891 * <5> 0 Trigger Rx on SW Command Disabled 5892 * <4..0> 0 reserved 5893 * 5894 * 0000 0001 0000 0000 = 0x0100 5895 */ 5896 5897 usc_OutReg( info, CCR, 0x0100 ); 5898 5899 /* SETUP RECEIVER */ 5900 usc_RTCmd( info, RTCmd_PurgeRxFifo ); 5901 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL); 5902 5903 /* SETUP TRANSMITTER */ 5904 /* Program the Transmit Character Length Register (TCLR) */ 5905 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */ 5906 usc_OutReg( info, TCLR, 2 ); 5907 usc_RTCmd( info, RTCmd_PurgeTxFifo ); 5908 5909 /* unlatch Tx status bits, and start transmit channel. */ 5910 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL); 5911 outw(0,info->io_base + DATAREG); 5912 5913 /* ENABLE TRANSMITTER */ 5914 usc_TCmd( info, TCmd_SendFrame ); 5915 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL); 5916 5917 /* WAIT FOR RECEIVE COMPLETE */ 5918 for (i=0 ; i<1000 ; i++) 5919 if (usc_InReg( info, RCSR ) & (BIT8 | BIT4 | BIT3 | BIT1)) 5920 break; 5921 5922 /* clear Internal Data loopback mode */ 5923 usc_enable_loopback(info, 0); 5924 5925 usc_EnableMasterIrqBit(info); 5926 5927 info->params.mode = oldmode; 5928 5929} /* end of usc_loopback_frame() */ 5930 5931/* usc_set_sync_mode() Programs the USC for SDLC communications. 5932 * 5933 * Arguments: info pointer to adapter info structure 5934 * Return Value: None 5935 */ 5936static void usc_set_sync_mode( struct mgsl_struct *info ) 5937{ 5938 usc_loopback_frame( info ); 5939 usc_set_sdlc_mode( info ); 5940 5941 usc_enable_aux_clock(info, info->params.clock_speed); 5942 5943 if (info->params.loopback) 5944 usc_enable_loopback(info,1); 5945 5946} /* end of usc_set_sync_mode() */ 5947 5948/* usc_set_txidle() Set the HDLC idle mode for the transmitter.
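 *
 * Illustrative aside (not part of the original comments): the idle
 * pattern lives in a bit field of the TCSR, so the cached register
 * value is updated read-modify-write style before being written back:
 *
 *   info->tcsr_value &= ~IDLEMODE_MASK;    // clear old idle mode bits
 *   info->tcsr_value += usc_idle_mode;     // merge in the new mode
 *   usc_OutReg( info, TCSR, info->tcsr_value );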
5949 * 5950 * Arguments: info pointer to device instance data 5951 * Return Value: None 5952 */ 5953static void usc_set_txidle( struct mgsl_struct *info ) 5954{ 5955 u16 usc_idle_mode = IDLEMODE_FLAGS; 5956 5957 /* Map API idle mode to USC register bits */ 5958 5959 switch( info->idle_mode ){ 5960 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break; 5961 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break; 5962 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break; 5963 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break; 5964 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break; 5965 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break; 5966 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break; 5967 } 5968 5969 info->usc_idle_mode = usc_idle_mode; 5970 //usc_OutReg(info, TCSR, usc_idle_mode); 5971 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */ 5972 info->tcsr_value += usc_idle_mode; 5973 usc_OutReg(info, TCSR, info->tcsr_value); 5974 5975 /* 5976 * if SyncLink WAN adapter is running in external sync mode, the 5977 * transmitter has been set to Monosync in order to try to mimic 5978 * a true raw outbound bit stream. Monosync still sends an open/close 5979 * sync char at the start/end of a frame. Try to match those sync 5980 * patterns to the idle mode set here 5981 */ 5982 if ( info->params.mode == MGSL_MODE_RAW ) { 5983 unsigned char syncpat = 0; 5984 switch( info->idle_mode ) { 5985 case HDLC_TXIDLE_FLAGS: 5986 syncpat = 0x7e; 5987 break; 5988 case HDLC_TXIDLE_ALT_ZEROS_ONES: 5989 syncpat = 0x55; 5990 break; 5991 case HDLC_TXIDLE_ZEROS: 5992 case HDLC_TXIDLE_SPACE: 5993 syncpat = 0x00; 5994 break; 5995 case HDLC_TXIDLE_ONES: 5996 case HDLC_TXIDLE_MARK: 5997 syncpat = 0xff; 5998 break; 5999 case HDLC_TXIDLE_ALT_MARK_SPACE: 6000 syncpat = 0xaa; 6001 break; 6002 } 6003 6004 usc_SetTransmitSyncChars(info,syncpat,syncpat); 6005 } 6006 6007} /* end of usc_set_txidle() */ 6008 6009/* usc_get_serial_signals() 6010 * 6011 * Query the adapter for the state of the V24 status (input) signals. 6012 * 6013 * Arguments: info pointer to device instance data 6014 * Return Value: None 6015 */ 6016static void usc_get_serial_signals( struct mgsl_struct *info ) 6017{ 6018 u16 status; 6019 6020 /* clear all serial signals except RTS and DTR */ 6021 info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR; 6022 6023 /* Read the Misc Interrupt status Register (MISR) to get */ 6024 /* the V24 status signals. */ 6025 6026 status = usc_InReg( info, MISR ); 6027 6028 /* set serial signal bits to reflect MISR */ 6029 6030 if ( status & MISCSTATUS_CTS ) 6031 info->serial_signals |= SerialSignal_CTS; 6032 6033 if ( status & MISCSTATUS_DCD ) 6034 info->serial_signals |= SerialSignal_DCD; 6035 6036 if ( status & MISCSTATUS_RI ) 6037 info->serial_signals |= SerialSignal_RI; 6038 6039 if ( status & MISCSTATUS_DSR ) 6040 info->serial_signals |= SerialSignal_DSR; 6041 6042} /* end of usc_get_serial_signals() */ 6043 6044/* usc_set_serial_signals() 6045 * 6046 * Set the state of RTS and DTR based on contents of 6047 * serial_signals member of device extension. 
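 *
 * Illustrative aside (not part of the original comments): the PCR
 * output bits for RTS (BIT6) and DTR (BIT4) are active low, so a
 * signal is asserted by clearing its bit and negated by setting it:
 *
 *   if ( V24Out & SerialSignal_RTS )
 *       Control &= ~BIT6;      // assert RTS (drive output low)
 *   else
 *       Control |= BIT6;       // negate RTS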
6048 * 6049 * Arguments: info pointer to device instance data 6050 * Return Value: None 6051 */ 6052static void usc_set_serial_signals( struct mgsl_struct *info ) 6053{ 6054 u16 Control; 6055 unsigned char V24Out = info->serial_signals; 6056 6057 /* get the current value of the Port Control Register (PCR) */ 6058 6059 Control = usc_InReg( info, PCR ); 6060 6061 if ( V24Out & SerialSignal_RTS ) 6062 Control &= ~(BIT6); 6063 else 6064 Control |= BIT6; 6065 6066 if ( V24Out & SerialSignal_DTR ) 6067 Control &= ~(BIT4); 6068 else 6069 Control |= BIT4; 6070 6071 usc_OutReg( info, PCR, Control ); 6072 6073} /* end of usc_set_serial_signals() */ 6074 6075/* usc_enable_async_clock() 6076 * 6077 * Enable the async clock at the specified frequency. 6078 * 6079 * Arguments: info pointer to device instance data 6080 * data_rate data rate of clock in bps 6081 * 0 disables the AUX clock. 6082 * Return Value: None 6083 */ 6084static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate ) 6085{ 6086 if ( data_rate ) { 6087 /* 6088 * Clock mode Control Register (CMCR) 6089 * 6090 * <15..14> 00 counter 1 Disabled 6091 * <13..12> 00 counter 0 Disabled 6092 * <11..10> 11 BRG1 Input is TxC Pin 6093 * <9..8> 11 BRG0 Input is TxC Pin 6094 * <7..6> 01 DPLL Input is BRG1 Output 6095 * <5..3> 100 TxCLK comes from BRG0 6096 * <2..0> 100 RxCLK comes from BRG0 6097 * 6098 * 0000 1111 0110 0100 = 0x0f64 6099 */ 6100 6101 usc_OutReg( info, CMCR, 0x0f64 ); 6102 6103 6104 /* 6105 * Write 16-bit Time Constant for BRG0 6106 * Time Constant = (ClkSpeed / data_rate) - 1 6107 * ClkSpeed = 921600 (ISA), 691200 (PCI) 6108 */ 6109 6110 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) ); 6111 6112 /* 6113 * Hardware Configuration Register (HCR) 6114 * Clear Bit 1, BRG0 mode = Continuous 6115 * Set Bit 0 to enable BRG0. 6116 */ 6117 6118 usc_OutReg( info, HCR, 6119 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) ); 6120 6121 6122 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */ 6123 6124 usc_OutReg( info, IOCR, 6125 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) ); 6126 } else { 6127 /* data rate == 0 so turn off BRG0 */ 6128 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) ); 6129 } 6130 6131} /* end of usc_enable_async_clock() */ 6132 6133/* 6134 * Buffer Structures: 6135 * 6136 * Normal memory access uses virtual addresses that can make discontiguous 6137 * physical memory pages appear to be contiguous in the virtual address 6138 * space (the processors memory mapping handles the conversions). 6139 * 6140 * DMA transfers require physically contiguous memory. This is because 6141 * the DMA system controller and DMA bus masters deal with memory using 6142 * only physical addresses. 6143 * 6144 * This causes a problem under Windows NT when large DMA buffers are 6145 * needed. Fragmentation of the nonpaged pool prevents allocations of 6146 * physically contiguous buffers larger than the PAGE_SIZE. 6147 * 6148 * However the 16C32 supports Bus Master Scatter/Gather DMA which 6149 * allows DMA transfers to physically discontiguous buffers. Information 6150 * about each data transfer buffer is contained in a memory structure 6151 * called a 'buffer entry'. A list of buffer entries is maintained 6152 * to track and control the use of the data transfer buffers. 6153 * 6154 * To support this strategy we will allocate sufficient PAGE_SIZE 6155 * contiguous memory buffers to allow for the total required buffer 6156 * space. 
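 *
 * Illustrative example (not part of the original comments): with
 * 4096-byte DMA buffers, a received frame of, say, 10000 bytes is
 * spread by the 16C32 across three consecutive buffer entries
 * (4096 + 4096 + 1808 bytes); the driver later walks those entries
 * and copies them back-to-back into one contiguous intermediate
 * buffer before passing the frame up.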
6157 * 6158 * The 16C32 accesses the list of buffer entries using Bus Master 6159 * DMA. Control information is read from the buffer entries by the 6160 * 16C32 to control data transfers. status information is written to 6161 * the buffer entries by the 16C32 to indicate the status of completed 6162 * transfers. 6163 * 6164 * The CPU writes control information to the buffer entries to control 6165 * the 16C32 and reads status information from the buffer entries to 6166 * determine information about received and transmitted frames. 6167 * 6168 * Because the CPU and 16C32 (adapter) both need simultaneous access 6169 * to the buffer entries, the buffer entry memory is allocated with 6170 * HalAllocateCommonBuffer(). This restricts the size of the buffer 6171 * entry list to PAGE_SIZE. 6172 * 6173 * The actual data buffers on the other hand will only be accessed 6174 * by the CPU or the adapter but not by both simultaneously. This allows 6175 * Scatter/Gather packet based DMA procedures for using physically 6176 * discontiguous pages. 6177 */ 6178 6179/* 6180 * mgsl_reset_tx_dma_buffers() 6181 * 6182 * Set the count for all transmit buffers to 0 to indicate the 6183 * buffer is available for use and set the current buffer to the 6184 * first buffer. This effectively makes all buffers free and 6185 * discards any data in buffers. 6186 * 6187 * Arguments: info pointer to device instance data 6188 * Return Value: None 6189 */ 6190static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info ) 6191{ 6192 unsigned int i; 6193 6194 for ( i = 0; i < info->tx_buffer_count; i++ ) { 6195 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0; 6196 } 6197 6198 info->current_tx_buffer = 0; 6199 info->start_tx_dma_buffer = 0; 6200 info->tx_dma_buffers_used = 0; 6201 6202 info->get_tx_holding_index = 0; 6203 info->put_tx_holding_index = 0; 6204 info->tx_holding_count = 0; 6205 6206} /* end of mgsl_reset_tx_dma_buffers() */ 6207 6208/* 6209 * num_free_tx_dma_buffers() 6210 * 6211 * returns the number of free tx dma buffers available 6212 * 6213 * Arguments: info pointer to device instance data 6214 * Return Value: number of free tx dma buffers 6215 */ 6216static int num_free_tx_dma_buffers(struct mgsl_struct *info) 6217{ 6218 return info->tx_buffer_count - info->tx_dma_buffers_used; 6219} 6220 6221/* 6222 * mgsl_reset_rx_dma_buffers() 6223 * 6224 * Set the count for all receive buffers to DMABUFFERSIZE 6225 * and set the current buffer to the first buffer. This effectively 6226 * makes all buffers free and discards any data in buffers. 6227 * 6228 * Arguments: info pointer to device instance data 6229 * Return Value: None 6230 */ 6231static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info ) 6232{ 6233 unsigned int i; 6234 6235 for ( i = 0; i < info->rx_buffer_count; i++ ) { 6236 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE; 6237// info->rx_buffer_list[i].count = DMABUFFERSIZE; 6238// info->rx_buffer_list[i].status = 0; 6239 } 6240 6241 info->current_rx_buffer = 0; 6242 6243} /* end of mgsl_reset_rx_dma_buffers() */ 6244 6245/* 6246 * mgsl_free_rx_frame_buffers() 6247 * 6248 * Free the receive buffers used by a received SDLC 6249 * frame such that the buffers can be reused. 
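 *
 * Illustrative aside (not part of the original comments): count and
 * status are adjacent 16-bit fields of the buffer entry, so on the
 * little-endian x86 systems these adapters are used in, the single
 * wide store
 *
 *   *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
 *
 * both reloads count with DMABUFFERSIZE and clears status to zero,
 * which is what the two commented-out per-field assignments nearby
 * would do individually.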
6250 * 6251 * Arguments: 6252 * 6253 * info pointer to device instance data 6254 * StartIndex index of 1st receive buffer of frame 6255 * EndIndex index of last receive buffer of frame 6256 * 6257 * Return Value: None 6258 */ 6259static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex ) 6260{ 6261 bool Done = false; 6262 DMABUFFERENTRY *pBufEntry; 6263 unsigned int Index; 6264 6265 /* Starting with 1st buffer entry of the frame clear the status */ 6266 /* field and set the count field to DMA Buffer Size. */ 6267 6268 Index = StartIndex; 6269 6270 while( !Done ) { 6271 pBufEntry = &(info->rx_buffer_list[Index]); 6272 6273 if ( Index == EndIndex ) { 6274 /* This is the last buffer of the frame! */ 6275 Done = true; 6276 } 6277 6278 /* reset current buffer for reuse */ 6279// pBufEntry->status = 0; 6280// pBufEntry->count = DMABUFFERSIZE; 6281 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE; 6282 6283 /* advance to next buffer entry in linked list */ 6284 Index++; 6285 if ( Index == info->rx_buffer_count ) 6286 Index = 0; 6287 } 6288 6289 /* set current buffer to next buffer after last buffer of frame */ 6290 info->current_rx_buffer = Index; 6291 6292} /* end of mgsl_free_rx_frame_buffers() */ 6293 6294/* mgsl_get_rx_frame() 6295 * 6296 * This function attempts to return a received SDLC frame from the 6297 * receive DMA buffers. Only frames received without errors are returned. 6298 * 6299 * Arguments: info pointer to device extension 6300 * Return Value: true if frame returned, otherwise false 6301 */ 6302static bool mgsl_get_rx_frame(struct mgsl_struct *info) 6303{ 6304 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */ 6305 unsigned short status; 6306 DMABUFFERENTRY *pBufEntry; 6307 unsigned int framesize = 0; 6308 bool ReturnCode = false; 6309 unsigned long flags; 6310 struct tty_struct *tty = info->port.tty; 6311 bool return_frame = false; 6312 6313 /* 6314 * current_rx_buffer points to the 1st buffer of the next available 6315 * receive frame. To find the last buffer of the frame look for 6316 * a non-zero status field in the buffer entries. (The status 6317 * field is set by the 16C32 after completing a receive frame.) 6318 */ 6319 6320 StartIndex = EndIndex = info->current_rx_buffer; 6321 6322 while( !info->rx_buffer_list[EndIndex].status ) { 6323 /* 6324 * If the count field of the buffer entry is non-zero then 6325 * this buffer has not been used. (The 16C32 clears the count 6326 * field when it starts using the buffer.) If an unused buffer 6327 * is encountered then there are no frames available. 6328 */ 6329 6330 if ( info->rx_buffer_list[EndIndex].count ) 6331 goto Cleanup; 6332 6333 /* advance to next buffer entry in linked list */ 6334 EndIndex++; 6335 if ( EndIndex == info->rx_buffer_count ) 6336 EndIndex = 0; 6337 6338 /* if entire list searched then no frame available */ 6339 if ( EndIndex == StartIndex ) { 6340 /* If this occurs then something bad happened, 6341 * all buffers have been 'used' but none mark 6342 * the end of a frame. Reset buffers and receiver.
6343 */ 6344 6345 if ( info->rx_enabled ){ 6346 spin_lock_irqsave(&info->irq_spinlock,flags); 6347 usc_start_receiver(info); 6348 spin_unlock_irqrestore(&info->irq_spinlock,flags); 6349 } 6350 goto Cleanup; 6351 } 6352 } 6353 6354 6355 /* check status of receive frame */ 6356 6357 status = info->rx_buffer_list[EndIndex].status; 6358 6359 if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN | 6360 RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) { 6361 if ( status & RXSTATUS_SHORT_FRAME ) 6362 info->icount.rxshort++; 6363 else if ( status & RXSTATUS_ABORT ) 6364 info->icount.rxabort++; 6365 else if ( status & RXSTATUS_OVERRUN ) 6366 info->icount.rxover++; 6367 else { 6368 info->icount.rxcrc++; 6369 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) 6370 return_frame = true; 6371 } 6372 framesize = 0; 6373#if SYNCLINK_GENERIC_HDLC 6374 { 6375 info->netdev->stats.rx_errors++; 6376 info->netdev->stats.rx_frame_errors++; 6377 } 6378#endif 6379 } else 6380 return_frame = true; 6381 6382 if ( return_frame ) { 6383 /* receive frame has no errors, get frame size. 6384 * The frame size is the starting value of the RCC (which was 6385 * set to 0xffff) minus the ending value of the RCC (decremented 6386 * once for each receive character) minus 2 for the 16-bit CRC. 6387 */ 6388 6389 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc; 6390 6391 /* adjust frame size for CRC if any */ 6392 if ( info->params.crc_type == HDLC_CRC_16_CCITT ) 6393 framesize -= 2; 6394 else if ( info->params.crc_type == HDLC_CRC_32_CCITT ) 6395 framesize -= 4; 6396 } 6397 6398 if ( debug_level >= DEBUG_LEVEL_BH ) 6399 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n", 6400 __FILE__,__LINE__,info->device_name,status,framesize); 6401 6402 if ( debug_level >= DEBUG_LEVEL_DATA ) 6403 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr, 6404 min_t(int, framesize, DMABUFFERSIZE),0); 6405 6406 if (framesize) { 6407 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) && 6408 ((framesize+1) > info->max_frame_size) ) || 6409 (framesize > info->max_frame_size) ) 6410 info->icount.rxlong++; 6411 else { 6412 /* copy dma buffer(s) to contiguous intermediate buffer */ 6413 int copy_count = framesize; 6414 int index = StartIndex; 6415 unsigned char *ptmp = info->intermediate_rxbuffer; 6416 6417 if ( !(status & RXSTATUS_CRC_ERROR)) 6418 info->icount.rxok++; 6419 6420 while(copy_count) { 6421 int partial_count; 6422 if ( copy_count > DMABUFFERSIZE ) 6423 partial_count = DMABUFFERSIZE; 6424 else 6425 partial_count = copy_count; 6426 6427 pBufEntry = &(info->rx_buffer_list[index]); 6428 memcpy( ptmp, pBufEntry->virt_addr, partial_count ); 6429 ptmp += partial_count; 6430 copy_count -= partial_count; 6431 6432 if ( ++index == info->rx_buffer_count ) 6433 index = 0; 6434 } 6435 6436 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) { 6437 ++framesize; 6438 *ptmp = (status & RXSTATUS_CRC_ERROR ? 6439 RX_CRC_ERROR : 6440 RX_OK); 6441 6442 if ( debug_level >= DEBUG_LEVEL_DATA ) 6443 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n", 6444 __FILE__,__LINE__,info->device_name, 6445 *ptmp); 6446 } 6447 6448#if SYNCLINK_GENERIC_HDLC 6449 if (info->netcount) 6450 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize); 6451 else 6452#endif 6453 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize); 6454 } 6455 } 6456 /* Free the buffers used by this frame. 
*/ 6457 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex ); 6458 6459 ReturnCode = true; 6460 6461Cleanup: 6462 6463 if ( info->rx_enabled && info->rx_overflow ) { 6464 /* The receiver needs to restarted because of 6465 * a receive overflow (buffer or FIFO). If the 6466 * receive buffers are now empty, then restart receiver. 6467 */ 6468 6469 if ( !info->rx_buffer_list[EndIndex].status && 6470 info->rx_buffer_list[EndIndex].count ) { 6471 spin_lock_irqsave(&info->irq_spinlock,flags); 6472 usc_start_receiver(info); 6473 spin_unlock_irqrestore(&info->irq_spinlock,flags); 6474 } 6475 } 6476 6477 return ReturnCode; 6478 6479} /* end of mgsl_get_rx_frame() */ 6480 6481/* mgsl_get_raw_rx_frame() 6482 * 6483 * This function attempts to return a received frame from the 6484 * receive DMA buffers when running in external loop mode. In this mode, 6485 * we will return at most one DMABUFFERSIZE frame to the application. 6486 * The USC receiver is triggering off of DCD going active to start a new 6487 * frame, and DCD going inactive to terminate the frame (similar to 6488 * processing a closing flag character). 6489 * 6490 * In this routine, we will return DMABUFFERSIZE "chunks" at a time. 6491 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero 6492 * status field and the RCC field will indicate the length of the 6493 * entire received frame. We take this RCC field and get the modulus 6494 * of RCC and DMABUFFERSIZE to determine if number of bytes in the 6495 * last Rx DMA buffer and return that last portion of the frame. 6496 * 6497 * Arguments: info pointer to device extension 6498 * Return Value: true if frame returned, otherwise false 6499 */ 6500static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info) 6501{ 6502 unsigned int CurrentIndex, NextIndex; 6503 unsigned short status; 6504 DMABUFFERENTRY *pBufEntry; 6505 unsigned int framesize = 0; 6506 bool ReturnCode = false; 6507 unsigned long flags; 6508 struct tty_struct *tty = info->port.tty; 6509 6510 /* 6511 * current_rx_buffer points to the 1st buffer of the next available 6512 * receive frame. The status field is set by the 16C32 after 6513 * completing a receive frame. If the status field of this buffer 6514 * is zero, either the USC is still filling this buffer or this 6515 * is one of a series of buffers making up a received frame. 6516 * 6517 * If the count field of this buffer is zero, the USC is either 6518 * using this buffer or has used this buffer. Look at the count 6519 * field of the next buffer. If that next buffer's count is 6520 * non-zero, the USC is still actively using the current buffer. 6521 * Otherwise, if the next buffer's count field is zero, the 6522 * current buffer is complete and the USC is using the next 6523 * buffer. 6524 */ 6525 CurrentIndex = NextIndex = info->current_rx_buffer; 6526 ++NextIndex; 6527 if ( NextIndex == info->rx_buffer_count ) 6528 NextIndex = 0; 6529 6530 if ( info->rx_buffer_list[CurrentIndex].status != 0 || 6531 (info->rx_buffer_list[CurrentIndex].count == 0 && 6532 info->rx_buffer_list[NextIndex].count == 0)) { 6533 /* 6534 * Either the status field of this dma buffer is non-zero 6535 * (indicating the last buffer of a receive frame) or the next 6536 * buffer is marked as in use -- implying this buffer is complete 6537 * and an intermediate buffer for this received frame. 
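 *
 * Illustrative example (not part of the original comments): if DCD
 * drops after 10000 bytes, the RCC-derived frame size is 10000; since
 * each DMA buffer holds 4096 bytes, the adjustment below returns
 * 10000 % 4096 = 1808 bytes, i.e. just the portion of the frame that
 * landed in this final buffer.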
6538 */ 6539 6540 status = info->rx_buffer_list[CurrentIndex].status; 6541 6542 if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN | 6543 RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) { 6544 if ( status & RXSTATUS_SHORT_FRAME ) 6545 info->icount.rxshort++; 6546 else if ( status & RXSTATUS_ABORT ) 6547 info->icount.rxabort++; 6548 else if ( status & RXSTATUS_OVERRUN ) 6549 info->icount.rxover++; 6550 else 6551 info->icount.rxcrc++; 6552 framesize = 0; 6553 } else { 6554 /* 6555 * A receive frame is available, get frame size and status. 6556 * 6557 * The frame size is the starting value of the RCC (which was 6558 * set to 0xffff) minus the ending value of the RCC (decremented 6559 * once for each receive character) minus 2 or 4 for the 16-bit 6560 * or 32-bit CRC. 6561 * 6562 * If the status field is zero, this is an intermediate buffer. 6563 * It's size is 4K. 6564 * 6565 * If the DMA Buffer Entry's Status field is non-zero, the 6566 * receive operation completed normally (ie: DCD dropped). The 6567 * RCC field is valid and holds the received frame size. 6568 * It is possible that the RCC field will be zero on a DMA buffer 6569 * entry with a non-zero status. This can occur if the total 6570 * frame size (number of bytes between the time DCD goes active 6571 * to the time DCD goes inactive) exceeds 65535 bytes. In this 6572 * case the 16C32 has underrun on the RCC count and appears to 6573 * stop updating this counter to let us know the actual received 6574 * frame size. If this happens (non-zero status and zero RCC), 6575 * simply return the entire RxDMA Buffer 6576 */ 6577 if ( status ) { 6578 /* 6579 * In the event that the final RxDMA Buffer is 6580 * terminated with a non-zero status and the RCC 6581 * field is zero, we interpret this as the RCC 6582 * having underflowed (received frame > 65535 bytes). 6583 * 6584 * Signal the event to the user by passing back 6585 * a status of RxStatus_CrcError returning the full 6586 * buffer and let the app figure out what data is 6587 * actually valid 6588 */ 6589 if ( info->rx_buffer_list[CurrentIndex].rcc ) 6590 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc; 6591 else 6592 framesize = DMABUFFERSIZE; 6593 } 6594 else 6595 framesize = DMABUFFERSIZE; 6596 } 6597 6598 if ( framesize > DMABUFFERSIZE ) { 6599 /* 6600 * if running in raw sync mode, ISR handler for 6601 * End Of Buffer events terminates all buffers at 4K. 6602 * If this frame size is said to be >4K, get the 6603 * actual number of bytes of the frame in this buffer. 6604 */ 6605 framesize = framesize % DMABUFFERSIZE; 6606 } 6607 6608 6609 if ( debug_level >= DEBUG_LEVEL_BH ) 6610 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n", 6611 __FILE__,__LINE__,info->device_name,status,framesize); 6612 6613 if ( debug_level >= DEBUG_LEVEL_DATA ) 6614 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr, 6615 min_t(int, framesize, DMABUFFERSIZE),0); 6616 6617 if (framesize) { 6618 /* copy dma buffer(s) to contiguous intermediate buffer */ 6619 /* NOTE: we never copy more than DMABUFFERSIZE bytes */ 6620 6621 pBufEntry = &(info->rx_buffer_list[CurrentIndex]); 6622 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize); 6623 info->icount.rxok++; 6624 6625 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize); 6626 } 6627 6628 /* Free the buffers used by this frame. 
*/ 6629 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex ); 6630 6631 ReturnCode = true; 6632 } 6633 6634 6635 if ( info->rx_enabled && info->rx_overflow ) { 6636 /* The receiver needs to restarted because of 6637 * a receive overflow (buffer or FIFO). If the 6638 * receive buffers are now empty, then restart receiver. 6639 */ 6640 6641 if ( !info->rx_buffer_list[CurrentIndex].status && 6642 info->rx_buffer_list[CurrentIndex].count ) { 6643 spin_lock_irqsave(&info->irq_spinlock,flags); 6644 usc_start_receiver(info); 6645 spin_unlock_irqrestore(&info->irq_spinlock,flags); 6646 } 6647 } 6648 6649 return ReturnCode; 6650 6651} /* end of mgsl_get_raw_rx_frame() */ 6652 6653/* mgsl_load_tx_dma_buffer() 6654 * 6655 * Load the transmit DMA buffer with the specified data. 6656 * 6657 * Arguments: 6658 * 6659 * info pointer to device extension 6660 * Buffer pointer to buffer containing frame to load 6661 * BufferSize size in bytes of frame in Buffer 6662 * 6663 * Return Value: None 6664 */ 6665static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info, 6666 const char *Buffer, unsigned int BufferSize) 6667{ 6668 unsigned short Copycount; 6669 unsigned int i = 0; 6670 DMABUFFERENTRY *pBufEntry; 6671 6672 if ( debug_level >= DEBUG_LEVEL_DATA ) 6673 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1); 6674 6675 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) { 6676 /* set CMR:13 to start transmit when 6677 * next GoAhead (abort) is received 6678 */ 6679 info->cmr_value |= BIT13; 6680 } 6681 6682 /* begin loading the frame in the next available tx dma 6683 * buffer, remember it's starting location for setting 6684 * up tx dma operation 6685 */ 6686 i = info->current_tx_buffer; 6687 info->start_tx_dma_buffer = i; 6688 6689 /* Setup the status and RCC (Frame Size) fields of the 1st */ 6690 /* buffer entry in the transmit DMA buffer list. */ 6691 6692 info->tx_buffer_list[i].status = info->cmr_value & 0xf000; 6693 info->tx_buffer_list[i].rcc = BufferSize; 6694 info->tx_buffer_list[i].count = BufferSize; 6695 6696 /* Copy frame data from 1st source buffer to the DMA buffers. */ 6697 /* The frame data may span multiple DMA buffers. */ 6698 6699 while( BufferSize ){ 6700 /* Get a pointer to next DMA buffer entry. */ 6701 pBufEntry = &info->tx_buffer_list[i++]; 6702 6703 if ( i == info->tx_buffer_count ) 6704 i=0; 6705 6706 /* Calculate the number of bytes that can be copied from */ 6707 /* the source buffer to this DMA buffer. */ 6708 if ( BufferSize > DMABUFFERSIZE ) 6709 Copycount = DMABUFFERSIZE; 6710 else 6711 Copycount = BufferSize; 6712 6713 /* Actually copy data from source buffer to DMA buffer. */ 6714 /* Also set the data count for this individual DMA buffer. */ 6715 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount); 6716 6717 pBufEntry->count = Copycount; 6718 6719 /* Advance source pointer and reduce remaining data count. */ 6720 Buffer += Copycount; 6721 BufferSize -= Copycount; 6722 6723 ++info->tx_dma_buffers_used; 6724 } 6725 6726 /* remember next available tx dma buffer */ 6727 info->current_tx_buffer = i; 6728 6729} /* end of mgsl_load_tx_dma_buffer() */ 6730 6731/* 6732 * mgsl_register_test() 6733 * 6734 * Performs a register test of the 16C32. 
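 *
 * Illustrative aside (not part of the original comments): each pass
 * writes the test patterns to six registers with a rotating offset,
 * e.g. pass i = 0 writes
 *
 *   TC0R = 0x0000, TC1R = 0xffff, TCLR = 0xaaaa,
 *   RCLR = 0x5555, RSR  = 0x1234, TBCR = 0x6969
 *
 * so adjacent registers never hold the same value when they are read
 * back and compared, which helps catch shorted address or data lines.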
6735 * 6736 * Arguments: info pointer to device instance data 6737 * Return Value: true if test passed, otherwise false 6738 */ 6739static bool mgsl_register_test( struct mgsl_struct *info ) 6740{ 6741 static unsigned short BitPatterns[] = 6742 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f }; 6743 static unsigned int Patterncount = ARRAY_SIZE(BitPatterns); 6744 unsigned int i; 6745 bool rc = true; 6746 unsigned long flags; 6747 6748 spin_lock_irqsave(&info->irq_spinlock,flags); 6749 usc_reset(info); 6750 6751 /* Verify the reset state of some registers. */ 6752 6753 if ( (usc_InReg( info, SICR ) != 0) || 6754 (usc_InReg( info, IVR ) != 0) || 6755 (usc_InDmaReg( info, DIVR ) != 0) ){ 6756 rc = false; 6757 } 6758 6759 if ( rc ){ 6760 /* Write bit patterns to various registers but do it out of */ 6761 /* sync, then read back and verify values. */ 6762 6763 for ( i = 0 ; i < Patterncount ; i++ ) { 6764 usc_OutReg( info, TC0R, BitPatterns[i] ); 6765 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] ); 6766 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] ); 6767 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] ); 6768 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] ); 6769 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] ); 6770 6771 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) || 6772 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) || 6773 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) || 6774 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) || 6775 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) || 6776 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){ 6777 rc = false; 6778 break; 6779 } 6780 } 6781 } 6782 6783 usc_reset(info); 6784 spin_unlock_irqrestore(&info->irq_spinlock,flags); 6785 6786 return rc; 6787 6788} /* end of mgsl_register_test() */ 6789 6790/* mgsl_irq_test() Perform interrupt test of the 16C32. 6791 * 6792 * Arguments: info pointer to device instance data 6793 * Return Value: true if test passed, otherwise false 6794 */ 6795static bool mgsl_irq_test( struct mgsl_struct *info ) 6796{ 6797 unsigned long EndTime; 6798 unsigned long flags; 6799 6800 spin_lock_irqsave(&info->irq_spinlock,flags); 6801 usc_reset(info); 6802 6803 /* 6804 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition. 6805 * The ISR sets irq_occurred to true. 6806 */ 6807 6808 info->irq_occurred = false; 6809 6810 /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */ 6811 /* Enable INTEN (Port 6, Bit12) */ 6812 /* This connects the IRQ request signal to the ISA bus */ 6813 /* on the ISA adapter. This has no effect for the PCI adapter */ 6814 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) ); 6815 6816 usc_EnableMasterIrqBit(info); 6817 usc_EnableInterrupts(info, IO_PIN); 6818 usc_ClearIrqPendingBits(info, IO_PIN); 6819 6820 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED); 6821 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE); 6822 6823 spin_unlock_irqrestore(&info->irq_spinlock,flags); 6824 6825 EndTime=100; 6826 while( EndTime-- && !info->irq_occurred ) { 6827 msleep_interruptible(10); 6828 } 6829 6830 spin_lock_irqsave(&info->irq_spinlock,flags); 6831 usc_reset(info); 6832 spin_unlock_irqrestore(&info->irq_spinlock,flags); 6833 6834 return info->irq_occurred; 6835 6836} /* end of mgsl_irq_test() */ 6837 6838/* mgsl_dma_test() 6839 * 6840 * Perform a DMA test of the 16C32. 
A small frame is 6841 * transmitted via DMA from a transmit buffer to a receive buffer 6842 * using single buffer DMA mode. 6843 * 6844 * Arguments: info pointer to device instance data 6845 * Return Value: true if test passed, otherwise false 6846 */ 6847static bool mgsl_dma_test( struct mgsl_struct *info ) 6848{ 6849 unsigned short FifoLevel; 6850 unsigned long phys_addr; 6851 unsigned int FrameSize; 6852 unsigned int i; 6853 char *TmpPtr; 6854 bool rc = true; 6855 unsigned short status=0; 6856 unsigned long EndTime; 6857 unsigned long flags; 6858 MGSL_PARAMS tmp_params; 6859 6860 /* save current port options */ 6861 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS)); 6862 /* load default port options */ 6863 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS)); 6864 6865#define TESTFRAMESIZE 40 6866 6867 spin_lock_irqsave(&info->irq_spinlock,flags); 6868 6869 /* setup 16C32 for SDLC DMA transfer mode */ 6870 6871 usc_reset(info); 6872 usc_set_sdlc_mode(info); 6873 usc_enable_loopback(info,1); 6874 6875 /* Reprogram the RDMR so that the 16C32 does NOT clear the count 6876 * field of the buffer entry after fetching buffer address. This 6877 * way we can detect a DMA failure for a DMA read (which should be 6878 * non-destructive to system memory) before we try and write to 6879 * memory (where a failure could corrupt system memory). 6880 */ 6881 6882 /* Receive DMA mode Register (RDMR) 6883 * 6884 * <15..14> 11 DMA mode = Linked List Buffer mode 6885 * <13> 1 RSBinA/L = store Rx status Block in List entry 6886 * <12> 0 1 = Clear count of List Entry after fetching 6887 * <11..10> 00 Address mode = Increment 6888 * <9> 1 Terminate Buffer on RxBound 6889 * <8> 0 Bus Width = 16bits 6890 * <7..0> ? status Bits (write as 0s) 6891 * 6892 * 1110 0010 0000 0000 = 0xe200 6893 */ 6894 6895 usc_OutDmaReg( info, RDMR, 0xe200 ); 6896 6897 spin_unlock_irqrestore(&info->irq_spinlock,flags); 6898 6899 6900 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */ 6901 6902 FrameSize = TESTFRAMESIZE; 6903 6904 /* setup 1st transmit buffer entry: */ 6905 /* with frame size and transmit control word */ 6906 6907 info->tx_buffer_list[0].count = FrameSize; 6908 info->tx_buffer_list[0].rcc = FrameSize; 6909 info->tx_buffer_list[0].status = 0x4000; 6910 6911 /* build a transmit frame in 1st transmit DMA buffer */ 6912 6913 TmpPtr = info->tx_buffer_list[0].virt_addr; 6914 for (i = 0; i < FrameSize; i++ ) 6915 *TmpPtr++ = i; 6916 6917 /* setup 1st receive buffer entry: */ 6918 /* clear status, set max receive buffer size */ 6919 6920 info->rx_buffer_list[0].status = 0; 6921 info->rx_buffer_list[0].count = FrameSize + 4; 6922 6923 /* zero out the 1st receive buffer */ 6924 6925 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 ); 6926 6927 /* Set count field of next buffer entries to prevent */ 6928 /* 16C32 from using buffers after the 1st one. */ 6929 6930 info->tx_buffer_list[1].count = 0; 6931 info->rx_buffer_list[1].count = 0; 6932 6933 6934 /***************************/ 6935 /* Program 16C32 receiver. 
*/ 6936 /***************************/ 6937 6938 spin_lock_irqsave(&info->irq_spinlock,flags); 6939 6940 /* setup DMA transfers */ 6941 usc_RTCmd( info, RTCmd_PurgeRxFifo ); 6942 6943 /* program 16C32 receiver with physical address of 1st DMA buffer entry */ 6944 phys_addr = info->rx_buffer_list[0].phys_entry; 6945 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr ); 6946 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) ); 6947 6948 /* Clear the Rx DMA status bits (read RDMR) and start channel */ 6949 usc_InDmaReg( info, RDMR ); 6950 usc_DmaCmd( info, DmaCmd_InitRxChannel ); 6951 6952 /* Enable Receiver (RMR <1..0> = 10) */ 6953 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) ); 6954 6955 spin_unlock_irqrestore(&info->irq_spinlock,flags); 6956 6957 6958 /*************************************************************/ 6959 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */ 6960 /*************************************************************/ 6961 6962 /* Wait 100ms for interrupt. */ 6963 EndTime = jiffies + msecs_to_jiffies(100); 6964 6965 for(;;) { 6966 if (time_after(jiffies, EndTime)) { 6967 rc = false; 6968 break; 6969 } 6970 6971 spin_lock_irqsave(&info->irq_spinlock,flags); 6972 status = usc_InDmaReg( info, RDMR ); 6973 spin_unlock_irqrestore(&info->irq_spinlock,flags); 6974 6975 if ( !(status & BIT4) && (status & BIT5) ) { 6976 /* INITG (BIT 4) is inactive (no entry read in progress) AND */ 6977 /* BUSY (BIT 5) is active (channel still active). */ 6978 /* This means the buffer entry read has completed. */ 6979 break; 6980 } 6981 } 6982 6983 6984 /******************************/ 6985 /* Program 16C32 transmitter. */ 6986 /******************************/ 6987 6988 spin_lock_irqsave(&info->irq_spinlock,flags); 6989 6990 /* Program the Transmit Character Length Register (TCLR) */ 6991 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */ 6992 6993 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count ); 6994 usc_RTCmd( info, RTCmd_PurgeTxFifo ); 6995 6996 /* Program the address of the 1st DMA Buffer Entry in linked list */ 6997 6998 phys_addr = info->tx_buffer_list[0].phys_entry; 6999 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr ); 7000 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) ); 7001 7002 /* unlatch Tx status bits, and start transmit channel. */ 7003 7004 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) ); 7005 usc_DmaCmd( info, DmaCmd_InitTxChannel ); 7006 7007 /* wait for DMA controller to fill transmit FIFO */ 7008 7009 usc_TCmd( info, TCmd_SelectTicrTxFifostatus ); 7010 7011 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7012 7013 7014 /**********************************/ 7015 /* WAIT FOR TRANSMIT FIFO TO FILL */ 7016 /**********************************/ 7017 7018 /* Wait 100ms */ 7019 EndTime = jiffies + msecs_to_jiffies(100); 7020 7021 for(;;) { 7022 if (time_after(jiffies, EndTime)) { 7023 rc = false; 7024 break; 7025 } 7026 7027 spin_lock_irqsave(&info->irq_spinlock,flags); 7028 FifoLevel = usc_InReg(info, TICR) >> 8; 7029 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7030 7031 if ( FifoLevel < 16 ) 7032 break; 7033 else 7034 if ( FrameSize < 32 ) { 7035 /* This frame is smaller than the entire transmit FIFO */ 7036 /* so wait for the entire frame to be loaded. */ 7037 if ( FifoLevel <= (32 - FrameSize) ) 7038 break; 7039 } 7040 } 7041 7042 7043 if ( rc ) 7044 { 7045 /* Enable 16C32 transmitter. 
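 *
 * Illustrative aside (not part of the original comments): after
 * TCmd_SelectTicrTxFifostatus, the upper byte of TICR reads back the
 * number of empty transmit FIFO entries (the same readback used by
 * usc_load_txfifo), so the "FifoLevel < 16" test above waits until
 * the 32-deep FIFO is more than half full before the transmitter is
 * enabled.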
*/ 7046 7047 spin_lock_irqsave(&info->irq_spinlock,flags); 7048 7049 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */ 7050 usc_TCmd( info, TCmd_SendFrame ); 7051 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) ); 7052 7053 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7054 7055 7056 /******************************/ 7057 /* WAIT FOR TRANSMIT COMPLETE */ 7058 /******************************/ 7059 7060 /* Wait 100ms */ 7061 EndTime = jiffies + msecs_to_jiffies(100); 7062 7063 /* While timer not expired wait for transmit complete */ 7064 7065 spin_lock_irqsave(&info->irq_spinlock,flags); 7066 status = usc_InReg( info, TCSR ); 7067 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7068 7069 while ( !(status & (BIT6 | BIT5 | BIT4 | BIT2 | BIT1)) ) { 7070 if (time_after(jiffies, EndTime)) { 7071 rc = false; 7072 break; 7073 } 7074 7075 spin_lock_irqsave(&info->irq_spinlock,flags); 7076 status = usc_InReg( info, TCSR ); 7077 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7078 } 7079 } 7080 7081 7082 if ( rc ){ 7083 /* CHECK FOR TRANSMIT ERRORS */ 7084 if ( status & (BIT5 | BIT1) ) 7085 rc = false; 7086 } 7087 7088 if ( rc ) { 7089 /* WAIT FOR RECEIVE COMPLETE */ 7090 7091 /* Wait 100ms */ 7092 EndTime = jiffies + msecs_to_jiffies(100); 7093 7094 /* Wait for 16C32 to write receive status to buffer entry. */ 7095 status=info->rx_buffer_list[0].status; 7096 while ( status == 0 ) { 7097 if (time_after(jiffies, EndTime)) { 7098 rc = false; 7099 break; 7100 } 7101 status=info->rx_buffer_list[0].status; 7102 } 7103 } 7104 7105 7106 if ( rc ) { 7107 /* CHECK FOR RECEIVE ERRORS */ 7108 status = info->rx_buffer_list[0].status; 7109 7110 if ( status & (BIT8 | BIT3 | BIT1) ) { 7111 /* receive error has occurred */ 7112 rc = false; 7113 } else { 7114 if ( memcmp( info->tx_buffer_list[0].virt_addr , 7115 info->rx_buffer_list[0].virt_addr, FrameSize ) ){ 7116 rc = false; 7117 } 7118 } 7119 } 7120 7121 spin_lock_irqsave(&info->irq_spinlock,flags); 7122 usc_reset( info ); 7123 spin_unlock_irqrestore(&info->irq_spinlock,flags); 7124 7125 /* restore current port options */ 7126 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); 7127 7128 return rc; 7129 7130} /* end of mgsl_dma_test() */ 7131 7132/* mgsl_adapter_test() 7133 * 7134 * Perform the register, IRQ, and DMA tests for the 16C32. 
7135 * 7136 * Arguments: info pointer to device instance data 7137 * Return Value: 0 if success, otherwise -ENODEV 7138 */ 7139static int mgsl_adapter_test( struct mgsl_struct *info ) 7140{ 7141 if ( debug_level >= DEBUG_LEVEL_INFO ) 7142 printk( "%s(%d):Testing device %s\n", 7143 __FILE__,__LINE__,info->device_name ); 7144 7145 if ( !mgsl_register_test( info ) ) { 7146 info->init_error = DiagStatus_AddressFailure; 7147 printk( "%s(%d):Register test failure for device %s Addr=%04X\n", 7148 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) ); 7149 return -ENODEV; 7150 } 7151 7152 if ( !mgsl_irq_test( info ) ) { 7153 info->init_error = DiagStatus_IrqFailure; 7154 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n", 7155 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) ); 7156 return -ENODEV; 7157 } 7158 7159 if ( !mgsl_dma_test( info ) ) { 7160 info->init_error = DiagStatus_DmaFailure; 7161 printk( "%s(%d):DMA test failure for device %s DMA=%d\n", 7162 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) ); 7163 return -ENODEV; 7164 } 7165 7166 if ( debug_level >= DEBUG_LEVEL_INFO ) 7167 printk( "%s(%d):device %s passed diagnostics\n", 7168 __FILE__,__LINE__,info->device_name ); 7169 7170 return 0; 7171 7172} /* end of mgsl_adapter_test() */ 7173 7174/* mgsl_memory_test() 7175 * 7176 * Test the shared memory on a PCI adapter. 7177 * 7178 * Arguments: info pointer to device instance data 7179 * Return Value: true if test passed, otherwise false 7180 */ 7181static bool mgsl_memory_test( struct mgsl_struct *info ) 7182{ 7183 static unsigned long BitPatterns[] = 7184 { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 }; 7185 unsigned long Patterncount = ARRAY_SIZE(BitPatterns); 7186 unsigned long i; 7187 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long); 7188 unsigned long * TestAddr; 7189 7190 TestAddr = (unsigned long *)info->memory_base; 7191 7192 /* Test data lines with test pattern at one location. */ 7193 7194 for ( i = 0 ; i < Patterncount ; i++ ) { 7195 *TestAddr = BitPatterns[i]; 7196 if ( *TestAddr != BitPatterns[i] ) 7197 return false; 7198 } 7199 7200 /* Test address lines with incrementing pattern over */ 7201 /* entire address range. */ 7202 7203 for ( i = 0 ; i < TestLimit ; i++ ) { 7204 *TestAddr = i * 4; 7205 TestAddr++; 7206 } 7207 7208 TestAddr = (unsigned long *)info->memory_base; 7209 7210 for ( i = 0 ; i < TestLimit ; i++ ) { 7211 if ( *TestAddr != i * 4 ) 7212 return false; 7213 TestAddr++; 7214 } 7215 7216 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE ); 7217 7218 return true; 7219 7220} /* End Of mgsl_memory_test() */ 7221 7222 7223/* mgsl_load_pci_memory() 7224 * 7225 * Load a large block of data into the PCI shared memory. 7226 * Use this instead of memcpy() or memmove() to move data 7227 * into the PCI shared memory. 7228 * 7229 * Notes: 7230 * 7231 * This function prevents the PCI9050 interface chip from hogging 7232 * the adapter local bus, which can starve the 16C32 by preventing 7233 * 16C32 bus master cycles. 7234 * 7235 * The PCI9050 documentation says that the 9050 will always release 7236 * control of the local bus after completing the current read 7237 * or write operation. 7238 * 7239 * It appears that as long as the PCI9050 write FIFO is full, the 7240 * PCI9050 treats all of the writes as a single burst transaction 7241 * and will not release the bus. 
 *	This causes DMA latency problems
 *	at high speeds when copying large data blocks to the shared
 *	memory.
 *
 *	This function, in effect, breaks a large shared memory write
 *	into multiple transactions by interleaving a shared memory read
 *	which will flush the write FIFO and 'complete' the write
 *	transaction. This allows any pending DMA request to gain control
 *	of the local bus in a timely fashion.
 *
 * Arguments:
 *
 *	TargetPtr	pointer to target address in PCI shared memory
 *	SourcePtr	pointer to source buffer for data
 *	count		count in bytes of data to copy
 *
 * Return Value:	None
 */
static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
	unsigned short count )
{
	/* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
#define PCI_LOAD_INTERVAL 64

	unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
	unsigned short Index;
	unsigned long Dummy;

	for ( Index = 0 ; Index < Intervalcount ; Index++ )
	{
		memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
		Dummy = *((volatile unsigned long *)TargetPtr);
		TargetPtr += PCI_LOAD_INTERVAL;
		SourcePtr += PCI_LOAD_INTERVAL;
	}

	memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );

}	/* End Of mgsl_load_pci_memory() */
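
/*
 * Usage sketch (illustrative only, not part of the original driver): when
 * copying a frame into the PCI shared memory window, call
 * mgsl_load_pci_memory() where a plain memcpy() would otherwise be used,
 * so the PCI9050 write FIFO is flushed at bounded intervals. The function
 * name and the destination offset below are hypothetical.
 */
static __maybe_unused void example_copy_frame_to_shared_memory(struct mgsl_struct *info,
								const char *frame,
								unsigned short size)
{
	/* hypothetical destination: start of the shared memory window */
	char *dest = info->memory_base;

	/* interleaved-read copy keeps 16C32 bus master latency bounded */
	mgsl_load_pci_memory(dest, frame, size);
}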

static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
{
	int i;
	int linecount;
	if (xmit)
		printk("%s tx data:\n",info->device_name);
	else
		printk("%s rx data:\n",info->device_name);

	while(count) {
		if (count > 16)
			linecount = 16;
		else
			linecount = count;

		for(i=0;i<linecount;i++)
			printk("%02X ",(unsigned char)data[i]);
		for(;i<17;i++)
			printk("   ");
		for(i=0;i<linecount;i++) {
			if (data[i]>=040 && data[i]<=0176)
				printk("%c",data[i]);
			else
				printk(".");
		}
		printk("\n");

		data  += linecount;
		count -= linecount;
	}
}	/* end of mgsl_trace_block() */

/* mgsl_tx_timeout()
 *
 *	called when HDLC frame times out
 *	update stats and do tx completion processing
 *
 * Arguments:	context		pointer to device instance data
 * Return Value:	None
 */
static void mgsl_tx_timeout(struct timer_list *t)
{
	struct mgsl_struct *info = from_timer(info, t, tx_timer);
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_tx_timeout(%s)\n",
			__FILE__,__LINE__,info->device_name);
	if(info->tx_active &&
	   (info->params.mode == MGSL_MODE_HDLC ||
	    info->params.mode == MGSL_MODE_RAW) ) {
		info->icount.txtimeout++;
	}
	spin_lock_irqsave(&info->irq_spinlock,flags);
	info->tx_active = false;
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;

	if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
		usc_loopmode_cancel_transmit( info );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

#if SYNCLINK_GENERIC_HDLC
	if (info->netcount)
		hdlcdev_tx_done(info);
	else
#endif
		mgsl_bh_transmit(info);

}	/* end of mgsl_tx_timeout() */

/* signal that there are no more frames to send, so that
 * line is 'released' by echoing RxD to TxD when current
 * transmission is complete (or immediately if no tx in progress).
 */
static int mgsl_loopmode_send_done( struct mgsl_struct * info )
{
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
		if (info->tx_active)
			info->loopmode_send_done_requested = true;
		else
			usc_loopmode_send_done(info);
	}
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return 0;
}

/* release the line by echoing RxD to TxD
 * upon completion of a transmit frame
 */
static void usc_loopmode_send_done( struct mgsl_struct * info )
{
	info->loopmode_send_done_requested = false;
	/* clear CMR:13 to 0 to start echoing RxData to TxData */
	info->cmr_value &= ~BIT13;
	usc_OutReg(info, CMR, info->cmr_value);
}

/* abort a transmit in progress while in HDLC LoopMode
 */
static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
{
	/* reset tx dma channel and purge TxFifo */
	usc_RTCmd( info, RTCmd_PurgeTxFifo );
	usc_DmaCmd( info, DmaCmd_ResetTxChannel );
	usc_loopmode_send_done( info );
}

/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
 * we must clear CMR:13 to begin repeating TxData to RxData
 */
static void usc_loopmode_insert_request( struct mgsl_struct * info )
{
	info->loopmode_insert_requested = true;

	/* enable RxAbort irq. On next RxAbort, clear CMR:13 to
	 * begin repeating TxData on RxData (complete insertion)
	 */
	usc_OutReg( info, RICR,
		(usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );

	/* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
	info->cmr_value |= BIT13;
	usc_OutReg(info, CMR, info->cmr_value);
}

/* return 1 if station is inserted into the loop, otherwise 0
 */
static int usc_loopmode_active( struct mgsl_struct * info)
{
	return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
}
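
/*
 * Illustrative sketch (not part of the original driver): one possible way to
 * join an HDLC loop using the helpers above, polling for completion instead
 * of reacting to the RxAbort interrupt as the driver itself does. The
 * function name and timeout value are hypothetical.
 */
static __maybe_unused int example_loopmode_join(struct mgsl_struct *info)
{
	unsigned long flags;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);	/* arbitrary bound */

	/* request insertion: sets CMR:13 and enables the RxAbort irq */
	spin_lock_irqsave(&info->irq_spinlock, flags);
	usc_loopmode_insert_request(info);
	spin_unlock_irqrestore(&info->irq_spinlock, flags);

	/* insertion completes when the GoAhead (RxAbort) is received */
	while (time_before(jiffies, timeout)) {
		int active;

		spin_lock_irqsave(&info->irq_spinlock, flags);
		active = usc_loopmode_active(info);	/* CCSR:7 set once inserted */
		spin_unlock_irqrestore(&info->irq_spinlock, flags);

		if (active)
			return 0;
		msleep(1);
	}
	return -ETIMEDOUT;
}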

#if SYNCLINK_GENERIC_HDLC

/**
 * hdlcdev_attach - called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
 * @dev: pointer to network device structure
 * @encoding: serial encoding setting
 * @parity: FCS setting
 *
 * Set encoding and frame check sequence (FCS) options.
 *
 * Return: 0 if success, otherwise error code
 */
static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
			  unsigned short parity)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned char new_encoding;
	unsigned short new_crctype;

	/* return error if TTY interface open */
	if (info->port.count)
		return -EBUSY;

	switch (encoding)
	{
	case ENCODING_NRZ:        new_encoding = HDLC_ENCODING_NRZ; break;
	case ENCODING_NRZI:       new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
	case ENCODING_FM_MARK:    new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
	case ENCODING_FM_SPACE:   new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
	case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
	default: return -EINVAL;
	}

	switch (parity)
	{
	case PARITY_NONE:            new_crctype = HDLC_CRC_NONE; break;
	case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
	case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
	default: return -EINVAL;
	}

	info->params.encoding = new_encoding;
	info->params.crc_type = new_crctype;

	/* if network interface up, reprogram hardware */
	if (info->netcount)
		mgsl_program_hw(info);

	return 0;
}

/**
 * hdlcdev_xmit - called by generic HDLC layer to send a frame
 * @skb: socket buffer containing HDLC frame
 * @dev: pointer to network device structure
 */
static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);

	/* stop sending until this frame completes */
	netif_stop_queue(dev);

	/* copy data to device buffers */
	info->xmit_cnt = skb->len;
	mgsl_load_tx_dma_buffer(info, skb->data, skb->len);

	/* update network statistics */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* done with socket buffer, so free it */
	dev_kfree_skb(skb);

	/* save start time for transmit timeout detection */
	netif_trans_update(dev);

	/* start hardware transmitter if necessary */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (!info->tx_active)
		usc_start_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return NETDEV_TX_OK;
}

/**
 * hdlcdev_open - called by network layer when interface enabled
 * @dev: pointer to network device structure
 *
 * Claim resources and initialize hardware.
 *
 * Return: 0 if success, otherwise error code
 */
static int hdlcdev_open(struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	int rc;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);

	/* generic HDLC layer open processing */
	rc = hdlc_open(dev);
	if (rc)
		return rc;

	/* arbitrate between network and tty opens */
	spin_lock_irqsave(&info->netlock, flags);
	if (info->port.count != 0 || info->netcount != 0) {
		printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
		spin_unlock_irqrestore(&info->netlock, flags);
		return -EBUSY;
	}
	info->netcount=1;
	spin_unlock_irqrestore(&info->netlock, flags);

	/* claim resources and init adapter */
	if ((rc = startup(info)) != 0) {
		spin_lock_irqsave(&info->netlock, flags);
		info->netcount=0;
		spin_unlock_irqrestore(&info->netlock, flags);
		return rc;
	}

	/* assert RTS and DTR, apply hardware settings */
	info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
	mgsl_program_hw(info);

	/* enable network layer transmit */
	netif_trans_update(dev);
	netif_start_queue(dev);

	/* inform generic HDLC layer of current DCD status */
	spin_lock_irqsave(&info->irq_spinlock, flags);
	usc_get_serial_signals(info);
	spin_unlock_irqrestore(&info->irq_spinlock, flags);
	if (info->serial_signals & SerialSignal_DCD)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}

/**
 * hdlcdev_close - called by network layer when interface is disabled
 * @dev: pointer to network device structure
 *
 * Shutdown hardware and release resources.
 *
 * Return: 0 if success, otherwise error code
 */
static int hdlcdev_close(struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);

	netif_stop_queue(dev);

	/* shutdown adapter and release resources */
	shutdown(info);

	hdlc_close(dev);

	spin_lock_irqsave(&info->netlock, flags);
	info->netcount=0;
	spin_unlock_irqrestore(&info->netlock, flags);

	return 0;
}

/**
 * hdlcdev_ioctl - called by network layer to process IOCTL call to network device
 * @dev: pointer to network device structure
 * @ifr: pointer to network interface request structure
 * @cmd: IOCTL command code
 *
 * Return: 0 if success, otherwise error code
 */
static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned int flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);

	/* return error if TTY interface open */
	if (info->port.count)
		return -EBUSY;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE: /* return current sync_serial_settings */

		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}

		flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);

		memset(&new_line, 0, sizeof(new_line));
		switch (flags){
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
		case (HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_INT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_TXINT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
		default: new_line.clock_type = CLOCK_DEFAULT;
		}

		new_line.clock_rate = info->params.clock_speed;
		new_line.loopback = info->params.loopback ? 1 : 0;

		if (copy_to_user(line, &new_line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */

		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		switch (new_line.clock_type)
		{
		case CLOCK_EXT:      flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
		case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
		case CLOCK_INT:      flags = HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG;    break;
		case CLOCK_TXINT:    flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG;    break;
		case CLOCK_DEFAULT:  flags = info->params.flags &
					     (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN); break;
		default: return -EINVAL;
		}

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
		info->params.flags |= flags;

		info->params.loopback = new_line.loopback;

		if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
			info->params.clock_speed = new_line.clock_rate;
		else
			info->params.clock_speed = 0;

		/* if network interface up, reprogram hardware */
		if (info->netcount)
			mgsl_program_hw(info);
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

/**
 * hdlcdev_tx_timeout - called by network layer when transmit timeout is detected
 * @dev: pointer to network device structure
 * @txqueue: unused
 */
static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("hdlcdev_tx_timeout(%s)\n",dev->name);

	dev->stats.tx_errors++;
	dev->stats.tx_aborted_errors++;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_stop_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	netif_wake_queue(dev);
}

/**
 * hdlcdev_tx_done - called by device driver when transmit completes
 * @info: pointer to device instance information
 *
 * Reenable network layer transmit if stopped.
 */
static void hdlcdev_tx_done(struct mgsl_struct *info)
{
	if (netif_queue_stopped(info->netdev))
		netif_wake_queue(info->netdev);
}
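
/*
 * Illustrative helper (not part of the original driver): the clock_type to
 * HDLC_FLAG_* mapping applied by hdlcdev_ioctl() above, factored out for
 * clarity. Returns 0 for CLOCK_DEFAULT and unsupported values (the driver
 * keeps the current flags in that case). The name is hypothetical.
 */
static __maybe_unused unsigned int example_clock_type_to_flags(int clock_type)
{
	switch (clock_type) {
	case CLOCK_EXT:		/* RxC and TxC from the clock input pins */
		return HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN;
	case CLOCK_TXFROMRX:	/* TxC derived from the RxC pin */
		return HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN;
	case CLOCK_INT:		/* both clocks from the baud rate generator */
		return HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG;
	case CLOCK_TXINT:	/* RxC from pin, TxC from baud rate generator */
		return HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG;
	default:
		return 0;
	}
}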

/**
 * hdlcdev_rx - called by device driver when frame received
 * @info: pointer to device instance information
 * @buf: pointer to buffer containing frame data
 * @size: count of data bytes in buf
 *
 * Pass frame to network layer.
 */
static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
{
	struct sk_buff *skb = dev_alloc_skb(size);
	struct net_device *dev = info->netdev;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("hdlcdev_rx(%s)\n", dev->name);

	if (skb == NULL) {
		printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
		       dev->name);
		dev->stats.rx_dropped++;
		return;
	}

	skb_put_data(skb, buf, size);

	skb->protocol = hdlc_type_trans(skb, dev);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += size;

	netif_rx(skb);
}

static const struct net_device_ops hdlcdev_ops = {
	.ndo_open       = hdlcdev_open,
	.ndo_stop       = hdlcdev_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = hdlcdev_ioctl,
	.ndo_tx_timeout = hdlcdev_tx_timeout,
};

/**
 * hdlcdev_init - called by device driver when adding device instance
 * @info: pointer to device instance information
 *
 * Do generic HDLC initialization.
 *
 * Return: 0 if success, otherwise error code
 */
static int hdlcdev_init(struct mgsl_struct *info)
{
	int rc;
	struct net_device *dev;
	hdlc_device *hdlc;

	/* allocate and initialize network and HDLC layer objects */

	dev = alloc_hdlcdev(info);
	if (!dev) {
		printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
		return -ENOMEM;
	}

	/* for network layer reporting purposes only */
	dev->base_addr = info->io_base;
	dev->irq       = info->irq_level;
	dev->dma       = info->dma_level;

	/* network layer callbacks and settings */
	dev->netdev_ops     = &hdlcdev_ops;
	dev->watchdog_timeo = 10 * HZ;
	dev->tx_queue_len   = 50;

	/* generic HDLC layer callbacks and settings */
	hdlc         = dev_to_hdlc(dev);
	hdlc->attach = hdlcdev_attach;
	hdlc->xmit   = hdlcdev_xmit;

	/* register objects with HDLC layer */
	rc = register_hdlc_device(dev);
	if (rc) {
		printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
		free_netdev(dev);
		return rc;
	}

	info->netdev = dev;
	return 0;
}

/**
 * hdlcdev_exit - called by device driver when removing device instance
 * @info: pointer to device instance information
 *
 * Do generic HDLC cleanup.
 */
static void hdlcdev_exit(struct mgsl_struct *info)
{
	unregister_hdlc_device(info->netdev);
	free_netdev(info->netdev);
	info->netdev = NULL;
}

#endif /* SYNCLINK_GENERIC_HDLC */


static int synclink_init_one (struct pci_dev *dev,
			      const struct pci_device_id *ent)
{
	struct mgsl_struct *info;

	if (pci_enable_device(dev)) {
		printk("error enabling pci device %p\n", dev);
		return -EIO;
	}

	info = mgsl_allocate_device();
	if (!info) {
		printk("can't allocate device instance data.\n");
		return -EIO;
	}

	/* Copy user configuration info to device instance data */

	info->io_base = pci_resource_start(dev, 2);
	info->irq_level = dev->irq;
	info->phys_memory_base = pci_resource_start(dev, 3);

	/* Because ioremap only works on page boundaries we must map
	 * a larger area than is actually implemented for the LCR
	 * memory range. We map a full page starting at the page boundary.
	 */
	info->phys_lcr_base = pci_resource_start(dev, 0);
	info->lcr_offset    = info->phys_lcr_base & (PAGE_SIZE-1);
	info->phys_lcr_base &= ~(PAGE_SIZE-1);

	info->io_addr_size = 8;
	info->irq_flags = IRQF_SHARED;

	if (dev->device == 0x0210) {
		/* Version 1 PCI9030 based universal PCI adapter */
		info->misc_ctrl_value = 0x007c4080;
		info->hw_version = 1;
	} else {
		/* Version 0 PCI9050 based 5V PCI adapter
		 * A PCI9050 bug prevents reading LCR registers if
		 * LCR base address bit 7 is set. Maintain shadow
		 * value so we can write to LCR misc control reg.
		 */
		info->misc_ctrl_value = 0x087e4546;
		info->hw_version = 0;
	}

	mgsl_add_device(info);

	return 0;
}

static void synclink_remove_one (struct pci_dev *dev)
{
}
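
/*
 * Mapping sketch (illustrative only, not part of the original driver): how
 * the page-aligned phys_lcr_base and lcr_offset computed in
 * synclink_init_one() would typically be turned into a usable LCR register
 * pointer. A full page is mapped because ioremap works on page boundaries,
 * then lcr_offset indexes to the real LCR range within that page. Error
 * handling is omitted and the function name is hypothetical.
 */
static __maybe_unused void __iomem *example_map_lcr_registers(struct mgsl_struct *info)
{
	/* map the whole page containing the LCR register range ... */
	void __iomem *page = ioremap(info->phys_lcr_base, PAGE_SIZE);

	/* ... then index to the start of the LCR registers within that page */
	return page ? page + info->lcr_offset : NULL;
}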