Lines matching refs:up (every hit references the local "up" pointer, a struct uart_txx9_port *, in the serial_txx9 UART driver)

166 static inline unsigned int sio_in(struct uart_txx9_port *up, int offset)
168 switch (up->port.iotype) {
170 return __raw_readl(up->port.membase + offset);
172 return inl(up->port.iobase + offset);
177 sio_out(struct uart_txx9_port *up, int offset, int value)
179 switch (up->port.iotype) {
181 __raw_writel(value, up->port.membase + offset);
184 outl(value, up->port.iobase + offset);
190 sio_mask(struct uart_txx9_port *up, int offset, unsigned int value)
192 sio_out(up, offset, sio_in(up, offset) & ~value);
195 sio_set(struct uart_txx9_port *up, int offset, unsigned int value)
197 sio_out(up, offset, sio_in(up, offset) | value);
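The four helpers above (sio_in, sio_out, sio_mask, sio_set) are the driver's only register access path: the in/out pair switches on port.iotype between memory-mapped access (__raw_readl/__raw_writel on membase) and port I/O (inl/outl on iobase), and mask/set are read-modify-write wrappers layered on top. A minimal user-space sketch of the same pattern, with a pretend register file standing in for the hardware (all names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

enum fake_iotype { FAKE_MEM, FAKE_PORT };      /* stands in for UPIO_MEM / UPIO_PORT */

struct fake_sio {
        enum fake_iotype iotype;
        uint32_t regs[16];                     /* pretend register file */
};

static unsigned int fake_in(struct fake_sio *up, int offset)
{
        switch (up->iotype) {
        case FAKE_MEM:                         /* driver: __raw_readl(membase + offset) */
                return up->regs[offset];
        default:                               /* driver: inl(iobase + offset) */
                return up->regs[offset];
        }
}

static void fake_out(struct fake_sio *up, int offset, unsigned int value)
{
        switch (up->iotype) {
        case FAKE_MEM:                         /* driver: __raw_writel(value, membase + offset) */
                up->regs[offset] = value;
                break;
        default:                               /* driver: outl(value, iobase + offset) */
                up->regs[offset] = value;
                break;
        }
}

/* Read-modify-write wrappers: clear bits / set bits in a single register. */
static void fake_mask(struct fake_sio *up, int offset, unsigned int bits)
{
        fake_out(up, offset, fake_in(up, offset) & ~bits);
}

static void fake_set(struct fake_sio *up, int offset, unsigned int bits)
{
        fake_out(up, offset, fake_in(up, offset) | bits);
}

int main(void)
{
        struct fake_sio s = { .iotype = FAKE_MEM };
        fake_out(&s, 0, 0xf0);
        fake_set(&s, 0, 0x01);                 /* reg0 becomes 0xf1 */
        fake_mask(&s, 0, 0xf0);                /* reg0 becomes 0x01 */
        printf("reg0 = %#x\n", fake_in(&s, 0));
        return 0;
}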
201 sio_quot_set(struct uart_txx9_port *up, int quot)
205 sio_out(up, TXX9_SIBGR, quot | TXX9_SIBGR_BCLK_T0);
207 sio_out(up, TXX9_SIBGR, (quot >> 2) | TXX9_SIBGR_BCLK_T2);
209 sio_out(up, TXX9_SIBGR, (quot >> 4) | TXX9_SIBGR_BCLK_T4);
211 sio_out(up, TXX9_SIBGR, (quot >> 6) | TXX9_SIBGR_BCLK_T6);
213 sio_out(up, TXX9_SIBGR, 0xff | TXX9_SIBGR_BCLK_T6);
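sio_quot_set spreads the baud-rate quotient across the SIBGR prescaler taps: small quotients are written unshifted with BCLK_T0, larger ones are shifted down by 2, 4 or 6 bits and paired with the matching BCLK_T2/T4/T6 tap, and anything still too large is clamped to 0xff on the slowest tap. The threshold comparisons themselves are not among the matched lines, so the sketch below assumes an 8-bit divisor field per tap:

#include <stdio.h>

struct bgr_val { int shift; unsigned int div; };   /* prescaler shift + 8-bit divisor */

static struct bgr_val quot_to_bgr(unsigned int quot)
{
        if (quot < 0x100)        return (struct bgr_val){ 0, quot };        /* BCLK_T0 */
        if (quot < (0x100 << 2)) return (struct bgr_val){ 2, quot >> 2 };   /* BCLK_T2 */
        if (quot < (0x100 << 4)) return (struct bgr_val){ 4, quot >> 4 };   /* BCLK_T4 */
        if (quot < (0x100 << 6)) return (struct bgr_val){ 6, quot >> 6 };   /* BCLK_T6 */
        return (struct bgr_val){ 6, 0xff };                                 /* clamp */
}

int main(void)
{
        unsigned int q;
        for (q = 0x80; q <= 0x8000; q <<= 4) {
                struct bgr_val v = quot_to_bgr(q);
                printf("quot %#6x -> shift %d, divisor %#x\n", q, v.shift, v.div);
        }
        return 0;
}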
223 struct uart_txx9_port *up = to_uart_txx9_port(port);
224 sio_mask(up, TXX9_SIDICR, TXX9_SIDICR_TIE);
229 struct uart_txx9_port *up = to_uart_txx9_port(port);
230 sio_set(up, TXX9_SIDICR, TXX9_SIDICR_TIE);
235 struct uart_txx9_port *up = to_uart_txx9_port(port);
236 up->port.read_status_mask &= ~TXX9_SIDISR_RDIS;
241 struct uart_txx9_port *up = to_uart_txx9_port(port);
244 sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST);
248 while ((sio_in(up, TXX9_SIFCR) & TXX9_SIFCR_SWRST) && --tmout)
251 sio_set(up, TXX9_SIFCR,
254 sio_out(up, TXX9_SILCR,
256 ((up->port.flags & UPF_TXX9_USE_SCLK) ?
258 sio_quot_set(up, uart_get_divisor(port, 9600));
259 sio_out(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSTL_MAX /* 15 */);
260 sio_out(up, TXX9_SIDICR, 0);
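serial_txx9_initialize soft-resets the channel (SIFCR_SWRST, polled until the bit self-clears), picks the baud clock source from UPF_TXX9_USE_SCLK, programs a conservative default of 9600 baud via uart_get_divisor(), raises the RTS trigger level to its maximum and leaves all interrupts disabled. For a conventional 16x-oversampled UART the quotient uart_get_divisor() hands back is simply uartclk / (16 * baud); a tiny sketch of that arithmetic, with an example clock chosen purely for illustration (uart_get_divisor() also honours a custom divisor at B38400, which is ignored here):

#include <stdio.h>

/* Classic 16x-oversampling divisor: quot = clk / (16 * baud), rounded. */
static unsigned int uart_divisor(unsigned int uartclk, unsigned int baud)
{
        return (uartclk + 8 * baud) / (16 * baud);
}

int main(void)
{
        unsigned int clk = 14745600;           /* example 14.7456 MHz module clock */
        printf("9600   -> quot %u\n", uart_divisor(clk, 9600));     /* 96 */
        printf("115200 -> quot %u\n", uart_divisor(clk, 115200));   /* 8 */
        return 0;
}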
264 receive_chars(struct uart_txx9_port *up, unsigned int *status)
273 ch = sio_in(up, TXX9_SIRFIFO);
275 up->port.icount.rx++;
279 up->port.ignore_status_mask & ~TXX9_SIDISR_RFDN_MASK;
287 up->port.icount.brk++;
294 if (uart_handle_break(&up->port))
297 up->port.icount.parity++;
299 up->port.icount.frame++;
301 up->port.icount.overrun++;
315 disr &= up->port.read_status_mask;
324 if (uart_handle_sysrq_char(&up->port, ch))
327 uart_insert_char(&up->port, disr, TXX9_SIDISR_UOER, ch, flag);
330 up->port.ignore_status_mask = next_ignore_status_mask;
331 disr = sio_in(up, TXX9_SIDISR);
333 spin_unlock(&up->port.lock);
334 tty_flip_buffer_push(&up->port.state->port);
335 spin_lock(&up->port.lock);
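receive_chars drains the RX FIFO, bumping icount.rx for every character and classifying exceptional status in a fixed order (break first, then parity, then framing, with overruns counted on top), masks the result with read_status_mask before choosing a flag and inserting the character, and drops the port lock around tty_flip_buffer_push() so the line discipline can run. A compact user-space sketch of just the counting and classification step (the bit names are placeholders, not the SIDISR layout):

#include <stdio.h>

enum { ST_BRK = 1 << 0, ST_PAR = 1 << 1, ST_FRM = 1 << 2, ST_OVR = 1 << 3 };

struct icount { unsigned rx, brk, parity, frame, overrun; };

/* Returns the flag to attach to this character; overrun is tallied separately. */
static char classify(unsigned int status, struct icount *ic)
{
        ic->rx++;
        if (status & (ST_BRK | ST_PAR | ST_FRM | ST_OVR)) {
                if (status & ST_BRK)
                        ic->brk++;              /* break wins over parity/framing */
                else if (status & ST_PAR)
                        ic->parity++;
                else if (status & ST_FRM)
                        ic->frame++;
                if (status & ST_OVR)
                        ic->overrun++;          /* counted even alongside the others */
        }
        if (status & ST_BRK) return 'B';
        if (status & ST_PAR) return 'P';
        if (status & ST_FRM) return 'F';
        return 'N';                             /* TTY_NORMAL */
}

int main(void)
{
        struct icount ic = { 0 };
        printf("flag=%c\n", classify(ST_PAR | ST_OVR, &ic));
        printf("rx=%u parity=%u overrun=%u\n", ic.rx, ic.parity, ic.overrun);
        return 0;
}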
339 static inline void transmit_chars(struct uart_txx9_port *up)
341 struct circ_buf *xmit = &up->port.state->xmit;
344 if (up->port.x_char) {
345 sio_out(up, TXX9_SITFIFO, up->port.x_char);
346 up->port.icount.tx++;
347 up->port.x_char = 0;
350 if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
351 serial_txx9_stop_tx(&up->port);
357 sio_out(up, TXX9_SITFIFO, xmit->buf[xmit->tail]);
359 up->port.icount.tx++;
365 uart_write_wakeup(&up->port);
368 serial_txx9_stop_tx(&up->port);
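transmit_chars gives priority to port.x_char (a pending flow-control character), then feeds the TX FIFO from the circular xmit buffer, stops when the buffer empties or TX is stopped, and wakes writers once the pending count drops below a wake-up threshold. A stand-alone sketch of that drain loop over a small byte ring (the ring size and threshold are illustrative, not the driver's values):

#include <stdio.h>

#define RING_SIZE     16                        /* power of two, like UART_XMIT_SIZE */
#define WAKEUP_THRESH 4                         /* illustrative wake-up threshold */

struct ring { unsigned char buf[RING_SIZE]; unsigned head, tail; };

static unsigned ring_chars(const struct ring *r)
{
        return (r->head - r->tail) & (RING_SIZE - 1);
}

/* Move up to 'fifo_space' bytes from the ring into the (pretend) TX FIFO. */
static void drain(struct ring *r, unsigned fifo_space, unsigned char x_char)
{
        if (x_char) {
                printf("tx flow char %#x\n", x_char);   /* x_char always goes first */
                return;
        }
        while (fifo_space-- && ring_chars(r)) {
                printf("tx %c\n", r->buf[r->tail]);
                r->tail = (r->tail + 1) & (RING_SIZE - 1);
        }
        if (ring_chars(r) < WAKEUP_THRESH)
                printf("wake up writers\n");            /* driver: uart_write_wakeup() */
        if (!ring_chars(r))
                printf("stop tx\n");                    /* driver: serial_txx9_stop_tx() */
}

int main(void)
{
        struct ring r = { "hello", 5, 0 };
        drain(&r, 8, 0);
        return 0;
}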
374 struct uart_txx9_port *up = dev_id;
378 spin_lock(&up->port.lock);
379 status = sio_in(up, TXX9_SIDISR);
380 if (!(sio_in(up, TXX9_SIDICR) & TXX9_SIDICR_TIE))
384 spin_unlock(&up->port.lock);
389 receive_chars(up, &status);
391 transmit_chars(up);
393 sio_mask(up, TXX9_SIDISR,
396 spin_unlock(&up->port.lock);
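serial_txx9_interrupt is a shared handler: under the port lock it reads SIDISR, returns early if neither an RX condition nor an enabled TX interrupt (SIDICR_TIE) is pending, otherwise dispatches to receive_chars/transmit_chars, acknowledges the handled bits with sio_mask and loops until nothing is left. A schematic user-space sketch of that dispatch loop (the status bit names and the pass limit are placeholders):

#include <stdio.h>
#include <stdbool.h>

#define IRQ_RX  (1 << 0)
#define IRQ_TX  (1 << 1)
#define PASS_LIMIT 256                          /* safety valve against a stuck line */

static unsigned int pending = IRQ_RX | IRQ_TX;  /* pretend interrupt status register */

static unsigned int read_status(void) { return pending; }
static void ack(unsigned int bits)    { pending &= ~bits; }

static bool handle_irq(bool tx_enabled)
{
        bool handled = false;
        int pass = 0;

        while (pass++ < PASS_LIMIT) {
                unsigned int status = read_status();

                if (!(status & IRQ_RX) && !(tx_enabled && (status & IRQ_TX)))
                        break;                  /* nothing we care about is pending */
                if (status & IRQ_RX)
                        printf("receive_chars()\n");
                if (tx_enabled && (status & IRQ_TX))
                        printf("transmit_chars()\n");
                ack(status & (IRQ_RX | IRQ_TX));
                handled = true;
        }
        return handled;                         /* IRQ_HANDLED vs IRQ_NONE */
}

int main(void)
{
        printf("handled=%d\n", handle_irq(true));
        return 0;
}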
407 struct uart_txx9_port *up = to_uart_txx9_port(port);
411 spin_lock_irqsave(&up->port.lock, flags);
412 ret = (sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS) ? TIOCSER_TEMT : 0;
413 spin_unlock_irqrestore(&up->port.lock, flags);
420 struct uart_txx9_port *up = to_uart_txx9_port(port);
425 ret |= (sio_in(up, TXX9_SIFLCR) & TXX9_SIFLCR_RTSSC) ? 0 : TIOCM_RTS;
426 ret |= (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS) ? 0 : TIOCM_CTS;
433 struct uart_txx9_port *up = to_uart_txx9_port(port);
436 sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC);
438 sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC);
443 struct uart_txx9_port *up = to_uart_txx9_port(port);
446 spin_lock_irqsave(&up->port.lock, flags);
448 sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
450 sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
451 spin_unlock_irqrestore(&up->port.lock, flags);
458 static void wait_for_xmitr(struct uart_txx9_port *up)
462 /* Wait up to 10ms for the character(s) to be sent. */
464 !(sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS))
467 /* Wait up to 1s for flow control if necessary */
468 if (up->port.flags & UPF_CONS_FLOW) {
471 (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS))
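wait_for_xmitr is the polled-console helper: it spins, bounded per the 10ms comment above, until SICISR reports the transmitter idle, and when the console port uses hardware flow control (UPF_CONS_FLOW) it additionally waits, up to about a second, for CTS. A sketch of the same two bounded waits (the iteration counts are assumptions, chosen only to echo the commented timeouts):

#include <stdio.h>
#include <stdbool.h>

static void udelay_stub(unsigned us) { (void)us; }    /* stand-in for udelay() */

static bool tx_idle(void) { return true; }            /* pretend TXALS bit */
static bool cts_ok(void)  { return true; }            /* pretend CTS is asserted */

static void wait_for_xmitr(bool cons_flow)
{
        unsigned int tmout = 10000;                    /* roughly 10ms at 1us per loop */

        while (--tmout && !tx_idle())
                udelay_stub(1);

        if (cons_flow) {                               /* UPF_CONS_FLOW */
                tmout = 1000000;                       /* roughly 1s upper bound */
                while (--tmout && !cts_ok())
                        udelay_stub(1);
        }
}

int main(void)
{
        wait_for_xmitr(true);
        printf("transmitter drained (or timed out)\n");
        return 0;
}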
487 struct uart_txx9_port *up = to_uart_txx9_port(port);
492 ier = sio_in(up, TXX9_SIDICR);
493 sio_out(up, TXX9_SIDICR, 0);
495 while (sio_in(up, TXX9_SIDISR) & TXX9_SIDISR_UVALID)
498 c = sio_in(up, TXX9_SIRFIFO);
504 sio_mask(up, TXX9_SIDISR, TXX9_SIDISR_RDIS);
505 sio_out(up, TXX9_SIDICR, ier);
513 struct uart_txx9_port *up = to_uart_txx9_port(port);
518 ier = sio_in(up, TXX9_SIDICR);
519 sio_out(up, TXX9_SIDICR, 0);
521 wait_for_xmitr(up);
525 sio_out(up, TXX9_SITFIFO, c);
531 wait_for_xmitr(up);
532 sio_out(up, TXX9_SIDICR, ier);
539 struct uart_txx9_port *up = to_uart_txx9_port(port);
547 sio_set(up, TXX9_SIFCR,
550 sio_mask(up, TXX9_SIFCR,
552 sio_out(up, TXX9_SIDICR, 0);
557 sio_out(up, TXX9_SIDISR, 0);
559 retval = request_irq(up->port.irq, serial_txx9_interrupt,
560 IRQF_SHARED, "serial_txx9", up);
567 spin_lock_irqsave(&up->port.lock, flags);
568 serial_txx9_set_mctrl(&up->port, up->port.mctrl);
569 spin_unlock_irqrestore(&up->port.lock, flags);
572 sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RSDE | TXX9_SIFLCR_TSDE);
577 sio_set(up, TXX9_SIDICR, TXX9_SIDICR_RIE);
584 struct uart_txx9_port *up = to_uart_txx9_port(port);
590 sio_out(up, TXX9_SIDICR, 0); /* disable all intrs */
592 spin_lock_irqsave(&up->port.lock, flags);
593 serial_txx9_set_mctrl(&up->port, up->port.mctrl);
594 spin_unlock_irqrestore(&up->port.lock, flags);
599 sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_TBRK);
602 if (up->port.cons && up->port.line == up->port.cons->index) {
603 free_irq(up->port.irq, up);
608 sio_set(up, TXX9_SIFCR,
611 sio_mask(up, TXX9_SIFCR,
615 sio_set(up, TXX9_SIFLCR, TXX9_SIFLCR_RSDE | TXX9_SIFLCR_TSDE);
617 free_irq(up->port.irq, up);
624 struct uart_txx9_port *up = to_uart_txx9_port(port);
635 cval = sio_in(up, TXX9_SILCR);
669 /* Set up FIFOs */
677 spin_lock_irqsave(&up->port.lock, flags);
684 up->port.read_status_mask = TXX9_SIDISR_UOER |
687 up->port.read_status_mask |= TXX9_SIDISR_UFER | TXX9_SIDISR_UPER;
689 up->port.read_status_mask |= TXX9_SIDISR_UBRK;
694 up->port.ignore_status_mask = 0;
696 up->port.ignore_status_mask |= TXX9_SIDISR_UPER | TXX9_SIDISR_UFER;
698 up->port.ignore_status_mask |= TXX9_SIDISR_UBRK;
704 up->port.ignore_status_mask |= TXX9_SIDISR_UOER;
711 up->port.ignore_status_mask |= TXX9_SIDISR_RDIS;
715 (up->port.flags & UPF_TXX9_HAVE_CTS_LINE)) {
716 sio_set(up, TXX9_SIFLCR,
719 sio_mask(up, TXX9_SIFLCR,
723 sio_out(up, TXX9_SILCR, cval);
724 sio_quot_set(up, quot);
725 sio_out(up, TXX9_SIFCR, fcr);
727 serial_txx9_set_mctrl(&up->port, up->port.mctrl);
728 spin_unlock_irqrestore(&up->port.lock, flags);
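serial_txx9_set_termios builds two filters from the termios flags before touching the hardware: read_status_mask selects which SIDISR bits are reported at all (overrun and the RX/TX ready bits always, parity/framing only with INPCK, break only with BRKINT or PARMRK), and ignore_status_mask selects which of those are then silently dropped (IGNPAR, IGNBRK, overruns as well in fully raw mode, and even the RX-ready bit when CREAD is off); LCR, the baud generator and the FIFO control register are then written under the port lock. A small user-space sketch of the mask-building step (the bit values are placeholders for the SIDISR bits, and the exact raw-mode rule is an assumption):

#include <stdio.h>

#define ST_OVERRUN (1 << 0)
#define ST_PARITY  (1 << 1)
#define ST_FRAME   (1 << 2)
#define ST_BREAK   (1 << 3)
#define ST_RXREADY (1 << 4)

struct termios_bits { int inpck, brkint, parmrk, ignpar, ignbrk, cread; };

static void build_masks(const struct termios_bits *t,
                        unsigned int *read_mask, unsigned int *ignore_mask)
{
        *read_mask = ST_OVERRUN | ST_RXREADY;          /* always reported */
        if (t->inpck)
                *read_mask |= ST_PARITY | ST_FRAME;
        if (t->brkint || t->parmrk)
                *read_mask |= ST_BREAK;

        *ignore_mask = 0;
        if (t->ignpar)
                *ignore_mask |= ST_PARITY | ST_FRAME;
        if (t->ignbrk) {
                *ignore_mask |= ST_BREAK;
                if (t->ignpar)                         /* raw mode: drop overruns too */
                        *ignore_mask |= ST_OVERRUN;
        }
        if (!t->cread)                                 /* CREAD off: discard everything */
                *ignore_mask |= ST_RXREADY;
}

int main(void)
{
        struct termios_bits t = { .ignpar = 1, .ignbrk = 1, .cread = 1 };
        unsigned int rm, im;
        build_masks(&t, &rm, &im);
        printf("read mask %#x, ignore mask %#x\n", rm, im);
        return 0;
}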
747 static int serial_txx9_request_resource(struct uart_txx9_port *up)
752 switch (up->port.iotype) {
754 if (!up->port.mapbase)
757 if (!request_mem_region(up->port.mapbase, size, "serial_txx9")) {
762 if (up->port.flags & UPF_IOREMAP) {
763 up->port.membase = ioremap(up->port.mapbase, size);
764 if (!up->port.membase) {
765 release_mem_region(up->port.mapbase, size);
772 if (!request_region(up->port.iobase, size, "serial_txx9"))
779 static void serial_txx9_release_resource(struct uart_txx9_port *up)
783 switch (up->port.iotype) {
785 if (!up->port.mapbase)
788 if (up->port.flags & UPF_IOREMAP) {
789 iounmap(up->port.membase);
790 up->port.membase = NULL;
793 release_mem_region(up->port.mapbase, size);
797 release_region(up->port.iobase, size);
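serial_txx9_request_resource / serial_txx9_release_resource follow the usual claim-then-map pattern for the MMIO case: request_mem_region() reserves the physical range, ioremap() maps it only when UPF_IOREMAP asks for it, and a failed ioremap releases the region again; the port-I/O case is just request_region()/release_region(). A hedged kernel-style sketch of that claim-and-unwind order (this is not the driver's code, it only builds inside a kernel tree, and the window size is an assumption):

#include <linux/types.h>
#include <linux/io.h>
#include <linux/ioport.h>

#define EXAMPLE_REGION_SIZE 0x24                /* assumed register window size */

/* Claim and map an MMIO register window; unwind the claim if mapping fails. */
static void __iomem *example_claim(resource_size_t mapbase)
{
        void __iomem *base;

        if (!request_mem_region(mapbase, EXAMPLE_REGION_SIZE, "example-uart"))
                return NULL;                    /* range already owned by someone else */

        base = ioremap(mapbase, EXAMPLE_REGION_SIZE);
        if (!base)
                release_mem_region(mapbase, EXAMPLE_REGION_SIZE);  /* give it back */
        return base;
}

static void example_release(resource_size_t mapbase, void __iomem *base)
{
        if (base)
                iounmap(base);
        release_mem_region(mapbase, EXAMPLE_REGION_SIZE);
}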
804 struct uart_txx9_port *up = to_uart_txx9_port(port);
805 serial_txx9_release_resource(up);
810 struct uart_txx9_port *up = to_uart_txx9_port(port);
811 return serial_txx9_request_resource(up);
816 struct uart_txx9_port *up = to_uart_txx9_port(port);
823 ret = serial_txx9_request_resource(up);
827 up->port.fifosize = TXX9_SIO_TX_FIFO;
830 if (up->port.line == up->port.cons->index)
872 struct uart_txx9_port *up = &serial_txx9_ports[i];
874 up->port.line = i;
875 up->port.ops = &serial_txx9_pops;
876 up->port.dev = dev;
877 if (up->port.iobase || up->port.mapbase)
878 uart_add_one_port(drv, &up->port);
886 struct uart_txx9_port *up = to_uart_txx9_port(port);
888 wait_for_xmitr(up);
889 sio_out(up, TXX9_SITFIFO, ch);
901 struct uart_txx9_port *up = &serial_txx9_ports[co->index];
907 ier = sio_in(up, TXX9_SIDICR);
908 sio_out(up, TXX9_SIDICR, 0);
912 flcr = sio_in(up, TXX9_SIFLCR);
913 if (!(up->port.flags & UPF_CONS_FLOW) && (flcr & TXX9_SIFLCR_TES))
914 sio_out(up, TXX9_SIFLCR, flcr & ~TXX9_SIFLCR_TES);
916 uart_console_write(&up->port, s, count, serial_txx9_console_putchar);
922 wait_for_xmitr(up);
923 sio_out(up, TXX9_SIFLCR, flcr);
924 sio_out(up, TXX9_SIDICR, ier);
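serial_txx9_console_write is the polled console path: it saves SIDICR and masks every interrupt, temporarily clears the flow-control gate in SIFLCR unless the console really is flow-controlled (UPF_CONS_FLOW), emits the string one character at a time through uart_console_write() and serial_txx9_console_putchar(), then waits for the FIFO to drain before restoring both registers. A user-space analogue of that save / emit / restore discipline (everything here is a stand-in, including the register names):

#include <stdio.h>

struct fake_console {
        unsigned int ier;                       /* pretend interrupt-enable register */
        unsigned int flow;                      /* pretend flow-control register */
};

static void fake_putchar(struct fake_console *c, unsigned char ch)
{
        (void)c;
        putchar(ch);                            /* "wait for room, then write the FIFO" */
}

static void fake_console_write(struct fake_console *c, const char *s, unsigned int n)
{
        unsigned int saved_ier = c->ier;
        unsigned int saved_flow = c->flow;
        unsigned int i;

        c->ier = 0;                             /* mask interrupts while we poll */
        c->flow = 0;                            /* lift flow control so output can't stall */

        for (i = 0; i < n; i++) {
                if (s[i] == '\n')
                        fake_putchar(c, '\r');  /* LF -> CRLF, as uart_console_write does */
                fake_putchar(c, (unsigned char)s[i]);
        }

        /* wait_for_xmitr() would go here: let the FIFO drain before restoring state */
        c->flow = saved_flow;
        c->ier = saved_ier;
}

int main(void)
{
        struct fake_console c = { .ier = 0xff, .flow = 0x02 };
        fake_console_write(&c, "hello console\n", 14);
        return 0;
}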
930 struct uart_txx9_port *up;
943 up = &serial_txx9_ports[co->index];
944 port = &up->port;
948 serial_txx9_initialize(&up->port);
1116 struct uart_txx9_port *up = &serial_txx9_ports[i];
1118 if (up->port.dev == &dev->dev)
1130 struct uart_txx9_port *up = &serial_txx9_ports[i];
1132 if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev)
1133 uart_suspend_port(&serial_txx9_reg, &up->port);
1144 struct uart_txx9_port *up = &serial_txx9_ports[i];
1146 if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev)
1147 uart_resume_port(&serial_txx9_reg, &up->port);
1203 struct uart_txx9_port *up = pci_get_drvdata(dev);
1205 if (up) {
1206 serial_txx9_unregister_port(up->port.line);
1214 struct uart_txx9_port *up = pci_get_drvdata(dev);
1216 if (up)
1217 uart_suspend_port(&serial_txx9_reg, &up->port);
1225 struct uart_txx9_port *up = pci_get_drvdata(dev);
1229 if (up)
1230 uart_resume_port(&serial_txx9_reg, &up->port);
1312 struct uart_txx9_port *up = &serial_txx9_ports[i];
1313 if (up->port.iobase || up->port.mapbase)
1314 uart_remove_one_port(&serial_txx9_reg, &up->port);