2 * drivers/serial/mpsc.c
4 * Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240,
5 * GT64260, MV64340, MV64360, GT96100, ... ).
7 * Author: Mark A. Greer <mgreer@mvista.com>
9 * Based on an old MPSC driver that was in the linuxppc tree. It appears to
10 * have been created by Chris Zankel (formerly of MontaVista) but there
11 * is no proper Copyright so I'm not sure. Apparently, parts were also
12 * taken from PPCBoot (now U-Boot). Also based on drivers/serial/8250.c
15 * 2004 (c) MontaVista, Software, Inc. This file is licensed under
16 * the terms of the GNU General Public License version 2. This program
17 * is licensed "as is" without any warranty of any kind, whether express
21 * The MPSC interface is much like a typical network controller's interface.
22 * That is, you set up separate rings of descriptors for transmitting and
23 * receiving data. There is also a pool of buffers with (one buffer per
24 * descriptor) that incoming data are dma'd into or outgoing data are dma'd
27 * The MPSC requires two other controllers to be able to work. The Baud Rate
28 * Generator (BRG) provides a clock at programmable frequencies which determines
29 * the baud rate. The Serial DMA Controller (SDMA) takes incoming data from the
30 * MPSC and DMA's it into memory or DMA's outgoing data and passes it to the
31 * MPSC. It is actually the SDMA interrupt that the driver uses to keep the
32 * transmit and receive "engines" going (i.e., indicate data has been
33 * transmitted or received).
37 * 1) Some chips have an erratum where several regs cannot be
38 * read. To work around that, we keep a local copy of those regs in
41 * 2) Some chips have an erratum where the ctlr will hang when the SDMA ctlr
42 * accesses system mem with coherency enabled. For that reason, the driver
43 * assumes that coherency for that ctlr has been disabled. This means
44 * that when in a cache coherent system, the driver has to manually manage
45 * the data cache on the areas that it touches because the dma_* macro are
48 * 3) There is an erratum (on PPC) where you can't use the instruction to do
49 * a DMA_TO_DEVICE/cache clean so DMA_BIDIRECTIONAL/flushes are used in places
50 * where a DMA_TO_DEVICE/clean would have [otherwise] sufficed.
52 * 4) AFAICT, hardware flow control isn't supported by the controller --MAG.
58 * Define how this driver is known to the outside (we've been assigned a
59 * range on the "Low-density serial ports" major).
61 #define MPSC_MAJOR 204
62 #define MPSC_MINOR_START 44
63 #define MPSC_DRIVER_NAME "MPSC"
64 #define MPSC_DEVFS_NAME "ttymm/"
65 #define MPSC_DEV_NAME "ttyMM"
66 #define MPSC_VERSION "1.00"
68 static struct mpsc_port_info mpsc_ports[MPSC_NUM_CTLRS];
69 static struct mpsc_shared_regs mpsc_shared_regs;
70 static struct uart_driver mpsc_reg;
72 static void mpsc_start_rx(struct mpsc_port_info *pi);
73 static void mpsc_free_ring_mem(struct mpsc_port_info *pi);
74 static void mpsc_release_port(struct uart_port *port);
76 ******************************************************************************
78 * Baud Rate Generator Routines (BRG)
80 ******************************************************************************
/*
 * mpsc_brg_init - select the BRG input clock source and clear the baud
 * tuning register (BRG_BTR low half). Uses the mirror copy of BRG_BCR when
 * register-read errata are in effect (pi->mirror_regs).
 * NOTE(review): this extract is missing lines (braces, declarations);
 * code tokens left untouched.
 */
83 mpsc_brg_init(struct mpsc_port_info *pi, u32 clk_src)
/* Read-modify-write clock-source field, bits [21:18] of BRG_BCR */
87 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
88 v = (v & ~(0xf << 18)) | ((clk_src & 0xf) << 18);
95 writel(v, pi->brg_base + BRG_BCR);
/* Zero the low 16 bits of the baud tuning register */
97 writel(readl(pi->brg_base + BRG_BTR) & 0xffff0000,
98 pi->brg_base + BRG_BTR);
/*
 * mpsc_brg_enable - turn the Baud Rate Generator on.
 * NOTE(review): the line that sets the enable bit in 'v' is missing from
 * this extract; presumably between the read and the write.
 */
103 mpsc_brg_enable(struct mpsc_port_info *pi)
107 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
112 writel(v, pi->brg_base + BRG_BCR);
/*
 * mpsc_brg_disable - turn the Baud Rate Generator off.
 * NOTE(review): the line that clears the enable bit in 'v' is missing
 * from this extract.
 */
117 mpsc_brg_disable(struct mpsc_port_info *pi)
121 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
126 writel(v, pi->brg_base + BRG_BCR);
/*
 * mpsc_set_baudrate - program the CDV divider field of BRG_BCR for the
 * requested baud rate. The BRG is disabled around the update; the
 * re-enable call appears to be in lines missing from this extract.
 */
131 mpsc_set_baudrate(struct mpsc_port_info *pi, u32 baud)
134 * To set the baud, we adjust the CDV field in the BRG_BCR reg.
135 * From manual: Baud = clk / ((CDV+1)*2) ==> CDV = (clk / (baud*2)) - 1.
136 * However, the input clock is divided by 16 in the MPSC b/c of how
137 * 'MPSC_MMCRH' was set up so we have to divide the 'clk' used in our
138 * calculation by 16 to account for that. So the real calculation
139 * that accounts for the way the mpsc is set up is:
140 * CDV = (clk / (baud*2*16)) - 1 ==> CDV = (clk / (baud << 5)) - 1.
/* NOTE(review): divides by baud without a zero-guard; callers presumably
 * never pass 0 -- confirm. */
142 u32 cdv = (pi->port.uartclk / (baud << 5)) - 1;
145 mpsc_brg_disable(pi);
146 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
/* CDV occupies the low 16 bits of BRG_BCR */
147 v = (v & 0xffff0000) | (cdv & 0xffff);
151 writel(v, pi->brg_base + BRG_BCR);
158 ******************************************************************************
160 * Serial DMA Routines (SDMA)
162 ******************************************************************************
/*
 * mpsc_sdma_burstsize - encode the DMA burst size (bytes) into the 2-bit
 * field at bits [13:12] of SDMA_SDC. Burst is expressed in 64-bit words.
 * NOTE(review): the mask '& (0x3 << 12)' in the final write *keeps* only
 * the field being replaced instead of clearing it -- looks like it should
 * be '& ~(0x3 << 12)'; flagged for upstream confirmation, tokens untouched.
 */
166 mpsc_sdma_burstsize(struct mpsc_port_info *pi, u32 burst_size)
170 pr_debug("mpsc_sdma_burstsize[%d]: burst_size: %d\n",
171 pi->port.line, burst_size);
173 burst_size >>= 3; /* Divide by 8 b/c reg values are 8-byte chunks */
176 v = 0x0; /* 1 64-bit word */
177 else if (burst_size < 4)
178 v = 0x1; /* 2 64-bit words */
179 else if (burst_size < 8)
180 v = 0x2; /* 4 64-bit words */
182 v = 0x3; /* 8 64-bit words */
184 writel((readl(pi->sdma_base + SDMA_SDC) & (0x3 << 12)) | (v << 12),
185 pi->sdma_base + SDMA_SDC);
/*
 * mpsc_sdma_init - basic SDMA controller setup, then program the DMA
 * burst size (callers pass the cacheline size).
 */
190 mpsc_sdma_init(struct mpsc_port_info *pi, u32 burst_size)
192 pr_debug("mpsc_sdma_init[%d]: burst_size: %d\n", pi->port.line,
/* Preserve low config bits, force mode bits 0x03f on */
195 writel((readl(pi->sdma_base + SDMA_SDC) & 0x3ff) | 0x03f,
196 pi->sdma_base + SDMA_SDC);
197 mpsc_sdma_burstsize(pi, burst_size);
/*
 * mpsc_sdma_intr_mask - mask (disable) the SDMA interrupt bits given in
 * 'mask'. The shared mask register is mirrored for chips with the
 * register-read erratum. The bit-clearing line is missing from this
 * extract (between the read and the mirror/write).
 */
202 mpsc_sdma_intr_mask(struct mpsc_port_info *pi, u32 mask)
206 pr_debug("mpsc_sdma_intr_mask[%d]: mask: 0x%x\n", pi->port.line, mask);
208 old = v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m :
209 readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
217 pi->shared_regs->SDMA_INTR_MASK_m = v;
218 writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
/*
 * mpsc_sdma_intr_unmask - unmask (enable) the SDMA interrupt bits given
 * in 'mask'. Mirror-register counterpart of mpsc_sdma_intr_mask(); the
 * bit-setting line is missing from this extract.
 */
226 mpsc_sdma_intr_unmask(struct mpsc_port_info *pi, u32 mask)
230 pr_debug("mpsc_sdma_intr_unmask[%d]: mask: 0x%x\n", pi->port.line,mask);
232 v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m :
233 readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
241 pi->shared_regs->SDMA_INTR_MASK_m = v;
242 writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
/*
 * mpsc_sdma_intr_ack - acknowledge (clear) all pending SDMA interrupt
 * causes by zeroing the shared cause register and its mirror.
 */
247 mpsc_sdma_intr_ack(struct mpsc_port_info *pi)
249 pr_debug("mpsc_sdma_intr_ack[%d]: Acknowledging IRQ\n", pi->port.line);
252 pi->shared_regs->SDMA_INTR_CAUSE_m = 0;
253 writel(0, pi->shared_regs->sdma_intr_base + SDMA_INTR_CAUSE);
/*
 * mpsc_sdma_set_rx_ring - point the SDMA current-Rx-descriptor register
 * (SCRDP) at the given descriptor's *physical* address.
 */
258 mpsc_sdma_set_rx_ring(struct mpsc_port_info *pi, struct mpsc_rx_desc *rxre_p)
260 pr_debug("mpsc_sdma_set_rx_ring[%d]: rxre_p: 0x%x\n",
261 pi->port.line, (u32) rxre_p);
263 writel((u32)rxre_p, pi->sdma_base + SDMA_SCRDP);
/*
 * mpsc_sdma_set_tx_ring - point both the first (SFTDP) and current
 * (SCTDP) Tx descriptor registers at the given physical descriptor.
 */
268 mpsc_sdma_set_tx_ring(struct mpsc_port_info *pi, struct mpsc_tx_desc *txre_p)
270 writel((u32)txre_p, pi->sdma_base + SDMA_SFTDP);
271 writel((u32)txre_p, pi->sdma_base + SDMA_SCTDP);
/*
 * mpsc_sdma_cmd - read-modify-write the SDMA command register (SDCM).
 * NOTE(review): the lines combining 'val' into 'v' are missing from this
 * extract.
 */
276 mpsc_sdma_cmd(struct mpsc_port_info *pi, u32 val)
280 v = readl(pi->sdma_base + SDMA_SDCM);
286 writel(v, pi->sdma_base + SDMA_SDCM);
/*
 * mpsc_sdma_tx_active - non-zero iff the SDMA Tx engine is currently
 * running (TXD bit of the SDCM command register).
 */
292 mpsc_sdma_tx_active(struct mpsc_port_info *pi)
294 return readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_TXD;
/*
 * mpsc_sdma_start_tx - if the Tx engine is idle and the tail descriptor
 * is owned by hardware (O bit set), point the SDMA at it and kick off a
 * transmit demand. Manual cache management is needed because the SDMA
 * runs with coherency disabled (see erratum notes in the file header).
 */
298 mpsc_sdma_start_tx(struct mpsc_port_info *pi)
300 struct mpsc_tx_desc *txre, *txre_p;
302 /* If tx isn't running & there's a desc ready to go, start it */
303 if (!mpsc_sdma_tx_active(pi)) {
304 txre = (struct mpsc_tx_desc *)(pi->txr +
305 (pi->txr_tail * MPSC_TXRE_SIZE));
/* Pull the descriptor out of any stale cache line before reading it */
306 dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
307 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
308 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
309 invalidate_dcache_range((ulong)txre,
310 (ulong)txre + MPSC_TXRE_SIZE);
/* Descriptor owned by SDMA => data queued and ready to send */
313 if (be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O) {
314 txre_p = (struct mpsc_tx_desc *)(pi->txr_p +
318 mpsc_sdma_set_tx_ring(pi, txre_p);
319 mpsc_sdma_cmd(pi, SDMA_SDCM_STD | SDMA_SDCM_TXD);
/*
 * mpsc_sdma_stop - abort all SDMA activity for this port: abort Rx/Tx,
 * clear the ring pointers, then mask and ack all SDMA interrupts.
 */
327 mpsc_sdma_stop(struct mpsc_port_info *pi)
329 pr_debug("mpsc_sdma_stop[%d]: Stopping SDMA\n", pi->port.line);
331 /* Abort any SDMA transfers */
332 mpsc_sdma_cmd(pi, 0);
333 mpsc_sdma_cmd(pi, SDMA_SDCM_AR | SDMA_SDCM_AT);
335 /* Clear the SDMA current and first TX and RX pointers */
336 mpsc_sdma_set_tx_ring(pi, NULL);
337 mpsc_sdma_set_rx_ring(pi, NULL);
339 /* Disable interrupts */
340 mpsc_sdma_intr_mask(pi, 0xf);
341 mpsc_sdma_intr_ack(pi);
347 ******************************************************************************
349 * Multi-Protocol Serial Controller Routines (MPSC)
351 ******************************************************************************
/*
 * mpsc_hw_init - one-time hardware setup for a port: route the MPSC
 * clocks (MRR/RCRR/TCRR), select UART mode, and zero the channel
 * registers. Two code paths: mirror-register chips (read erratum) vs.
 * chips where the routing registers can be read back directly.
 */
355 mpsc_hw_init(struct mpsc_port_info *pi)
359 pr_debug("mpsc_hw_init[%d]: Initializing hardware\n", pi->port.line);
361 /* Set up clock routing */
362 if (pi->mirror_regs) {
363 v = pi->shared_regs->MPSC_MRR_m;
365 pi->shared_regs->MPSC_MRR_m = v;
366 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);
/* Rx clock routing: BRG output selected via 0x100 */
368 v = pi->shared_regs->MPSC_RCRR_m;
369 v = (v & ~0xf0f) | 0x100;
370 pi->shared_regs->MPSC_RCRR_m = v;
371 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
/* Tx clock routing, same selection as Rx */
373 v = pi->shared_regs->MPSC_TCRR_m;
374 v = (v & ~0xf0f) | 0x100;
375 pi->shared_regs->MPSC_TCRR_m = v;
376 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
/* Non-mirrored path: read-modify-write the real registers */
379 v = readl(pi->shared_regs->mpsc_routing_base + MPSC_MRR);
381 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);
383 v = readl(pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
384 v = (v & ~0xf0f) | 0x100;
385 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
387 v = readl(pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
388 v = (v & ~0xf0f) | 0x100;
389 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
392 /* Put MPSC in UART mode & enable Tx/Rx engines */
393 writel(0x000004c4, pi->mpsc_base + MPSC_MMCRL);
395 /* No preamble, 16x divider, low-latency, */
396 writel(0x04400400, pi->mpsc_base + MPSC_MMCRH);
398 if (pi->mirror_regs) {
399 pi->MPSC_CHR_1_m = 0;
400 pi->MPSC_CHR_2_m = 0;
/* Reset channel registers; CHR_3 gets the max-idle setting */
402 writel(0, pi->mpsc_base + MPSC_CHR_1);
403 writel(0, pi->mpsc_base + MPSC_CHR_2);
404 writel(pi->mpsc_max_idle, pi->mpsc_base + MPSC_CHR_3);
405 writel(0, pi->mpsc_base + MPSC_CHR_4);
406 writel(0, pi->mpsc_base + MPSC_CHR_5);
407 writel(0, pi->mpsc_base + MPSC_CHR_6);
408 writel(0, pi->mpsc_base + MPSC_CHR_7);
409 writel(0, pi->mpsc_base + MPSC_CHR_8);
410 writel(0, pi->mpsc_base + MPSC_CHR_9);
411 writel(0, pi->mpsc_base + MPSC_CHR_10);
/*
 * mpsc_enter_hunt - put the receiver into "hunt" mode (resynchronize on
 * incoming data) by setting the EH bit in CHR_2. On mirror-register
 * chips CHR_2 can't be read back, so a fixed delay replaces polling.
 */
417 mpsc_enter_hunt(struct mpsc_port_info *pi)
419 pr_debug("mpsc_enter_hunt[%d]: Hunting...\n", pi->port.line);
421 if (pi->mirror_regs) {
422 writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_EH,
423 pi->mpsc_base + MPSC_CHR_2);
424 /* Erratum prevents reading CHR_2 so just delay for a while */
428 writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_EH,
429 pi->mpsc_base + MPSC_CHR_2);
/* Hardware clears EH when hunt completes; busy-wait on it */
431 while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_EH)
/*
 * mpsc_freeze - pause the transmitter via the MPCR register. The line
 * that sets the freeze bit in 'v' is missing from this extract.
 */
439 mpsc_freeze(struct mpsc_port_info *pi)
443 pr_debug("mpsc_freeze[%d]: Freezing\n", pi->port.line);
445 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
446 readl(pi->mpsc_base + MPSC_MPCR);
451 writel(v, pi->mpsc_base + MPSC_MPCR);
/*
 * mpsc_unfreeze - resume the transmitter; counterpart of mpsc_freeze().
 * The line that clears the freeze bit in 'v' is missing from this
 * extract.
 */
456 mpsc_unfreeze(struct mpsc_port_info *pi)
460 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
461 readl(pi->mpsc_base + MPSC_MPCR);
466 writel(v, pi->mpsc_base + MPSC_MPCR);
468 pr_debug("mpsc_unfreeze[%d]: Unfrozen\n", pi->port.line);
/*
 * mpsc_set_char_length - program the character length (5-8 data bits,
 * encoded 0-3) into the 2-bit field at bits [13:12] of MPCR.
 */
473 mpsc_set_char_length(struct mpsc_port_info *pi, u32 len)
477 pr_debug("mpsc_set_char_length[%d]: char len: %d\n", pi->port.line,len);
479 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
480 readl(pi->mpsc_base + MPSC_MPCR);
481 v = (v & ~(0x3 << 12)) | ((len & 0x3) << 12);
485 writel(v, pi->mpsc_base + MPSC_MPCR);
/*
 * mpsc_set_stop_bit_length - program 1 vs 2 stop bits (single bit at
 * position 14 of MPCR).
 */
490 mpsc_set_stop_bit_length(struct mpsc_port_info *pi, u32 len)
494 pr_debug("mpsc_set_stop_bit_length[%d]: stop bits: %d\n",
497 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
498 readl(pi->mpsc_base + MPSC_MPCR);
500 v = (v & ~(1 << 14)) | ((len & 0x1) << 14);
504 writel(v, pi->mpsc_base + MPSC_MPCR);
/*
 * mpsc_set_parity - program the Tx (bits [19:18]) and Rx (bits [3:2])
 * parity mode fields of CHR_2 with the same 2-bit code 'p'.
 */
509 mpsc_set_parity(struct mpsc_port_info *pi, u32 p)
513 pr_debug("mpsc_set_parity[%d]: parity bits: 0x%x\n", pi->port.line, p);
515 v = (pi->mirror_regs) ? pi->MPSC_CHR_2_m :
516 readl(pi->mpsc_base + MPSC_CHR_2);
519 v = (v & ~0xc000c) | (p << 18) | (p << 2);
522 pi->MPSC_CHR_2_m = v;
523 writel(v, pi->mpsc_base + MPSC_CHR_2);
528 ******************************************************************************
530 * Driver Init Routines
532 ******************************************************************************
/*
 * mpsc_init_hw - top-level hardware init: set up BRG clocking and the
 * SDMA, bursting one cacheline at a time.
 */
536 mpsc_init_hw(struct mpsc_port_info *pi)
538 pr_debug("mpsc_init_hw[%d]: Initializing\n", pi->port.line);
540 mpsc_brg_init(pi, pi->brg_clk_src);
542 mpsc_sdma_init(pi, dma_get_cache_alignment()); /* burst a cacheline */
/*
 * mpsc_alloc_ring_mem - allocate the non-coherent DMA region that holds
 * both descriptor rings and all Rx/Tx buffers. A no-op if already
 * allocated. Error paths set a return code in lines missing from this
 * extract.
 */
550 mpsc_alloc_ring_mem(struct mpsc_port_info *pi)
554 pr_debug("mpsc_alloc_ring_mem[%d]: Allocating ring mem\n",
557 if (!pi->dma_region) {
/* Require full 32-bit DMA addressing before allocating */
558 if (!dma_supported(pi->port.dev, 0xffffffff)) {
559 printk(KERN_ERR "MPSC: Inadequate DMA support\n");
562 else if ((pi->dma_region = dma_alloc_noncoherent(pi->port.dev,
563 MPSC_DMA_ALLOC_SIZE, &pi->dma_region_p, GFP_KERNEL))
566 printk(KERN_ERR "MPSC: Can't alloc Desc region\n");
/*
 * mpsc_free_ring_mem - release the DMA region allocated by
 * mpsc_alloc_ring_mem() and clear the bookkeeping pointers so a later
 * re-allocation is possible. Safe to call when nothing is allocated.
 */
575 mpsc_free_ring_mem(struct mpsc_port_info *pi)
577 pr_debug("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi->port.line);
579 if (pi->dma_region) {
580 dma_free_noncoherent(pi->port.dev, MPSC_DMA_ALLOC_SIZE,
581 pi->dma_region, pi->dma_region_p);
582 pi->dma_region = NULL;
583 pi->dma_region_p = (dma_addr_t) NULL;
/*
 * mpsc_init_rings - carve the DMA region into the Rx descriptor ring,
 * Rx buffers, Tx descriptor ring and Tx buffers (all cacheline aligned),
 * initialize every descriptor, link each ring circularly, then flush the
 * whole region so the (non-coherent) SDMA sees it.
 */
590 mpsc_init_rings(struct mpsc_port_info *pi)
592 struct mpsc_rx_desc *rxre;
593 struct mpsc_tx_desc *txre;
598 pr_debug("mpsc_init_rings[%d]: Initializing rings\n", pi->port.line);
600 BUG_ON(pi->dma_region == NULL);
602 memset(pi->dma_region, 0, MPSC_DMA_ALLOC_SIZE);
605 * Descriptors & buffers are multiples of cacheline size and must be
/* 'dp' walks virtual addresses, 'dp_p' the matching physical ones */
608 dp = ALIGN((u32) pi->dma_region, dma_get_cache_alignment());
609 dp_p = ALIGN((u32) pi->dma_region_p, dma_get_cache_alignment());
612 * Partition dma region into rx ring descriptor, rx buffers,
613 * tx ring descriptors, and tx buffers.
618 dp_p += MPSC_RXR_SIZE;
621 pi->rxb_p = (u8 *) dp_p;
623 dp_p += MPSC_RXB_SIZE;
630 dp_p += MPSC_TXR_SIZE;
633 pi->txb_p = (u8 *) dp_p;
638 /* Init rx ring descriptors */
644 for (i = 0; i < MPSC_RXR_ENTRIES; i++) {
645 rxre = (struct mpsc_rx_desc *)dp;
647 rxre->bufsize = cpu_to_be16(MPSC_RXBE_SIZE);
648 rxre->bytecnt = cpu_to_be16(0);
/* Hand descriptor to hardware: Owned, Enable-Intr, First, Last */
649 rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
650 SDMA_DESC_CMDSTAT_EI |
651 SDMA_DESC_CMDSTAT_F |
652 SDMA_DESC_CMDSTAT_L);
653 rxre->link = cpu_to_be32(dp_p + MPSC_RXRE_SIZE);
654 rxre->buf_ptr = cpu_to_be32(bp_p);
656 dp += MPSC_RXRE_SIZE;
657 dp_p += MPSC_RXRE_SIZE;
658 bp += MPSC_RXBE_SIZE;
659 bp_p += MPSC_RXBE_SIZE;
661 rxre->link = cpu_to_be32(pi->rxr_p); /* Wrap last back to first */
663 /* Init tx ring descriptors */
669 for (i = 0; i < MPSC_TXR_ENTRIES; i++) {
670 txre = (struct mpsc_tx_desc *)dp;
672 txre->link = cpu_to_be32(dp_p + MPSC_TXRE_SIZE);
673 txre->buf_ptr = cpu_to_be32(bp_p);
675 dp += MPSC_TXRE_SIZE;
676 dp_p += MPSC_TXRE_SIZE;
677 bp += MPSC_TXBE_SIZE;
678 bp_p += MPSC_TXBE_SIZE;
680 txre->link = cpu_to_be32(pi->txr_p); /* Wrap last back to first */
/* Push everything out of the CPU cache for the non-coherent SDMA */
682 dma_cache_sync((void *) pi->dma_region, MPSC_DMA_ALLOC_SIZE,
684 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
685 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
686 flush_dcache_range((ulong)pi->dma_region,
687 (ulong)pi->dma_region + MPSC_DMA_ALLOC_SIZE);
/*
 * mpsc_uninit_rings - tear down ring state (pointer/index resets are in
 * lines missing from this extract). Requires the DMA region to exist.
 */
694 mpsc_uninit_rings(struct mpsc_port_info *pi)
696 pr_debug("mpsc_uninit_rings[%d]: Uninitializing rings\n",pi->port.line);
698 BUG_ON(pi->dma_region == NULL);
/*
 * mpsc_make_ready - bring the controller to a usable state; starts by
 * allocating ring memory (remaining init steps are in lines missing
 * from this extract). Returns 0 on success.
 */
717 mpsc_make_ready(struct mpsc_port_info *pi)
721 pr_debug("mpsc_make_ready[%d]: Making cltr ready\n", pi->port.line);
725 if ((rc = mpsc_alloc_ring_mem(pi)))
735 ******************************************************************************
737 * Interrupt Handling Routines
739 ******************************************************************************
/*
 * mpsc_rx_intr - Rx interrupt worker. Walks completed (CPU-owned) Rx
 * descriptors, pushes received bytes into the tty flip buffer with the
 * appropriate error flag, recycles each descriptor back to the SDMA, and
 * finally restarts the Rx engine if it stopped. Manual cache management
 * throughout because the SDMA is non-coherent (see file header).
 */
743 mpsc_rx_intr(struct mpsc_port_info *pi, struct pt_regs *regs)
745 struct mpsc_rx_desc *rxre;
746 struct tty_struct *tty = pi->port.info->tty;
747 u32 cmdstat, bytes_in, i;
750 char flag = TTY_NORMAL;
752 pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);
754 rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));
756 dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
757 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
758 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
759 invalidate_dcache_range((ulong)rxre,
760 (ulong)rxre + MPSC_RXRE_SIZE);
764 * Loop through Rx descriptors handling ones that have been completed.
766 while (!((cmdstat = be32_to_cpu(rxre->cmdstat)) & SDMA_DESC_CMDSTAT_O)){
767 bytes_in = be16_to_cpu(rxre->bytecnt);
769 /* Following use of tty struct directly is deprecated */
770 if (unlikely((tty->flip.count + bytes_in) >= TTY_FLIPBUF_SIZE)){
771 if (tty->low_latency)
772 tty_flip_buffer_push(tty);
774 * If this failed then we will throw away the bytes
775 * but must do so to clear interrupts.
/* Invalidate the data buffer before the CPU reads it */
779 bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
780 dma_cache_sync((void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
781 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
782 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
783 invalidate_dcache_range((ulong)bp,
784 (ulong)bp + MPSC_RXBE_SIZE);
788 * Other than for parity error, the manual provides little
789 * info on what data will be in a frame flagged by any of
790 * these errors. For parity error, it is the last byte in
791 * the buffer that had the error. As for the rest, I guess
792 * we'll assume there is no data in the buffer.
793 * If there is...it gets lost.
795 if (unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR |
796 SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) {
798 pi->port.icount.rx++;
800 if (cmdstat & SDMA_DESC_CMDSTAT_BR) { /* Break */
801 pi->port.icount.brk++;
803 if (uart_handle_break(&pi->port))
806 else if (cmdstat & SDMA_DESC_CMDSTAT_FR)/* Framing */
807 pi->port.icount.frame++;
808 else if (cmdstat & SDMA_DESC_CMDSTAT_OR) /* Overrun */
809 pi->port.icount.overrun++;
/* Map hardware status to the tty flag for this frame */
811 cmdstat &= pi->port.read_status_mask;
813 if (cmdstat & SDMA_DESC_CMDSTAT_BR)
815 else if (cmdstat & SDMA_DESC_CMDSTAT_FR)
817 else if (cmdstat & SDMA_DESC_CMDSTAT_OR)
819 else if (cmdstat & SDMA_DESC_CMDSTAT_PE)
823 if (uart_handle_sysrq_char(&pi->port, *bp, regs)) {
/* Error frame: hand over one flagged char unless ignored */
829 if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR |
830 SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) &&
831 !(cmdstat & pi->port.ignore_status_mask))
833 tty_insert_flip_char(tty, *bp, flag);
835 for (i=0; i<bytes_in; i++)
836 tty_insert_flip_char(tty, *bp++, TTY_NORMAL);
838 pi->port.icount.rx += bytes_in;
/* Recycle descriptor: reset count, give ownership back to SDMA */
842 rxre->bytecnt = cpu_to_be16(0);
844 rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
845 SDMA_DESC_CMDSTAT_EI |
846 SDMA_DESC_CMDSTAT_F |
847 SDMA_DESC_CMDSTAT_L);
849 dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
850 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
851 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
852 flush_dcache_range((ulong)rxre,
853 (ulong)rxre + MPSC_RXRE_SIZE);
856 /* Advance to next descriptor */
857 pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
858 rxre = (struct mpsc_rx_desc *)(pi->rxr +
859 (pi->rxr_posn * MPSC_RXRE_SIZE));
860 dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
861 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
862 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
863 invalidate_dcache_range((ulong)rxre,
864 (ulong)rxre + MPSC_RXRE_SIZE);
870 /* Restart rx engine, if its stopped */
871 if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
874 tty_flip_buffer_push(tty);
/*
 * mpsc_setup_tx_desc - finalize the head Tx descriptor for 'count'
 * bytes, optionally requesting an interrupt on completion, and hand it
 * to the SDMA (the Owner bit write is ordered last via wmb()).
 */
879 mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr)
881 struct mpsc_tx_desc *txre;
883 txre = (struct mpsc_tx_desc *)(pi->txr +
884 (pi->txr_head * MPSC_TXRE_SIZE));
886 txre->bytecnt = cpu_to_be16(count);
887 txre->shadow = txre->bytecnt;
888 wmb(); /* ensure cmdstat is last field updated */
889 txre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | SDMA_DESC_CMDSTAT_F |
890 SDMA_DESC_CMDSTAT_L | ((intr) ?
/* Flush the descriptor so the non-coherent SDMA sees it */
894 dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_BIDIRECTIONAL);
895 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
896 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
897 flush_dcache_range((ulong)txre,
898 (ulong)txre + MPSC_TXRE_SIZE);
/*
 * mpsc_copy_tx_data - drain the uart circular buffer (and any pending
 * x_char) into the Tx ring, one MPSC_TXBE_SIZE buffer per descriptor,
 * until the ring is full or there is nothing left to send. Wakes up
 * writers when the circ buffer drops below WAKEUP_CHARS.
 */
905 mpsc_copy_tx_data(struct mpsc_port_info *pi)
907 struct circ_buf *xmit = &pi->port.info->xmit;
911 /* Make sure the desc ring isn't full */
912 while (CIRC_CNT(pi->txr_head, pi->txr_tail, MPSC_TXR_ENTRIES) <
913 (MPSC_TXR_ENTRIES - 1)) {
914 if (pi->port.x_char) {
916 * Ideally, we should use the TCS field in
917 * CHR_1 to put the x_char out immediately but
918 * errata prevents us from being able to read
919 * CHR_2 to know that its safe to write to
920 * CHR_1. Instead, just put it in-band with
921 * all the other Tx data.
923 bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
924 *bp = pi->port.x_char;
928 else if (!uart_circ_empty(xmit) && !uart_tx_stopped(&pi->port)){
/* Clamp to buffer-entry size and to the contiguous tail run */
929 i = min((u32) MPSC_TXBE_SIZE,
930 (u32) uart_circ_chars_pending(xmit));
931 i = min(i, (u32) CIRC_CNT_TO_END(xmit->head, xmit->tail,
933 bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
934 memcpy(bp, &xmit->buf[xmit->tail], i);
935 xmit->tail = (xmit->tail + i) & (UART_XMIT_SIZE - 1);
937 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
938 uart_write_wakeup(&pi->port);
940 else /* All tx data copied into ring bufs */
/* Flush data buffer for the non-coherent SDMA, then arm the desc */
943 dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
944 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
945 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
946 flush_dcache_range((ulong)bp,
947 (ulong)bp + MPSC_TXBE_SIZE);
949 mpsc_setup_tx_desc(pi, i, 1);
951 /* Advance to next descriptor */
952 pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
/*
 * mpsc_tx_intr - Tx interrupt worker. Reclaims descriptors the SDMA has
 * finished with (Owner bit cleared), accounts transmitted bytes, then
 * queues more data and restarts the Tx engine if needed.
 */
959 mpsc_tx_intr(struct mpsc_port_info *pi)
961 struct mpsc_tx_desc *txre;
964 if (!mpsc_sdma_tx_active(pi)) {
965 txre = (struct mpsc_tx_desc *)(pi->txr +
966 (pi->txr_tail * MPSC_TXRE_SIZE));
968 dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
969 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
970 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
971 invalidate_dcache_range((ulong)txre,
972 (ulong)txre + MPSC_TXRE_SIZE);
/* Reclaim every descriptor the hardware has released */
975 while (!(be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O)) {
977 pi->port.icount.tx += be16_to_cpu(txre->bytecnt);
978 pi->txr_tail = (pi->txr_tail+1) & (MPSC_TXR_ENTRIES-1);
980 /* If no more data to tx, fall out of loop */
981 if (pi->txr_head == pi->txr_tail)
984 txre = (struct mpsc_tx_desc *)(pi->txr +
985 (pi->txr_tail * MPSC_TXRE_SIZE));
986 dma_cache_sync((void *) txre, MPSC_TXRE_SIZE,
988 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
989 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
990 invalidate_dcache_range((ulong)txre,
991 (ulong)txre + MPSC_TXRE_SIZE);
/* Refill the ring and kick the engine for the next descriptor */
995 mpsc_copy_tx_data(pi);
996 mpsc_sdma_start_tx(pi); /* start next desc if ready */
1003 * This is the driver's interrupt handler. To avoid a race, we first clear
1004 * the interrupt, then handle any completed Rx/Tx descriptors. When done
1005 * handling those descriptors, we restart the Rx/Tx engines if they're stopped.
/*
 * mpsc_sdma_intr - shared SDMA IRQ handler. Acks the interrupt first
 * (to avoid a race), then services completed Rx and Tx descriptors
 * under the port lock. See the comment block above in the original
 * file for the full rationale.
 */
1008 mpsc_sdma_intr(int irq, void *dev_id, struct pt_regs *regs)
1010 struct mpsc_port_info *pi = dev_id;
1014 pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Received\n",pi->port.line);
1016 spin_lock_irqsave(&pi->port.lock, iflags);
1017 mpsc_sdma_intr_ack(pi);
1018 if (mpsc_rx_intr(pi, regs))
1020 if (mpsc_tx_intr(pi))
1022 spin_unlock_irqrestore(&pi->port.lock, iflags);
1024 pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Handled\n", pi->port.line);
1029 ******************************************************************************
1031 * serial_core.c Interface routines
1033 ******************************************************************************
/*
 * mpsc_tx_empty - serial_core callback: report TIOCSER_TEMT when the
 * SDMA Tx engine is idle, 0 otherwise. Checked under the port lock.
 */
1036 mpsc_tx_empty(struct uart_port *port)
1038 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1042 spin_lock_irqsave(&pi->port.lock, iflags);
1043 rc = mpsc_sdma_tx_active(pi) ? 0 : TIOCSER_TEMT;
1044 spin_unlock_irqrestore(&pi->port.lock, iflags);
/* mpsc_set_mctrl - serial_core callback; intentionally a no-op. */
1050 mpsc_set_mctrl(struct uart_port *port, uint mctrl)
1052 /* Have no way to set modem control lines AFAICT */
/*
 * mpsc_get_mctrl - serial_core callback: derive CTS/CD from CHR_10 (or
 * its mirror) and always report DSR since its state can't be read.
 * NOTE(review): the bit tests gating CTS/CAR are in lines missing from
 * this extract.
 */
1057 mpsc_get_mctrl(struct uart_port *port)
1059 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1062 status = (pi->mirror_regs) ? pi->MPSC_CHR_10_m :
1063 readl(pi->mpsc_base + MPSC_CHR_10);
1067 mflags |= TIOCM_CTS;
1069 mflags |= TIOCM_CAR;
1071 return mflags | TIOCM_DSR; /* No way to tell if DSR asserted */
/*
 * mpsc_stop_tx - serial_core callback to stop transmission (the actual
 * freeze call is in lines missing from this extract).
 */
1075 mpsc_stop_tx(struct uart_port *port)
1077 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1079 pr_debug("mpsc_stop_tx[%d]\n", port->line);
/*
 * mpsc_start_tx - serial_core callback: copy pending data into the Tx
 * ring and kick the SDMA Tx engine.
 */
1086 mpsc_start_tx(struct uart_port *port)
1088 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1091 mpsc_copy_tx_data(pi);
1092 mpsc_sdma_start_tx(pi);
1094 pr_debug("mpsc_start_tx[%d]\n", port->line);
/*
 * mpsc_start_rx - clear receive errors with a Receive Abort, resync the
 * receiver (hunt mode), then enable the SDMA Rx engine (ERD).
 */
1099 mpsc_start_rx(struct mpsc_port_info *pi)
1101 pr_debug("mpsc_start_rx[%d]: Starting...\n", pi->port.line);
1103 /* Issue a Receive Abort to clear any receive errors */
1104 writel(MPSC_CHR_2_RA, pi->mpsc_base + MPSC_CHR_2);
1106 mpsc_enter_hunt(pi);
1107 mpsc_sdma_cmd(pi, SDMA_SDCM_ERD);
/*
 * mpsc_stop_rx - serial_core callback: abort any in-progress SDMA
 * receive (AR command).
 */
1113 mpsc_stop_rx(struct uart_port *port)
1115 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1117 pr_debug("mpsc_stop_rx[%d]: Stopping...\n", port->line);
1119 mpsc_sdma_cmd(pi, SDMA_SDCM_AR);
/* mpsc_enable_ms - modem-status interrupts are not supported; no-op. */
1124 mpsc_enable_ms(struct uart_port *port)
1126 return; /* Not supported */
/*
 * mpsc_break_ctl - serial_core callback: assert (ctl != 0) or clear a
 * break condition via the TCS-count field of CHR_1, mirrored when the
 * register-read erratum applies. Done under the port lock.
 */
1130 mpsc_break_ctl(struct uart_port *port, int ctl)
1132 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
/* 0x00ff0000 = max break-character count; 0 clears the break */
1136 v = ctl ? 0x00ff0000 : 0;
1138 spin_lock_irqsave(&pi->port.lock, flags);
1139 if (pi->mirror_regs)
1140 pi->MPSC_CHR_1_m = v;
1141 writel(v, pi->mpsc_base + MPSC_CHR_1);
1142 spin_unlock_irqrestore(&pi->port.lock, flags);
/*
 * mpsc_startup - serial_core open callback: allocate/init rings via
 * mpsc_make_ready(), register the (possibly shared) SDMA IRQ handler,
 * unmask SDMA interrupts and point the hardware at the current Rx
 * descriptor. Returns 0 on success.
 */
1148 mpsc_startup(struct uart_port *port)
1150 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1154 pr_debug("mpsc_startup[%d]: Starting up MPSC, irq: %d\n",
1155 port->line, pi->port.irq);
1157 if ((rc = mpsc_make_ready(pi)) == 0) {
1158 /* Setup IRQ handler */
1159 mpsc_sdma_intr_ack(pi);
1161 /* If irq's are shared, need to set flag */
1162 if (mpsc_ports[0].port.irq == mpsc_ports[1].port.irq)
1165 if (request_irq(pi->port.irq, mpsc_sdma_intr, flag,
1167 printk(KERN_ERR "MPSC: Can't get SDMA IRQ %d\n",
1170 mpsc_sdma_intr_unmask(pi, 0xf);
1171 mpsc_sdma_set_rx_ring(pi, (struct mpsc_rx_desc *)(pi->rxr_p +
1172 (pi->rxr_posn * MPSC_RXRE_SIZE)));
/*
 * mpsc_shutdown - serial_core close callback: release the SDMA IRQ
 * (SDMA stop appears in lines missing from this extract).
 */
1179 mpsc_shutdown(struct uart_port *port)
1181 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1183 pr_debug("mpsc_shutdown[%d]: Shutting down MPSC\n", port->line);
1186 free_irq(pi->port.irq, pi);
/*
 * mpsc_set_termios - serial_core callback: translate termios settings
 * (character size, stop bits, parity, baud) into MPSC register values
 * and build the read/ignore status masks used by the Rx path. All
 * hardware programming is done under the port lock.
 */
1191 mpsc_set_termios(struct uart_port *port, struct termios *termios,
1192 struct termios *old)
1194 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1197 u32 chr_bits, stop_bits, par;
1199 pi->c_iflag = termios->c_iflag;
1200 pi->c_cflag = termios->c_cflag;
/* Map CSIZE to the MPCR character-length encoding */
1202 switch (termios->c_cflag & CSIZE) {
1204 chr_bits = MPSC_MPCR_CL_5;
1207 chr_bits = MPSC_MPCR_CL_6;
1210 chr_bits = MPSC_MPCR_CL_7;
1214 chr_bits = MPSC_MPCR_CL_8;
1218 if (termios->c_cflag & CSTOPB)
1219 stop_bits = MPSC_MPCR_SBL_2;
1221 stop_bits = MPSC_MPCR_SBL_1;
/* Parity selection: even is the default when PARENB without PARODD */
1223 par = MPSC_CHR_2_PAR_EVEN;
1224 if (termios->c_cflag & PARENB)
1225 if (termios->c_cflag & PARODD)
1226 par = MPSC_CHR_2_PAR_ODD;
1228 if (termios->c_cflag & CMSPAR) {
1229 if (termios->c_cflag & PARODD)
1230 par = MPSC_CHR_2_PAR_MARK;
1232 par = MPSC_CHR_2_PAR_SPACE;
1236 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk);
1238 spin_lock_irqsave(&pi->port.lock, flags);
1240 uart_update_timeout(port, termios->c_cflag, baud);
1242 mpsc_set_char_length(pi, chr_bits);
1243 mpsc_set_stop_bit_length(pi, stop_bits);
1244 mpsc_set_parity(pi, par);
1245 mpsc_set_baudrate(pi, baud);
1247 /* Characters/events to read */
1249 pi->port.read_status_mask = SDMA_DESC_CMDSTAT_OR;
1251 if (termios->c_iflag & INPCK)
1252 pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE |
1253 SDMA_DESC_CMDSTAT_FR;
1255 if (termios->c_iflag & (BRKINT | PARMRK))
1256 pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;
1258 /* Characters/events to ignore */
1259 pi->port.ignore_status_mask = 0;
1261 if (termios->c_iflag & IGNPAR)
1262 pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_PE |
1263 SDMA_DESC_CMDSTAT_FR;
1265 if (termios->c_iflag & IGNBRK) {
1266 pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_BR;
/* Only ignore overruns on break when parity errors are ignored too */
1268 if (termios->c_iflag & IGNPAR)
1269 pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_OR;
1272 /* Ignore all chars if CREAD not set */
1273 if (!(termios->c_cflag & CREAD))
1278 spin_unlock_irqrestore(&pi->port.lock, flags);
/* mpsc_type - serial_core callback: name of this port type. */
1283 mpsc_type(struct uart_port *port)
1285 pr_debug("mpsc_type[%d]: port type: %s\n", port->line,MPSC_DRIVER_NAME);
1286 return MPSC_DRIVER_NAME;
/* mpsc_request_port - serial_core callback; placeholder (see comment). */
1290 mpsc_request_port(struct uart_port *port)
1292 /* Should make chip/platform specific call */
/*
 * mpsc_release_port - serial_core callback: tear down and free the
 * descriptor rings and their DMA region.
 */
1297 mpsc_release_port(struct uart_port *port)
1299 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1302 mpsc_uninit_rings(pi);
1303 mpsc_free_ring_mem(pi);
/* mpsc_config_port - serial_core callback (body not visible in this
 * extract). */
1311 mpsc_config_port(struct uart_port *port, int flags)
/*
 * mpsc_verify_port - serial_core callback: validate a user-supplied
 * serial_struct against this port's fixed configuration; each mismatch
 * path sets an error return (return statements are in lines missing
 * from this extract).
 */
1317 mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
1319 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1322 pr_debug("mpsc_verify_port[%d]: Verifying port data\n", pi->port.line);
1324 if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPSC)
1326 else if (pi->port.irq != ser->irq)
1328 else if (ser->io_type != SERIAL_IO_MEM)
1330 else if (pi->port.uartclk / 16 != ser->baud_base) /* Not sure */
1332 else if ((void *)pi->port.mapbase != ser->iomem_base)
1334 else if (pi->port.iobase != ser->port)
1336 else if (ser->hub6 != 0)
/* serial_core operations table wiring the callbacks above into the
 * uart layer. */
1342 static struct uart_ops mpsc_pops = {
1343 .tx_empty = mpsc_tx_empty,
1344 .set_mctrl = mpsc_set_mctrl,
1345 .get_mctrl = mpsc_get_mctrl,
1346 .stop_tx = mpsc_stop_tx,
1347 .start_tx = mpsc_start_tx,
1348 .stop_rx = mpsc_stop_rx,
1349 .enable_ms = mpsc_enable_ms,
1350 .break_ctl = mpsc_break_ctl,
1351 .startup = mpsc_startup,
1352 .shutdown = mpsc_shutdown,
1353 .set_termios = mpsc_set_termios,
1355 .release_port = mpsc_release_port,
1356 .request_port = mpsc_request_port,
1357 .config_port = mpsc_config_port,
1358 .verify_port = mpsc_verify_port,
1362 ******************************************************************************
1364 * Console Interface Routines
1366 ******************************************************************************
1369 #ifdef CONFIG_SERIAL_MPSC_CONSOLE
/*
 * mpsc_console_write - console output: wait for the Tx engine to idle,
 * copy up to one Tx buffer entry of text (inserting '\r' after '\n'),
 * flush it for the non-coherent SDMA, send it synchronously, and
 * advance head/tail together so the ring stays balanced.
 */
1371 mpsc_console_write(struct console *co, const char *s, uint count)
1373 struct mpsc_port_info *pi = &mpsc_ports[co->index];
1374 u8 *bp, *dp, add_cr = 0;
/* Busy-wait until any in-flight transmit finishes */
1377 while (mpsc_sdma_tx_active(pi))
1381 bp = dp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
1383 for (i = 0; i < MPSC_TXBE_SIZE; i++) {
1394 if (*(s++) == '\n') { /* add '\r' after '\n' */
1403 dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
1404 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1405 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1406 flush_dcache_range((ulong)bp,
1407 (ulong)bp + MPSC_TXBE_SIZE);
/* No completion interrupt (intr=0); we poll for completion instead */
1409 mpsc_setup_tx_desc(pi, i, 0);
1410 pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
1411 mpsc_sdma_start_tx(pi);
1413 while (mpsc_sdma_tx_active(pi))
1416 pi->txr_tail = (pi->txr_tail + 1) & (MPSC_TXR_ENTRIES - 1);
/*
 * mpsc_console_setup - console init: validate the index, take per-port
 * defaults, override from the kernel command-line options string if
 * given, and hand off to uart_set_options().
 */
1423 mpsc_console_setup(struct console *co, char *options)
1425 struct mpsc_port_info *pi;
1426 int baud, bits, parity, flow;
1428 pr_debug("mpsc_console_setup[%d]: options: %s\n", co->index, options);
1430 if (co->index >= MPSC_NUM_CTLRS)
1433 pi = &mpsc_ports[co->index];
1435 baud = pi->default_baud;
1436 bits = pi->default_bits;
1437 parity = pi->default_parity;
1438 flow = pi->default_flow;
1443 spin_lock_init(&pi->port.lock); /* Temporary fix--copied from 8250.c */
1446 uart_parse_options(options, &baud, &parity, &bits, &flow);
1448 return uart_set_options(&pi->port, co, baud, parity, bits, flow);
/*
 * Console descriptor for the MPSC ports; registered by
 * mpsc_late_console_init() below.  (The closing lines of this
 * initializer are not visible in this listing.)
 */
1451 static struct console mpsc_console = {
1452 	.name	= MPSC_DEV_NAME,
1453 	.write	= mpsc_console_write,
1454 	.device	= uart_console_device,
1455 	.setup	= mpsc_console_setup,
1456 	.flags	= CON_PRINTBUFFER,
/*
 * mpsc_late_console_init - late_initcall hook.
 * Registers the MPSC console if it was not already enabled earlier
 * (e.g., by an explicit console= boot parameter).
 */
1462 mpsc_late_console_init(void)
1464 	pr_debug("mpsc_late_console_init: Enter\n");
	/* Only register if the console isn't already live */
1466 	if (!(mpsc_console.flags & CON_ENABLED))
1467 		register_console(&mpsc_console);
1471 late_initcall(mpsc_late_console_init);
/*
 * MPSC_CONSOLE resolves to the console descriptor when
 * CONFIG_SERIAL_MPSC_CONSOLE is set, NULL otherwise; used for
 * uart_driver.cons below.  (The #else/#endif lines sit between these
 * two defines in the original file but are not visible in this listing.)
 */
1473 #define MPSC_CONSOLE	&mpsc_console
1475 #define MPSC_CONSOLE	NULL
1478 ******************************************************************************
1480 * Dummy Platform Driver to extract & map shared register regions
1482 ******************************************************************************
/*
 * mpsc_resource_err - log a warning naming the platform resource
 * ('s') that was missing or could not be claimed.
 */
1485 mpsc_resource_err(char *s)
1487 	printk(KERN_WARNING "MPSC: Platform device resource error in %s\n", s);
/*
 * mpsc_shared_map_regs - claim and ioremap the register regions shared
 * by all MPSC ports: the MPSC routing registers and the SDMA interrupt
 * cause/mask registers.  Physical base addresses are cached in
 * mpsc_shared_regs for later release.  On a failure for the second
 * region, the first is unwound before reporting the error.
 * NOTE(review): the return statements and closing braces of this
 * function are not visible in this listing.
 */
1492 mpsc_shared_map_regs(struct platform_device *pd)
	/* Routing register block: get resource, reserve, then map */
1496 	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
1497 			MPSC_ROUTING_BASE_ORDER)) && request_mem_region(r->start,
1498 			MPSC_ROUTING_REG_BLOCK_SIZE, "mpsc_routing_regs")) {
1500 		mpsc_shared_regs.mpsc_routing_base = ioremap(r->start,
1501 			MPSC_ROUTING_REG_BLOCK_SIZE);
1502 		mpsc_shared_regs.mpsc_routing_base_p = r->start;
1505 		mpsc_resource_err("MPSC routing base");
	/* SDMA interrupt register block: same pattern */
1509 	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
1510 			MPSC_SDMA_INTR_BASE_ORDER)) && request_mem_region(r->start,
1511 			MPSC_SDMA_INTR_REG_BLOCK_SIZE, "sdma_intr_regs")) {
1513 		mpsc_shared_regs.sdma_intr_base = ioremap(r->start,
1514 			MPSC_SDMA_INTR_REG_BLOCK_SIZE);
1515 		mpsc_shared_regs.sdma_intr_base_p = r->start;
	/* Second region failed: unwind the routing mapping before erroring */
1518 		iounmap(mpsc_shared_regs.mpsc_routing_base);
1519 		release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
1520 			MPSC_ROUTING_REG_BLOCK_SIZE);
1521 		mpsc_resource_err("SDMA intr base");
1529 mpsc_shared_unmap_regs(void)
1531 if (!mpsc_shared_regs.mpsc_routing_base) {
1532 iounmap(mpsc_shared_regs.mpsc_routing_base);
1533 release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
1534 MPSC_ROUTING_REG_BLOCK_SIZE);
1536 if (!mpsc_shared_regs.sdma_intr_base) {
1537 iounmap(mpsc_shared_regs.sdma_intr_base);
1538 release_mem_region(mpsc_shared_regs.sdma_intr_base_p,
1539 MPSC_SDMA_INTR_REG_BLOCK_SIZE);
1542 mpsc_shared_regs.mpsc_routing_base = NULL;
1543 mpsc_shared_regs.sdma_intr_base = NULL;
1545 mpsc_shared_regs.mpsc_routing_base_p = 0;
1546 mpsc_shared_regs.sdma_intr_base_p = 0;
/*
 * mpsc_shared_drv_probe - probe for the dummy shared-regs platform
 * device.  Maps the shared register regions and, on success, seeds the
 * mirrored register values (MRR, RCRR, TCRR, SDMA intr cause/mask)
 * from the platform data -- these mirrors exist to work around chips
 * whose registers cannot be read back (see file header erratum #1).
 */
1552 mpsc_shared_drv_probe(struct device *dev)
1554 	struct platform_device *pd = to_platform_device(dev);
1555 	struct mpsc_shared_pdata	*pdata;
1559 		if (!(rc = mpsc_shared_map_regs(pd))) {
1560 			pdata = (struct mpsc_shared_pdata *)dev->platform_data;
	/* Initial values for the locally-mirrored (unreadable) registers */
1562 			mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val;
1563 			mpsc_shared_regs.MPSC_RCRR_m= pdata->rcrr_val;
1564 			mpsc_shared_regs.MPSC_TCRR_m= pdata->tcrr_val;
1565 			mpsc_shared_regs.SDMA_INTR_CAUSE_m =
1566 				pdata->intr_cause_val;
1567 			mpsc_shared_regs.SDMA_INTR_MASK_m =
1568 				pdata->intr_mask_val;
/*
 * mpsc_shared_drv_remove - remove hook for the shared-regs device.
 * Unmaps the shared register regions and zeroes the mirrored register
 * values seeded by mpsc_shared_drv_probe().
 */
1578 mpsc_shared_drv_remove(struct device *dev)
1580 	struct platform_device *pd = to_platform_device(dev);
1584 		mpsc_shared_unmap_regs();
1585 		mpsc_shared_regs.MPSC_MRR_m = 0;
1586 		mpsc_shared_regs.MPSC_RCRR_m = 0;
1587 		mpsc_shared_regs.MPSC_TCRR_m = 0;
1588 		mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0;
1589 		mpsc_shared_regs.SDMA_INTR_MASK_m = 0;
/*
 * Platform driver binding for the dummy shared-register device
 * (MPSC_SHARED_NAME); exists only to map/unmap the shared regions.
 */
1596 static struct device_driver mpsc_shared_driver = {
1597 	.name	= MPSC_SHARED_NAME,
1598 	.bus	= &platform_bus_type,
1599 	.probe	= mpsc_shared_drv_probe,
1600 	.remove	= mpsc_shared_drv_remove,
1604 ******************************************************************************
1606 * Driver Interface Routines
1608 ******************************************************************************
/*
 * uart_driver descriptor registered with the serial core; covers all
 * MPSC_NUM_CTLRS ports and attaches the console (or NULL when console
 * support is not configured).
 */
1610 static struct uart_driver mpsc_reg = {
1611 	.owner       = THIS_MODULE,
1612 	.driver_name = MPSC_DRIVER_NAME,
1613 	.devfs_name  = MPSC_DEVFS_NAME,
1614 	.dev_name    = MPSC_DEV_NAME,
1615 	.major       = MPSC_MAJOR,
1616 	.minor       = MPSC_MINOR_START,
1617 	.nr          = MPSC_NUM_CTLRS,
1618 	.cons        = MPSC_CONSOLE,
/*
 * mpsc_drv_map_regs - claim and ioremap the three per-port register
 * blocks (MPSC core, SDMA, BRG), caching virtual and physical bases in
 * the port's mpsc_port_info for mpsc_drv_unmap_regs() to release.
 * NOTE(review): the error-unwind/return lines between the three
 * sections are not visible in this listing.
 */
1622 mpsc_drv_map_regs(struct mpsc_port_info *pi, struct platform_device *pd)
	/* MPSC core register block */
1626 	if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_BASE_ORDER)) &&
1627 		request_mem_region(r->start, MPSC_REG_BLOCK_SIZE, "mpsc_regs")){
1629 		pi->mpsc_base = ioremap(r->start, MPSC_REG_BLOCK_SIZE);
1630 		pi->mpsc_base_p = r->start;
1633 		mpsc_resource_err("MPSC base");
	/* SDMA register block */
1637 	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
1638 		MPSC_SDMA_BASE_ORDER)) && request_mem_region(r->start,
1639 		MPSC_SDMA_REG_BLOCK_SIZE, "sdma_regs")) {
1641 		pi->sdma_base = ioremap(r->start,MPSC_SDMA_REG_BLOCK_SIZE);
1642 		pi->sdma_base_p = r->start;
1645 		mpsc_resource_err("SDMA base");
	/* Baud Rate Generator register block */
1649 	if ((r = platform_get_resource(pd,IORESOURCE_MEM,MPSC_BRG_BASE_ORDER))
1650 		&& request_mem_region(r->start, MPSC_BRG_REG_BLOCK_SIZE,
1653 		pi->brg_base = ioremap(r->start, MPSC_BRG_REG_BLOCK_SIZE);
1654 		pi->brg_base_p = r->start;
1657 		mpsc_resource_err("BRG base");
1665 mpsc_drv_unmap_regs(struct mpsc_port_info *pi)
1667 if (!pi->mpsc_base) {
1668 iounmap(pi->mpsc_base);
1669 release_mem_region(pi->mpsc_base_p, MPSC_REG_BLOCK_SIZE);
1671 if (!pi->sdma_base) {
1672 iounmap(pi->sdma_base);
1673 release_mem_region(pi->sdma_base_p, MPSC_SDMA_REG_BLOCK_SIZE);
1675 if (!pi->brg_base) {
1676 iounmap(pi->brg_base);
1677 release_mem_region(pi->brg_base_p, MPSC_BRG_REG_BLOCK_SIZE);
1680 pi->mpsc_base = NULL;
1681 pi->sdma_base = NULL;
1682 pi->brg_base = NULL;
1684 pi->mpsc_base_p = 0;
1685 pi->sdma_base_p = 0;
/*
 * mpsc_drv_get_platform_data - populate a port's mpsc_port_info and
 * embedded uart_port from the platform device's mpsc_pdata: clocking,
 * erratum-workaround flags, default line settings, initial mirrored
 * register values, a pointer to the shared regs, and the IRQ.
 * 'num' becomes the uart line number.
 */
1692 mpsc_drv_get_platform_data(struct mpsc_port_info *pi,
1693 	struct platform_device *pd, int num)
1695 	struct mpsc_pdata	*pdata;
1697 	pdata = (struct mpsc_pdata *)pd->dev.platform_data;
	/* serial-core uart_port setup */
1699 	pi->port.uartclk = pdata->brg_clk_freq;
1700 	pi->port.iotype = UPIO_MEM;
1701 	pi->port.line = num;
1702 	pi->port.type = PORT_MPSC;
1703 	pi->port.fifosize = MPSC_TXBE_SIZE;
1704 	pi->port.membase = pi->mpsc_base;
1705 	pi->port.mapbase = (ulong)pi->mpsc_base;
1706 	pi->port.ops = &mpsc_pops;
	/* per-chip erratum workarounds and BRG/idle configuration */
1708 	pi->mirror_regs = pdata->mirror_regs;
1709 	pi->cache_mgmt = pdata->cache_mgmt;
1710 	pi->brg_can_tune = pdata->brg_can_tune;
1711 	pi->brg_clk_src = pdata->brg_clk_src;
1712 	pi->mpsc_max_idle = pdata->max_idle;
1713 	pi->default_baud = pdata->default_baud;
1714 	pi->default_bits = pdata->default_bits;
1715 	pi->default_parity = pdata->default_parity;
1716 	pi->default_flow = pdata->default_flow;
1718 	/* Initial values of mirrored regs */
1719 	pi->MPSC_CHR_1_m = pdata->chr_1_val;
1720 	pi->MPSC_CHR_2_m = pdata->chr_2_val;
1721 	pi->MPSC_CHR_10_m = pdata->chr_10_val;
1722 	pi->MPSC_MPCR_m = pdata->mpcr_val;
1723 	pi->BRG_BCR_m = pdata->bcr_val;
1725 	pi->shared_regs = &mpsc_shared_regs;
1727 	pi->port.irq = platform_get_irq(pd, 0);
/*
 * mpsc_drv_probe - probe for one MPSC controller platform device.
 * Validates pd->id, maps the port's register blocks, pulls in the
 * platform data, readies the port hardware and registers it with the
 * serial core; register mappings are unwound on any failure.
 * NOTE(review): the error-path and return lines are partially missing
 * from this listing.
 */
1733 mpsc_drv_probe(struct device *dev)
1735 	struct platform_device	*pd = to_platform_device(dev);
1736 	struct mpsc_port_info	*pi;
1739 	pr_debug("mpsc_drv_probe: Adding MPSC %d\n", pd->id);
1741 	if (pd->id < MPSC_NUM_CTLRS) {
1742 		pi = &mpsc_ports[pd->id];
1744 		if (!(rc = mpsc_drv_map_regs(pi, pd))) {
1745 			mpsc_drv_get_platform_data(pi, pd, pd->id);
1747 			if (!(rc = mpsc_make_ready(pi)))
1748 				if (!(rc = uart_add_one_port(&mpsc_reg,
1753 						(struct uart_port *)pi);
1754 					mpsc_drv_unmap_regs(pi);
1757 				mpsc_drv_unmap_regs(pi);
/*
 * mpsc_drv_remove - remove hook for one MPSC controller.
 * Unregisters the port from the serial core, releases the port's DMA
 * rings/buffers, and unmaps its register blocks.
 */
1765 mpsc_drv_remove(struct device *dev)
1767 	struct platform_device	*pd = to_platform_device(dev);
1769 	pr_debug("mpsc_drv_exit: Removing MPSC %d\n", pd->id);
1771 	if (pd->id < MPSC_NUM_CTLRS) {
1772 		uart_remove_one_port(&mpsc_reg, &mpsc_ports[pd->id].port);
1773 		mpsc_release_port((struct uart_port *)&mpsc_ports[pd->id].port);
1774 		mpsc_drv_unmap_regs(&mpsc_ports[pd->id]);
/*
 * Platform driver binding for the per-port MPSC controller devices
 * (MPSC_CTLR_NAME).
 */
1781 static struct device_driver mpsc_driver = {
1782 	.name	= MPSC_CTLR_NAME,
1783 	.bus	= &platform_bus_type,
1784 	.probe	= mpsc_drv_probe,
1785 	.remove	= mpsc_drv_remove,
/*
 * mpsc_drv_init body (the function header line is not visible in this
 * listing): zeroes the global port and shared-regs state, registers
 * the uart driver and both platform drivers, and unwinds in reverse
 * order if any registration fails.
 */
1793 	printk(KERN_INFO "Serial: MPSC driver $Revision: 1.00 $\n");
	/* Start from clean global state */
1795 	memset(mpsc_ports, 0, sizeof(mpsc_ports));
1796 	memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
	/* Register uart driver first, then the two platform drivers;
	 * unwind in reverse order on failure.
	 */
1798 	if (!(rc = uart_register_driver(&mpsc_reg))) {
1799 		if (!(rc = driver_register(&mpsc_shared_driver))) {
1800 			if ((rc = driver_register(&mpsc_driver))) {
1801 				driver_unregister(&mpsc_shared_driver);
1802 				uart_unregister_driver(&mpsc_reg);
1806 			uart_unregister_driver(&mpsc_reg);
/*
 * mpsc_drv_exit body (the function header line is not visible in this
 * listing): unregisters both platform drivers and the uart driver,
 * then zeroes the global state for a clean reload.
 */
1816 	driver_unregister(&mpsc_driver);
1817 	driver_unregister(&mpsc_shared_driver);
1818 	uart_unregister_driver(&mpsc_reg);
1819 	memset(mpsc_ports, 0, sizeof(mpsc_ports));
1820 	memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
/* Module entry/exit points and standard module metadata. */
1824 module_init(mpsc_drv_init);
1825 module_exit(mpsc_drv_exit);
1827 MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>");
1828 MODULE_DESCRIPTION("Generic Marvell MPSC serial/UART driver $Revision: 1.00 $");
1829 MODULE_VERSION(MPSC_VERSION);
1830 MODULE_LICENSE("GPL");
1831 MODULE_ALIAS_CHARDEV_MAJOR(MPSC_MAJOR);