2 * Simple synchronous serial port driver for ETRAX FS and Artpec-3.
4 * Copyright (c) 2005 Axis Communications AB
6 * Author: Mikael Starvik
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/errno.h>
14 #include <linux/major.h>
15 #include <linux/sched.h>
16 #include <linux/slab.h>
17 #include <linux/interrupt.h>
18 #include <linux/poll.h>
19 #include <linux/init.h>
20 #include <linux/timer.h>
21 #include <linux/spinlock.h>
26 #include <hwregs/reg_rdwr.h>
27 #include <hwregs/sser_defs.h>
28 #include <hwregs/dma_defs.h>
29 #include <hwregs/dma.h>
30 #include <hwregs/intr_vect_defs.h>
31 #include <hwregs/intr_vect.h>
32 #include <hwregs/reg_map.h>
33 #include <asm/sync_serial.h>
36 /* The receiver is a bit tricky because of the continuous stream of data.*/
38 /* Three DMA descriptors are linked together. Each DMA descriptor is */
39 /* responsible for port->bufchunk of a common buffer. */
41 /* +---------------------------------------------+ */
42 /* | +----------+ +----------+ +----------+ | */
43 /* +-> | Descr[0] |-->| Descr[1] |-->| Descr[2] |-+ */
44 /* +----------+ +----------+ +----------+ */
47 /* +-------------------------------------+ */
49 /* +-------------------------------------+ */
50 /* |<- data_avail ->| */
53 /* If the application keeps up the pace readp will be right after writep.*/
54 /* If the application can't keep the pace we have to throw away data. */
55 /* The idea is that readp should be ready with the data pointed out by */
56 /* Descr[i] when the DMA has filled in Descr[i+1]. */
57 /* Otherwise we will discard */
58 /* the rest of the data pointed out by Descr1 and set readp to the start */
61 #define SYNC_SERIAL_MAJOR 125
63 /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
64 /* words can be handled */
65 #define IN_BUFFER_SIZE 12288
66 #define IN_DESCR_SIZE 256
67 #define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE)
69 #define OUT_BUFFER_SIZE 1024*8
70 #define NBR_OUT_DESCR 8
72 #define DEFAULT_FRAME_RATE 0
73 #define DEFAULT_WORD_RATE 7
75 /* NOTE: Enabling some debug will likely cause overrun or underrun,
76 * especially if manual mode is used.
85 #define DEBUGOUTBUF(x)
87 typedef struct sync_port
89 reg_scope_instances regi_sser;
90 reg_scope_instances regi_dmain;
91 reg_scope_instances regi_dmaout;
93 char started; /* 1 if port has been started */
94 char port_nbr; /* Port 0 or 1 */
95 char busy; /* 1 if port is busy */
97 char enabled; /* 1 if port is enabled */
98 char use_dma; /* 1 if port uses dma */
105 /* Next byte to be read by application */
106 volatile unsigned char *volatile readp;
107 /* Next byte to be written by etrax */
108 volatile unsigned char *volatile writep;
110 unsigned int in_buffer_size;
111 unsigned int inbufchunk;
112 unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32)));
113 unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32)));
114 unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
115 struct dma_descr_data* next_rx_desc;
116 struct dma_descr_data* prev_rx_desc;
118 /* Pointer to the first available descriptor in the ring,
119 * unless active_tr_descr == catch_tr_descr and a dma
120 * transfer is active */
121 struct dma_descr_data *active_tr_descr;
123 /* Pointer to the first allocated descriptor in the ring */
124 struct dma_descr_data *catch_tr_descr;
126 /* Pointer to the descriptor with the current end-of-list */
127 struct dma_descr_data *prev_tr_descr;
130 /* Pointer to the first byte being read by DMA
131 * or current position in out_buffer if not using DMA. */
132 unsigned char *out_rd_ptr;
134 /* Number of bytes currently locked for being read by DMA */
137 dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16)));
138 dma_descr_context in_context __attribute__ ((__aligned__(32)));
139 dma_descr_data out_descr[NBR_OUT_DESCR]
140 __attribute__ ((__aligned__(16)));
141 dma_descr_context out_context __attribute__ ((__aligned__(32)));
142 wait_queue_head_t out_wait_q;
143 wait_queue_head_t in_wait_q;
148 static int etrax_sync_serial_init(void);
149 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
150 static void initialize_port(int portnbr);
152 static inline int sync_data_avail(struct sync_port *port);
154 static int sync_serial_open(struct inode *, struct file*);
155 static int sync_serial_release(struct inode*, struct file*);
156 static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
158 static int sync_serial_ioctl(struct inode*, struct file*,
159 unsigned int cmd, unsigned long arg);
160 static ssize_t sync_serial_write(struct file * file, const char * buf,
161 size_t count, loff_t *ppos);
162 static ssize_t sync_serial_read(struct file *file, char *buf,
163 size_t count, loff_t *ppos);
165 #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
166 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
167 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
168 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
172 static void send_word(sync_port* port);
173 static void start_dma_out(struct sync_port *port, const char *data, int count);
174 static void start_dma_in(sync_port* port);
176 static irqreturn_t tr_interrupt(int irq, void *dev_id);
177 static irqreturn_t rx_interrupt(int irq, void *dev_id);
180 #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
181 !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
182 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
183 !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
184 #define SYNC_SER_MANUAL
186 #ifdef SYNC_SER_MANUAL
187 static irqreturn_t manual_interrupt(int irq, void *dev_id);
190 #ifdef CONFIG_ETRAXFS /* ETRAX FS */
191 #define OUT_DMA_NBR 4
193 #define PINMUX_SSER pinmux_sser0
194 #define SYNCSER_INST regi_sser0
195 #define SYNCSER_INTR_VECT SSER0_INTR_VECT
196 #define OUT_DMA_INST regi_dma4
197 #define IN_DMA_INST regi_dma5
198 #define DMA_OUT_INTR_VECT DMA4_INTR_VECT
199 #define DMA_IN_INTR_VECT DMA5_INTR_VECT
200 #define REQ_DMA_SYNCSER dma_sser0
202 #define OUT_DMA_NBR 6
204 #define PINMUX_SSER pinmux_sser
205 #define SYNCSER_INST regi_sser
206 #define SYNCSER_INTR_VECT SSER_INTR_VECT
207 #define OUT_DMA_INST regi_dma6
208 #define IN_DMA_INST regi_dma7
209 #define DMA_OUT_INTR_VECT DMA6_INTR_VECT
210 #define DMA_IN_INTR_VECT DMA7_INTR_VECT
211 #define REQ_DMA_SYNCSER dma_sser
215 static struct sync_port ports[]=
218 .regi_sser = SYNCSER_INST,
219 .regi_dmaout = OUT_DMA_INST,
220 .regi_dmain = IN_DMA_INST,
221 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
227 #ifdef CONFIG_ETRAXFS
231 .regi_sser = regi_sser1,
232 .regi_dmaout = regi_dma6,
233 .regi_dmain = regi_dma7,
234 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
243 #define NBR_PORTS ARRAY_SIZE(ports)
245 static const struct file_operations sync_serial_fops = {
246 .owner = THIS_MODULE,
247 .write = sync_serial_write,
248 .read = sync_serial_read,
249 .poll = sync_serial_poll,
250 .ioctl = sync_serial_ioctl,
251 .open = sync_serial_open,
252 .release = sync_serial_release
255 static int __init etrax_sync_serial_init(void)
257 ports[0].enabled = 0;
258 #ifdef CONFIG_ETRAXFS
259 ports[1].enabled = 0;
261 if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial",
262 &sync_serial_fops) < 0) {
264 "Unable to get major for synchronous serial port\n");
268 /* Initialize Ports */
269 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
270 if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) {
272 "Unable to alloc pins for synchronous serial port 0\n");
275 ports[0].enabled = 1;
279 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
280 if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) {
282 "Unable to alloc pins for synchronous serial port 0\n");
285 ports[1].enabled = 1;
289 #ifdef CONFIG_ETRAXFS
290 printk(KERN_INFO "ETRAX FS synchronous serial port driver\n");
292 printk(KERN_INFO "Artpec-3 synchronous serial port driver\n");
297 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
298 static void __init initialize_port(int portnbr)
300 int __attribute__((unused)) i;
301 struct sync_port *port = &ports[portnbr];
302 reg_sser_rw_cfg cfg = {0};
303 reg_sser_rw_frm_cfg frm_cfg = {0};
304 reg_sser_rw_tr_cfg tr_cfg = {0};
305 reg_sser_rw_rec_cfg rec_cfg = {0};
307 DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr));
309 port->port_nbr = portnbr;
312 port->out_rd_ptr = port->out_buffer;
313 port->out_buf_count = 0;
318 port->readp = port->flip;
319 port->writep = port->flip;
320 port->in_buffer_size = IN_BUFFER_SIZE;
321 port->inbufchunk = IN_DESCR_SIZE;
322 port->next_rx_desc = &port->in_descr[0];
323 port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1];
324 port->prev_rx_desc->eol = 1;
326 init_waitqueue_head(&port->out_wait_q);
327 init_waitqueue_head(&port->in_wait_q);
329 spin_lock_init(&port->lock);
331 cfg.out_clk_src = regk_sser_intern_clk;
332 cfg.out_clk_pol = regk_sser_pos;
333 cfg.clk_od_mode = regk_sser_no;
334 cfg.clk_dir = regk_sser_out;
335 cfg.gate_clk = regk_sser_no;
336 cfg.base_freq = regk_sser_f29_493;
338 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
340 frm_cfg.wordrate = DEFAULT_WORD_RATE;
341 frm_cfg.type = regk_sser_edge;
342 frm_cfg.frame_pin_dir = regk_sser_out;
343 frm_cfg.frame_pin_use = regk_sser_frm;
344 frm_cfg.status_pin_dir = regk_sser_in;
345 frm_cfg.status_pin_use = regk_sser_hold;
346 frm_cfg.out_on = regk_sser_tr;
347 frm_cfg.tr_delay = 1;
348 REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
350 tr_cfg.urun_stop = regk_sser_no;
351 tr_cfg.sample_size = 7;
352 tr_cfg.sh_dir = regk_sser_msbfirst;
353 tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
355 tr_cfg.rate_ctrl = regk_sser_bulk;
356 tr_cfg.data_pin_use = regk_sser_dout;
358 tr_cfg.rate_ctrl = regk_sser_iso;
359 tr_cfg.data_pin_use = regk_sser_dout;
361 tr_cfg.bulk_wspace = 1;
362 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
364 rec_cfg.sample_size = 7;
365 rec_cfg.sh_dir = regk_sser_msbfirst;
366 rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
367 rec_cfg.fifo_thr = regk_sser_inf;
368 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
371 /* Setup the descriptor ring for dma out/transmit. */
372 for (i = 0; i < NBR_OUT_DESCR; i++) {
373 port->out_descr[i].wait = 0;
374 port->out_descr[i].intr = 1;
375 port->out_descr[i].eol = 0;
376 port->out_descr[i].out_eop = 0;
377 port->out_descr[i].next =
378 (dma_descr_data *)virt_to_phys(&port->out_descr[i+1]);
381 /* Create a ring from the list. */
382 port->out_descr[NBR_OUT_DESCR-1].next =
383 (dma_descr_data *)virt_to_phys(&port->out_descr[0]);
385 /* Setup context for traversing the ring. */
386 port->active_tr_descr = &port->out_descr[0];
387 port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1];
388 port->catch_tr_descr = &port->out_descr[0];
393 static inline int sync_data_avail(struct sync_port *port)
396 unsigned char *start;
399 start = (unsigned char*)port->readp; /* cast away volatile */
400 end = (unsigned char*)port->writep; /* cast away volatile */
401 /* 0123456789 0123456789
409 avail = port->in_buffer_size - (start - end);
413 static inline int sync_data_avail_to_end(struct sync_port *port)
416 unsigned char *start;
419 start = (unsigned char*)port->readp; /* cast away volatile */
420 end = (unsigned char*)port->writep; /* cast away volatile */
421 /* 0123456789 0123456789
429 avail = port->flip + port->in_buffer_size - start;
433 static int sync_serial_open(struct inode *inode, struct file *file)
435 int dev = iminor(inode);
437 reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
438 reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};
440 DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));
442 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
444 DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
448 /* Allow open this device twice (assuming one reader and one writer) */
451 DEBUG(printk(KERN_DEBUG "Device is busy.. \n"));
456 if (port->init_irqs) {
458 if (port == &ports[0]) {
460 if (request_irq(DMA_OUT_INTR_VECT,
463 "synchronous serial 0 dma tr",
465 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
467 } else if (request_irq(DMA_IN_INTR_VECT,
470 "synchronous serial 1 dma rx",
472 free_irq(DMA_OUT_INTR_VECT, &port[0]);
473 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
475 } else if (crisv32_request_dma(OUT_DMA_NBR,
476 "synchronous serial 0 dma tr",
477 DMA_VERBOSE_ON_ERROR,
480 free_irq(DMA_OUT_INTR_VECT, &port[0]);
481 free_irq(DMA_IN_INTR_VECT, &port[0]);
482 printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
484 } else if (crisv32_request_dma(IN_DMA_NBR,
485 "synchronous serial 0 dma rec",
486 DMA_VERBOSE_ON_ERROR,
489 crisv32_free_dma(OUT_DMA_NBR);
490 free_irq(DMA_OUT_INTR_VECT, &port[0]);
491 free_irq(DMA_IN_INTR_VECT, &port[0]);
492 printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
497 #ifdef CONFIG_ETRAXFS
498 else if (port == &ports[1]) {
500 if (request_irq(DMA6_INTR_VECT,
503 "synchronous serial 1 dma tr",
505 printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
507 } else if (request_irq(DMA7_INTR_VECT,
510 "synchronous serial 1 dma rx",
512 free_irq(DMA6_INTR_VECT, &ports[1]);
513 printk(KERN_CRIT "Can't allocate sync serial port 3 IRQ");
515 } else if (crisv32_request_dma(
516 SYNC_SER1_TX_DMA_NBR,
517 "synchronous serial 1 dma tr",
518 DMA_VERBOSE_ON_ERROR,
521 free_irq(DMA6_INTR_VECT, &ports[1]);
522 free_irq(DMA7_INTR_VECT, &ports[1]);
523 printk(KERN_CRIT "Can't allocate sync serial port 3 TX DMA channel");
525 } else if (crisv32_request_dma(
526 SYNC_SER1_RX_DMA_NBR,
527 "synchronous serial 3 dma rec",
528 DMA_VERBOSE_ON_ERROR,
531 crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
532 free_irq(DMA6_INTR_VECT, &ports[1]);
533 free_irq(DMA7_INTR_VECT, &ports[1]);
534 printk(KERN_CRIT "Can't allocate sync serial port 3 RX DMA channel");
541 REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
542 REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
543 /* Enable DMA IRQs */
544 REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
545 REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
546 /* Set up wordsize = 1 for DMAs. */
547 DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1);
548 DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1);
552 } else { /* !port->use_dma */
553 #ifdef SYNC_SER_MANUAL
554 if (port == &ports[0]) {
555 if (request_irq(SYNCSER_INTR_VECT,
558 "synchronous serial manual irq",
560 printk("Can't allocate sync serial manual irq");
564 #ifdef CONFIG_ETRAXFS
565 else if (port == &ports[1]) {
566 if (request_irq(SSER1_INTR_VECT,
569 "synchronous serial manual irq",
571 printk(KERN_CRIT "Can't allocate sync serial manual irq");
578 panic("sync_serial: Manual mode not supported.\n");
579 #endif /* SYNC_SER_MANUAL */
582 } /* port->init_irqs */
588 static int sync_serial_release(struct inode *inode, struct file *file)
590 int dev = iminor(inode);
593 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
595 DEBUG(printk("Invalid minor %d\n", dev));
606 static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
608 int dev = iminor(file->f_path.dentry->d_inode);
609 unsigned int mask = 0;
611 DEBUGPOLL( static unsigned int prev_mask = 0; );
615 if (!port->started) {
616 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
617 reg_sser_rw_rec_cfg rec_cfg =
618 REG_RD(sser, port->regi_sser, rw_rec_cfg);
619 cfg.en = regk_sser_yes;
620 rec_cfg.rec_en = port->input;
621 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
622 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
626 poll_wait(file, &port->out_wait_q, wait);
627 poll_wait(file, &port->in_wait_q, wait);
629 /* No active transfer, descriptors are available */
630 if (port->output && !port->tr_running)
631 mask |= POLLOUT | POLLWRNORM;
633 /* Descriptor and buffer space available. */
635 port->active_tr_descr != port->catch_tr_descr &&
636 port->out_buf_count < OUT_BUFFER_SIZE)
637 mask |= POLLOUT | POLLWRNORM;
639 /* At least an inbufchunk of data */
640 if (port->input && sync_data_avail(port) >= port->inbufchunk)
641 mask |= POLLIN | POLLRDNORM;
643 DEBUGPOLL(if (mask != prev_mask)
644 printk("sync_serial_poll: mask 0x%08X %s %s\n", mask,
645 mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":"");
651 static int sync_serial_ioctl(struct inode *inode, struct file *file,
652 unsigned int cmd, unsigned long arg)
655 int dma_w_size = regk_dma_set_w_size1;
656 int dev = iminor(file->f_path.dentry->d_inode);
658 reg_sser_rw_tr_cfg tr_cfg;
659 reg_sser_rw_rec_cfg rec_cfg;
660 reg_sser_rw_frm_cfg frm_cfg;
661 reg_sser_rw_cfg gen_cfg;
662 reg_sser_rw_intr_mask intr_mask;
664 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
666 DEBUG(printk("Invalid minor %d\n", dev));
670 spin_lock_irq(&port->lock);
672 tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
673 rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
674 frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg);
675 gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
676 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
681 if (GET_SPEED(arg) == CODEC)
685 gen_cfg.base_freq = regk_sser_f32;
687 /* Clock divider will internally be
688 * gen_cfg.clk_div + 1.
691 freq = GET_FREQ(arg);
697 gen_cfg.clk_div = 125 *
698 (1 << (freq - FREQ_256kHz)) - 1;
701 gen_cfg.clk_div = 62;
706 gen_cfg.clk_div = 8 * (1 << freq) - 1;
710 gen_cfg.base_freq = regk_sser_f29_493;
711 switch (GET_SPEED(arg)) {
713 gen_cfg.clk_div = 29493000 / (150 * 8) - 1;
716 gen_cfg.clk_div = 29493000 / (300 * 8) - 1;
719 gen_cfg.clk_div = 29493000 / (600 * 8) - 1;
722 gen_cfg.clk_div = 29493000 / (1200 * 8) - 1;
725 gen_cfg.clk_div = 29493000 / (2400 * 8) - 1;
728 gen_cfg.clk_div = 29493000 / (4800 * 8) - 1;
731 gen_cfg.clk_div = 29493000 / (9600 * 8) - 1;
734 gen_cfg.clk_div = 29493000 / (19200 * 8) - 1;
737 gen_cfg.clk_div = 29493000 / (28800 * 8) - 1;
740 gen_cfg.clk_div = 29493000 / (57600 * 8) - 1;
743 gen_cfg.clk_div = 29493000 / (115200 * 8) - 1;
746 gen_cfg.clk_div = 29493000 / (230400 * 8) - 1;
749 gen_cfg.clk_div = 29493000 / (460800 * 8) - 1;
752 gen_cfg.clk_div = 29493000 / (921600 * 8) - 1;
755 gen_cfg.base_freq = regk_sser_f100;
756 gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1;
761 frm_cfg.wordrate = GET_WORD_RATE(arg);
770 frm_cfg.out_on = regk_sser_tr;
771 frm_cfg.frame_pin_dir = regk_sser_out;
772 gen_cfg.clk_dir = regk_sser_out;
777 frm_cfg.frame_pin_dir = regk_sser_in;
778 gen_cfg.clk_dir = regk_sser_in;
783 frm_cfg.frame_pin_dir = regk_sser_out;
784 frm_cfg.out_on = regk_sser_intern_tb;
785 gen_cfg.clk_dir = regk_sser_out;
790 frm_cfg.frame_pin_dir = regk_sser_in;
791 gen_cfg.clk_dir = regk_sser_in;
796 frm_cfg.frame_pin_dir = regk_sser_out;
797 frm_cfg.out_on = regk_sser_intern_tb;
798 gen_cfg.clk_dir = regk_sser_out;
803 frm_cfg.frame_pin_dir = regk_sser_in;
804 gen_cfg.clk_dir = regk_sser_in;
807 spin_unlock_irq(&port->lock);
810 if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT))
811 intr_mask.rdav = regk_sser_yes;
814 if (arg & NORMAL_SYNC) {
815 frm_cfg.rec_delay = 1;
816 frm_cfg.tr_delay = 1;
818 else if (arg & EARLY_SYNC)
819 frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
820 else if (arg & SECOND_WORD_SYNC) {
821 frm_cfg.rec_delay = 7;
822 frm_cfg.tr_delay = 1;
825 tr_cfg.bulk_wspace = frm_cfg.tr_delay;
826 frm_cfg.early_wend = regk_sser_yes;
828 frm_cfg.type = regk_sser_edge;
829 else if (arg & WORD_SYNC)
830 frm_cfg.type = regk_sser_level;
831 else if (arg & EXTENDED_SYNC)
832 frm_cfg.early_wend = regk_sser_no;
835 frm_cfg.frame_pin_use = regk_sser_frm;
836 else if (arg & SYNC_OFF)
837 frm_cfg.frame_pin_use = regk_sser_gio0;
839 dma_w_size = regk_dma_set_w_size2;
840 if (arg & WORD_SIZE_8) {
841 rec_cfg.sample_size = tr_cfg.sample_size = 7;
842 dma_w_size = regk_dma_set_w_size1;
843 } else if (arg & WORD_SIZE_12)
844 rec_cfg.sample_size = tr_cfg.sample_size = 11;
845 else if (arg & WORD_SIZE_16)
846 rec_cfg.sample_size = tr_cfg.sample_size = 15;
847 else if (arg & WORD_SIZE_24)
848 rec_cfg.sample_size = tr_cfg.sample_size = 23;
849 else if (arg & WORD_SIZE_32)
850 rec_cfg.sample_size = tr_cfg.sample_size = 31;
852 if (arg & BIT_ORDER_MSB)
853 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
854 else if (arg & BIT_ORDER_LSB)
855 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst;
857 if (arg & FLOW_CONTROL_ENABLE) {
858 frm_cfg.status_pin_use = regk_sser_frm;
859 rec_cfg.fifo_thr = regk_sser_thr16;
860 } else if (arg & FLOW_CONTROL_DISABLE) {
861 frm_cfg.status_pin_use = regk_sser_gio0;
862 rec_cfg.fifo_thr = regk_sser_inf;
865 if (arg & CLOCK_NOT_GATED)
866 gen_cfg.gate_clk = regk_sser_no;
867 else if (arg & CLOCK_GATED)
868 gen_cfg.gate_clk = regk_sser_yes;
872 /* NOTE!! negedge is considered NORMAL */
873 if (arg & CLOCK_NORMAL)
874 rec_cfg.clk_pol = regk_sser_neg;
875 else if (arg & CLOCK_INVERT)
876 rec_cfg.clk_pol = regk_sser_pos;
878 if (arg & FRAME_NORMAL)
879 frm_cfg.level = regk_sser_pos_hi;
880 else if (arg & FRAME_INVERT)
881 frm_cfg.level = regk_sser_neg_lo;
883 if (arg & STATUS_NORMAL)
884 gen_cfg.hold_pol = regk_sser_pos;
885 else if (arg & STATUS_INVERT)
886 gen_cfg.hold_pol = regk_sser_neg;
889 if (arg & CLOCK_NORMAL)
890 gen_cfg.out_clk_pol = regk_sser_pos;
891 else if (arg & CLOCK_INVERT)
892 gen_cfg.out_clk_pol = regk_sser_neg;
894 if (arg & FRAME_NORMAL)
895 frm_cfg.level = regk_sser_pos_hi;
896 else if (arg & FRAME_INVERT)
897 frm_cfg.level = regk_sser_neg_lo;
899 if (arg & STATUS_NORMAL)
900 gen_cfg.hold_pol = regk_sser_pos;
901 else if (arg & STATUS_INVERT)
902 gen_cfg.hold_pol = regk_sser_neg;
905 rec_cfg.fifo_thr = regk_sser_inf;
906 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
907 rec_cfg.sample_size = tr_cfg.sample_size = 7;
908 frm_cfg.frame_pin_use = regk_sser_frm;
909 frm_cfg.type = regk_sser_level;
910 frm_cfg.tr_delay = 1;
911 frm_cfg.level = regk_sser_neg_lo;
914 rec_cfg.clk_pol = regk_sser_neg;
915 gen_cfg.clk_dir = regk_sser_in;
921 gen_cfg.out_clk_pol = regk_sser_pos;
924 gen_cfg.clk_dir = regk_sser_out;
935 rec_cfg.rec_en = port->input;
936 gen_cfg.en = (port->output | port->input);
939 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
940 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
941 REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
942 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
943 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
946 if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 |
947 WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) {
950 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
951 /* ##### Should DMA be stoped before we change dma size? */
952 DMA_WR_CMD(port->regi_dmain, dma_w_size);
953 DMA_WR_CMD(port->regi_dmaout, dma_w_size);
955 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
958 spin_unlock_irq(&port->lock);
962 /* NOTE: sync_serial_write does not support concurrency */
963 static ssize_t sync_serial_write(struct file *file, const char *buf,
964 size_t count, loff_t *ppos)
966 int dev = iminor(file->f_path.dentry->d_inode);
967 DECLARE_WAITQUEUE(wait, current);
968 struct sync_port *port;
974 unsigned char *rd_ptr; /* First allocated byte in the buffer */
975 unsigned char *wr_ptr; /* First free byte in the buffer */
976 unsigned char *buf_stop_ptr; /* Last byte + 1 */
978 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
979 DEBUG(printk("Invalid minor %d\n", dev));
984 /* |<- OUT_BUFFER_SIZE ->|
985 * |<- out_buf_count ->|
986 * |<- trunc_count ->| ...->|
987 * ______________________________________________________
988 * | free | data | free |
989 * |_________|___________________|________________________|
992 DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n",
993 port->port_nbr, count, port->active_tr_descr,
994 port->catch_tr_descr));
996 /* Read variables that may be updated by interrupts */
997 spin_lock_irqsave(&port->lock, flags);
998 rd_ptr = port->out_rd_ptr;
999 out_buf_count = port->out_buf_count;
1000 spin_unlock_irqrestore(&port->lock, flags);
1002 /* Check if resources are available */
1003 if (port->tr_running &&
1004 ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
1005 out_buf_count >= OUT_BUFFER_SIZE)) {
1006 DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev));
1010 buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE;
1012 /* Determine pointer to the first free byte, before copying. */
1013 wr_ptr = rd_ptr + out_buf_count;
1014 if (wr_ptr >= buf_stop_ptr)
1015 wr_ptr -= OUT_BUFFER_SIZE;
1017 /* If we wrap the ring buffer, let the user space program handle it by
1018 * truncating the data. This could be more elegant, small buffer
1019 * fragments may occur.
1021 bytes_free = OUT_BUFFER_SIZE - out_buf_count;
1022 if (wr_ptr + bytes_free > buf_stop_ptr)
1023 bytes_free = buf_stop_ptr - wr_ptr;
1024 trunc_count = (count < bytes_free) ? count : bytes_free;
1026 if (copy_from_user(wr_ptr, buf, trunc_count))
1029 DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d %p %p %p\n",
1030 out_buf_count, trunc_count,
1031 port->out_buf_count, port->out_buffer,
1032 wr_ptr, buf_stop_ptr));
1034 /* Make sure transmitter/receiver is running */
1035 if (!port->started) {
1036 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
1037 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1038 cfg.en = regk_sser_yes;
1039 rec_cfg.rec_en = port->input;
1040 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
1041 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
1045 /* Setup wait if blocking */
1046 if (!(file->f_flags & O_NONBLOCK)) {
1047 add_wait_queue(&port->out_wait_q, &wait);
1048 set_current_state(TASK_INTERRUPTIBLE);
1051 spin_lock_irqsave(&port->lock, flags);
1052 port->out_buf_count += trunc_count;
1053 if (port->use_dma) {
1054 start_dma_out(port, wr_ptr, trunc_count);
1055 } else if (!port->tr_running) {
1056 reg_sser_rw_intr_mask intr_mask;
1057 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
1058 /* Start sender by writing data */
1060 /* and enable transmitter ready IRQ */
1062 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
1064 spin_unlock_irqrestore(&port->lock, flags);
1066 /* Exit if non blocking */
1067 if (file->f_flags & O_NONBLOCK) {
1068 DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n",
1069 port->port_nbr, trunc_count,
1070 REG_RD_INT(dma, port->regi_dmaout, r_intr)));
1075 set_current_state(TASK_RUNNING);
1076 remove_wait_queue(&port->out_wait_q, &wait);
1078 if (signal_pending(current))
1081 DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n",
1082 port->port_nbr, trunc_count));
1086 static ssize_t sync_serial_read(struct file * file, char * buf,
1087 size_t count, loff_t *ppos)
1089 int dev = iminor(file->f_path.dentry->d_inode);
1092 unsigned char* start;
1094 unsigned long flags;
1096 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
1098 DEBUG(printk("Invalid minor %d\n", dev));
1103 DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size));
1107 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
1108 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1109 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1110 cfg.en = regk_sser_yes;
1111 tr_cfg.tr_en = regk_sser_yes;
1112 rec_cfg.rec_en = regk_sser_yes;
1113 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
1114 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1115 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
1119 /* Calculate number of available bytes */
1120 /* Save pointers to avoid that they are modified by interrupt */
1121 spin_lock_irqsave(&port->lock, flags);
1122 start = (unsigned char*)port->readp; /* cast away volatile */
1123 end = (unsigned char*)port->writep; /* cast away volatile */
1124 spin_unlock_irqrestore(&port->lock, flags);
1125 while ((start == end) && !port->full) /* No data */
1127 DEBUGREAD(printk(KERN_DEBUG "&"));
1128 if (file->f_flags & O_NONBLOCK)
1131 interruptible_sleep_on(&port->in_wait_q);
1132 if (signal_pending(current))
1135 spin_lock_irqsave(&port->lock, flags);
1136 start = (unsigned char*)port->readp; /* cast away volatile */
1137 end = (unsigned char*)port->writep; /* cast away volatile */
1138 spin_unlock_irqrestore(&port->lock, flags);
1141 /* Lazy read, never return wrapped data. */
1143 avail = port->in_buffer_size;
1144 else if (end > start)
1145 avail = end - start;
1147 avail = port->flip + port->in_buffer_size - start;
1149 count = count > avail ? avail : count;
1150 if (copy_to_user(buf, start, count))
1152 /* Disable interrupts while updating readp */
1153 spin_lock_irqsave(&port->lock, flags);
1154 port->readp += count;
1155 if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
1156 port->readp = port->flip;
1158 spin_unlock_irqrestore(&port->lock, flags);
1159 DEBUGREAD(printk("r %d\n", count));
1163 static void send_word(sync_port* port)
1165 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1166 reg_sser_rw_tr_data tr_data = {0};
1168 switch(tr_cfg.sample_size)
1171 port->out_buf_count--;
1172 tr_data.data = *port->out_rd_ptr++;
1173 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1174 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1175 port->out_rd_ptr = port->out_buffer;
1179 int data = (*port->out_rd_ptr++) << 8;
1180 data |= *port->out_rd_ptr++;
1181 port->out_buf_count -= 2;
1182 tr_data.data = data;
1183 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1184 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1185 port->out_rd_ptr = port->out_buffer;
1189 port->out_buf_count -= 2;
1190 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1191 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1192 port->out_rd_ptr += 2;
1193 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1194 port->out_rd_ptr = port->out_buffer;
1197 port->out_buf_count -= 3;
1198 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1199 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1200 port->out_rd_ptr += 2;
1201 tr_data.data = *port->out_rd_ptr++;
1202 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1203 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1204 port->out_rd_ptr = port->out_buffer;
1207 port->out_buf_count -= 4;
1208 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1209 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1210 port->out_rd_ptr += 2;
1211 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1212 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1213 port->out_rd_ptr += 2;
1214 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1215 port->out_rd_ptr = port->out_buffer;
1220 static void start_dma_out(struct sync_port *port,
1221 const char *data, int count)
1223 port->active_tr_descr->buf = (char *) virt_to_phys((char *) data);
1224 port->active_tr_descr->after = port->active_tr_descr->buf + count;
1225 port->active_tr_descr->intr = 1;
1227 port->active_tr_descr->eol = 1;
1228 port->prev_tr_descr->eol = 0;
1230 DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n",
1231 port->prev_tr_descr, port->active_tr_descr));
1232 port->prev_tr_descr = port->active_tr_descr;
1233 port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next);
1235 if (!port->tr_running) {
1236 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
1239 port->out_context.next = 0;
1240 port->out_context.saved_data =
1241 (dma_descr_data *)virt_to_phys(port->prev_tr_descr);
1242 port->out_context.saved_data_buf = port->prev_tr_descr->buf;
1244 DMA_START_CONTEXT(port->regi_dmaout,
1245 virt_to_phys((char *)&port->out_context));
1247 tr_cfg.tr_en = regk_sser_yes;
1248 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1249 DEBUGTRDMA(printk(KERN_DEBUG "dma s\n"););
1251 DMA_CONTINUE_DATA(port->regi_dmaout);
1252 DEBUGTRDMA(printk(KERN_DEBUG "dma c\n"););
1255 port->tr_running = 1;
1258 static void start_dma_in(sync_port *port)
1262 port->writep = port->flip;
1264 if (port->writep > port->flip + port->in_buffer_size) {
1265 panic("Offset too large in sync serial driver\n");
1268 buf = (char*)virt_to_phys(port->in_buffer);
1269 for (i = 0; i < NBR_IN_DESCR; i++) {
1270 port->in_descr[i].buf = buf;
1271 port->in_descr[i].after = buf + port->inbufchunk;
1272 port->in_descr[i].intr = 1;
1273 port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]);
1274 port->in_descr[i].buf = buf;
1275 buf += port->inbufchunk;
1277 /* Link the last descriptor to the first */
1278 port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
1279 port->in_descr[i-1].eol = regk_sser_yes;
1280 port->next_rx_desc = &port->in_descr[0];
1281 port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
1282 port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
1283 port->in_context.saved_data_buf = port->in_descr[0].buf;
1284 DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
/*
 * TX DMA interrupt handler, shared by all sync serial ports.
 *
 * For each enabled DMA-driven port with a pending output interrupt:
 *  - normal case: the DMA finished one descriptor without reaching
 *    end-of-list, so account the sent bytes (after - buf), advance
 *    catch_tr_descr and out_rd_ptr;
 *  - EOL case: drain any remaining completed descriptors, move
 *    out_rd_ptr to the first free byte, disable the transmitter
 *    (tr_en = regk_sser_no) and clear tr_running.
 * Writers sleeping on out_wait_q are woken afterwards.  Returns
 * IRQ_RETVAL(found), i.e. handled iff some port had a pending IRQ.
 */
1288 static irqreturn_t tr_interrupt(int irq, void *dev_id)
1290 reg_dma_r_masked_intr masked;
/* Ack value: clears the "data" (descriptor done) interrupt bit. */
1291 reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
1292 reg_dma_rw_stat stat;
1297 for (i = 0; i < NBR_PORTS; i++) {
1298 sync_port *port = &ports[i];
/* Skip ports that are disabled or run in manual (PIO) mode. */
1299 if (!port->enabled || !port->use_dma)
1302 /* IRQ active for the port? */
1303 masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);
1309 /* Check if we should stop the DMA transfer */
1310 stat = REG_RD(dma, port->regi_dmaout, rw_stat);
/* list_state == data_at_eol means the DMA ran out of queued data. */
1311 if (stat.list_state == regk_dma_data_at_eol)
/* Ack the interrupt before processing, so a new completion re-fires. */
1315 REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);
1318 /* The DMA has completed a descriptor, EOL was not
1319 * encountered, so step relevant descriptor and
1320 * datapointers forward. */
/* Bytes covered by the completed descriptor. */
1322 sent = port->catch_tr_descr->after -
1323 port->catch_tr_descr->buf;
1324 DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t"
1325 "in descr %p (ac: %p)\n",
1326 port->out_buf_count, sent,
1327 port->out_buf_count - sent,
1328 port->catch_tr_descr,
1329 port->active_tr_descr););
1330 port->out_buf_count -= sent;
1331 port->catch_tr_descr =
1332 phys_to_virt((int) port->catch_tr_descr->next);
1334 phys_to_virt((int) port->catch_tr_descr->buf);
/* EOL case: walk all descriptors completed before the list ended. */
1338 * Note that if an EOL was encountered during the irq
1339 * locked section of sync_ser_write the DMA will be
1340 * restarted and the eol flag will be cleared.
1341 * The remaining descriptors will be traversed by
1342 * the descriptor interrupts as usual.
1345 while (!port->catch_tr_descr->eol) {
1346 sent = port->catch_tr_descr->after -
1347 port->catch_tr_descr->buf;
1348 DEBUGOUTBUF(printk(KERN_DEBUG
1349 "traversing descr %p -%d (%d)\n",
1350 port->catch_tr_descr,
1352 port->out_buf_count));
1353 port->out_buf_count -= sent;
1354 port->catch_tr_descr = phys_to_virt(
1355 (int)port->catch_tr_descr->next);
/* Safety net: a ring of NBR_OUT_DESCR entries with no EOL is corrupt. */
1357 if (i >= NBR_OUT_DESCR) {
1358 /* TODO: Reset and recover */
1359 panic("sync_serial: missing eol");
/* Account the final (EOL-marked) descriptor as well. */
1362 sent = port->catch_tr_descr->after -
1363 port->catch_tr_descr->buf;
1364 DEBUGOUTBUF(printk(KERN_DEBUG
1365 "eol at descr %p -%d (%d)\n",
1366 port->catch_tr_descr,
1368 port->out_buf_count));
1370 port->out_buf_count -= sent;
1372 /* Update read pointer to first free byte, we
1373 * may already be writing data there. */
1375 phys_to_virt((int) port->catch_tr_descr->after);
/* Wrap the read pointer back into the circular output buffer. */
1376 if (port->out_rd_ptr > port->out_buffer +
1378 port->out_rd_ptr = port->out_buffer;
/* Stop the serial transmitter; everything queued has been sent. */
1380 reg_sser_rw_tr_cfg tr_cfg =
1381 REG_RD(sser, port->regi_sser, rw_tr_cfg);
1382 DEBUGTXINT(printk(KERN_DEBUG
1383 "tr_int DMA stop %d, set catch @ %p\n",
1384 port->out_buf_count,
1385 port->active_tr_descr));
/* All bytes should be accounted for once EOL is reached. */
1386 if (port->out_buf_count != 0)
1387 printk(KERN_CRIT "sync_ser: buffer not "
1388 "empty after eol.\n");
1389 port->catch_tr_descr = port->active_tr_descr;
1390 port->tr_running = 0;
1391 tr_cfg.tr_en = regk_sser_no;
1392 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1394 /* wake up the waiting process */
1395 wake_up_interruptible(&port->out_wait_q);
1397 return IRQ_RETVAL(found);
1398 } /* tr_interrupt */
/*
 * RX DMA interrupt handler, shared by all sync serial ports.
 *
 * For every RX descriptor the DMA has finished (i.e. until the
 * channel's rw_data pointer catches up with next_rx_desc), copy the
 * inbufchunk bytes into the circular flip buffer (split into two
 * memcpy()s when the chunk wraps past the buffer end), advance the
 * EOL marker one descriptor around the ring, wake readers sleeping on
 * in_wait_q and let the DMA continue.  Returns IRQ_RETVAL(found).
 */
1400 static irqreturn_t rx_interrupt(int irq, void *dev_id)
1402 reg_dma_r_masked_intr masked;
/* Ack value: clears the "data" (descriptor done) interrupt bit. */
1403 reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
1408 for (i = 0; i < NBR_PORTS; i++)
1410 sync_port *port = &ports[i];
/* Skip ports that are disabled or run in manual (PIO) mode. */
1412 if (!port->enabled || !port->use_dma )
1415 masked = REG_RD(dma, port->regi_dmain, r_masked_intr);
1417 if (masked.data) /* Descriptor interrupt */
/* Drain every descriptor completed since the last interrupt. */
1420 while (REG_RD(dma, port->regi_dmain, rw_data) !=
1421 virt_to_phys(port->next_rx_desc)) {
1422 DEBUGRXINT(printk(KERN_DEBUG "!"));
/* Chunk straddles the end of the flip buffer: copy in two parts. */
1423 if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) {
1424 int first_size = port->flip + port->in_buffer_size - port->writep;
1425 memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size);
1426 memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size);
1427 port->writep = port->flip + port->inbufchunk - first_size;
/* Non-wrapping case: single copy, then advance and wrap writep. */
1429 memcpy((char*)port->writep,
1430 phys_to_virt((unsigned)port->next_rx_desc->buf),
1432 port->writep += port->inbufchunk;
1433 if (port->writep >= port->flip + port->in_buffer_size)
1434 port->writep = port->flip;
/* writep catching up with readp means the reader fell behind
 * (overrun); see the file-top comment on discarding data. */
1436 if (port->writep == port->readp)
/* Advance the EOL marker one step around the descriptor ring:
 * mark the new tail before unmarking the old one. */
1441 port->next_rx_desc->eol = 1;
1442 port->prev_rx_desc->eol = 0;
1443 /* Cache bug workaround */
1444 flush_dma_descr(port->prev_rx_desc, 0);
1445 port->prev_rx_desc = port->next_rx_desc;
1446 port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
1447 /* Cache bug workaround */
1448 flush_dma_descr(port->prev_rx_desc, 1);
1449 /* wake up the waiting process */
1450 wake_up_interruptible(&port->in_wait_q);
1451 DMA_CONTINUE(port->regi_dmain);
1452 REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
1457 return IRQ_RETVAL(found);
1458 } /* rx_interrupt */
1459 #endif /* SYNC_SER_DMA */
1461 #ifdef SYNC_SER_MANUAL
/*
 * Interrupt handler for ports running in manual (non-DMA) mode.
 *
 * rdav: read one sample word from r_rec_data and store it into the
 * flip buffer; the store width is chosen by rec_cfg.sample_size
 * (8/12/16/24/32-bit cases -- case labels are per sample size).  The
 * write pointer wraps at the buffer end, overruns discard the oldest
 * data by bumping readp, and readers are woken once a full inbufchunk
 * is available.  trdy: push more TX data if out_buf_count > 0,
 * otherwise mask the TX interrupt and wake writers.
 */
1462 static irqreturn_t manual_interrupt(int irq, void *dev_id)
1466 reg_sser_r_masked_intr masked;
1468 for (i = 0; i < NBR_PORTS; i++)
1470 sync_port *port = &ports[i];
/* Manual handler only serves enabled ports NOT using DMA. */
1472 if (!port->enabled || port->use_dma)
1477 masked = REG_RD(sser, port->regi_sser, r_masked_intr);
1478 if (masked.rdav) /* Data received? */
1480 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1481 reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data);
/* Store the sample; width depends on the configured sample size. */
1484 switch(rec_cfg.sample_size)
/* 8-bit sample: one byte. */
1487 *port->writep++ = data.data & 0xff;
/* 12-bit sample: split into two bytes (high 8 bits, low 4 bits). */
1490 *port->writep = (data.data & 0x0ff0) >> 4;
1491 *(port->writep + 1) = data.data & 0x0f;
/* 16-bit sample. */
1495 *(unsigned short*)port->writep = data.data;
/* Wider samples are stored as a 32-bit word. */
1499 *(unsigned int*)port->writep = data.data;
1503 *(unsigned int*)port->writep = data.data;
1508 if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */
1509 port->writep = port->flip;
1510 if (port->writep == port->readp) {
1511 /* receive buffer overrun, discard oldest data
1514 if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
1515 port->readp = port->flip;
/* Only wake readers once a whole chunk is buffered. */
1517 if (sync_data_avail(port) >= port->inbufchunk)
1518 wake_up_interruptible(&port->in_wait_q); /* Wake up application */
1521 if (masked.trdy) /* Transmitter ready? */
1524 if (port->out_buf_count > 0) /* More data to send */
1526 else /* transmission finished */
/* Nothing left: mask the TX-ready interrupt so it stops firing. */
1528 reg_sser_rw_intr_mask intr_mask;
1529 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
1531 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
1532 wake_up_interruptible(&port->out_wait_q); /* Wake up application */
1536 return IRQ_RETVAL(found);
/* Run the driver's init function at boot (or on module load). */
1540 module_init(etrax_sync_serial_init);