2 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 #include <linux/init.h>
20 #include <linux/module.h>
21 #include <linux/device.h>
22 #include <linux/ioport.h>
23 #include <linux/errno.h>
24 #include <linux/interrupt.h>
25 #include <linux/platform_device.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/spi/spi.h>
28 #include <linux/workqueue.h>
29 #include <linux/errno.h>
30 #include <linux/delay.h>
34 #include <asm/hardware.h>
35 #include <asm/delay.h>
38 #include <asm/arch/hardware.h>
39 #include <asm/arch/pxa-regs.h>
40 #include <asm/arch/pxa2xx_spi.h>
42 MODULE_AUTHOR("Stephen Street");
43 MODULE_DESCRIPTION("PXA2xx SSP SPI Contoller");
44 MODULE_LICENSE("GPL");
/* DMA channel interrupt sources this driver services, and the DCSR value
 * used to stop/reset a channel with those interrupts enabled and no
 * descriptor fetch (DCSR_NODESC). */
48 #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
49 #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
/* DMA engine requires 8-byte-aligned buffer addresses. */
50 #define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)
/* Generate read_<reg>()/write_<reg>() accessors for an SSP register located
 * at byte offset 'off' from the ioremapped base pointer 'p'. */
52 #define DEFINE_SSP_REG(reg, off) \
53 static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
54 static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
56 DEFINE_SSP_REG(SSCR0, 0x00)
57 DEFINE_SSP_REG(SSCR1, 0x04)
58 DEFINE_SSP_REG(SSSR, 0x08)
59 DEFINE_SSP_REG(SSITR, 0x0c)
60 DEFINE_SSP_REG(SSDR, 0x10)
61 DEFINE_SSP_REG(SSTO, 0x28)
62 DEFINE_SSP_REG(SSPSP, 0x2c)
/* Per-message state machine tags, stored in spi_message->state as opaque
 * pointer values. */
64 #define START_STATE ((void*)0)
65 #define RUNNING_STATE ((void*)1)
66 #define DONE_STATE ((void*)2)
67 #define ERROR_STATE ((void*)-1)
/* Run-state values for the driver's message queue. */
69 #define QUEUE_RUNNING 0
70 #define QUEUE_STOPPED 1
73 /* Driver model hookup */
74 struct platform_device *pdev;
76 /* SPI framework hookup */
77 enum pxa_ssp_type ssp_type;
78 struct spi_master *master;
81 struct pxa2xx_spi_master *master_info;
88 /* SSP register addresses */
98 /* Driver message queue */
99 struct workqueue_struct *workqueue;
100 struct work_struct pump_messages;
102 struct list_head queue;
106 /* Message Transfer pump */
107 struct tasklet_struct pump_transfers;
109 /* Current message transfer state info */
110 struct spi_message* cur_msg;
111 struct spi_transfer* cur_transfer;
112 struct chip_data *cur_chip;
124 void (*write)(struct driver_data *drv_data);
125 void (*read)(struct driver_data *drv_data);
126 irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
127 void (*cs_control)(u32 command);
142 void (*write)(struct driver_data *drv_data);
143 void (*read)(struct driver_data *drv_data);
144 void (*cs_control)(u32 command);
147 static void pump_messages(void *data);
/* Drain the receive FIFO and spin (bounded by ~2x loops_per_jiffy) until
 * the port drops SSSR_BSY, then clear the receive-overrun status bit.
 * Call sites treat a return of 0 as "FIFO failed to drain" (timeout). */
149 static int flush(struct driver_data *drv_data)
151 unsigned long limit = loops_per_jiffy << 1;
153 void *reg = drv_data->ioaddr;
156 while (read_SSSR(reg) & SSSR_RNE) {
159 } while ((read_SSSR(reg) & SSSR_BSY) && limit--);
160 write_SSSR(SSSR_ROR, reg);
/* Reprogram the SSP from the current chip's cached register image (cr0,
 * cr1 and, on ports that have it, SSPSP). Status is cleared and the port
 * enable bit (SSCR0_SSE) is dropped while the registers are loaded. */
165 static void restore_state(struct driver_data *drv_data)
167 void *reg = drv_data->ioaddr;
169 /* Clear status and disable clock */
170 write_SSSR(drv_data->clear_sr, reg);
171 write_SSCR0(drv_data->cur_chip->cr0 & ~SSCR0_SSE, reg);
173 /* Load the registers */
174 write_SSCR1(drv_data->cur_chip->cr1, reg);
175 write_SSCR0(drv_data->cur_chip->cr0, reg);
176 if (drv_data->ssp_type != PXA25x_SSP) {
178 write_SSPSP(drv_data->cur_chip->psp, reg);
/* Default chip-select hook installed when the board supplies none (see
 * setup()) — presumably a no-op; body not visible in this chunk, confirm. */
182 static void null_cs_control(u32 command)
/* Tx-side PIO pump for transfers with no tx buffer: keeps feeding the FIFO
 * while there is room (SSSR_TNF) and words remain, advancing the tx cursor
 * by the chip's word width (the dummy SSDR write itself is elided here). */
186 static void null_writer(struct driver_data *drv_data)
188 void *reg = drv_data->ioaddr;
189 u8 n_bytes = drv_data->cur_chip->n_bytes;
191 while ((read_SSSR(reg) & SSSR_TNF)
192 && (drv_data->tx < drv_data->tx_end)) {
194 drv_data->tx += n_bytes;
/* Rx-side PIO pump for transfers with no rx buffer: discards words from the
 * FIFO while data is available (SSSR_RNE), advancing the rx cursor by the
 * chip's word width (the discarding SSDR read itself is elided here). */
198 static void null_reader(struct driver_data *drv_data)
200 void *reg = drv_data->ioaddr;
201 u8 n_bytes = drv_data->cur_chip->n_bytes;
203 while ((read_SSSR(reg) & SSSR_RNE)
204 && (drv_data->rx < drv_data->rx_end)) {
206 drv_data->rx += n_bytes;
/* PIO tx for <=8-bit words: write one u8 per FIFO slot until full or done. */
210 static void u8_writer(struct driver_data *drv_data)
212 void *reg = drv_data->ioaddr;
214 while ((read_SSSR(reg) & SSSR_TNF)
215 && (drv_data->tx < drv_data->tx_end)) {
216 write_SSDR(*(u8 *)(drv_data->tx), reg);
/* PIO rx for <=8-bit words: pull one u8 per word while the FIFO has data. */
221 static void u8_reader(struct driver_data *drv_data)
223 void *reg = drv_data->ioaddr;
225 while ((read_SSSR(reg) & SSSR_RNE)
226 && (drv_data->rx < drv_data->rx_end)) {
227 *(u8 *)(drv_data->rx) = read_SSDR(reg);
/* PIO tx for 9..16-bit words: one u16 per FIFO slot while not full. */
232 static void u16_writer(struct driver_data *drv_data)
234 void *reg = drv_data->ioaddr;
236 while ((read_SSSR(reg) & SSSR_TNF)
237 && (drv_data->tx < drv_data->tx_end)) {
238 write_SSDR(*(u16 *)(drv_data->tx), reg);
/* PIO rx for 9..16-bit words: one u16 per word while the FIFO has data. */
243 static void u16_reader(struct driver_data *drv_data)
245 void *reg = drv_data->ioaddr;
247 while ((read_SSSR(reg) & SSSR_RNE)
248 && (drv_data->rx < drv_data->rx_end)) {
249 *(u16 *)(drv_data->rx) = read_SSDR(reg);
253 static void u32_writer(struct driver_data *drv_data)
255 void *reg = drv_data->ioaddr;
257 while ((read_SSSR(reg) & SSSR_TNF)
258 && (drv_data->tx < drv_data->tx_end)) {
259 write_SSDR(*(u16 *)(drv_data->tx), reg);
/* PIO rx for 17..32-bit words: one u32 per word while the FIFO has data. */
264 static void u32_reader(struct driver_data *drv_data)
266 void *reg = drv_data->ioaddr;
268 while ((read_SSSR(reg) & SSSR_RNE)
269 && (drv_data->rx < drv_data->rx_end)) {
270 *(u32 *)(drv_data->rx) = read_SSDR(reg);
/* Advance cur_transfer to the next spi_transfer in the message's list and
 * report the new message state: RUNNING_STATE while more transfers remain
 * (the end-of-list / DONE_STATE branch is elided from this chunk). */
275 static void *next_transfer(struct driver_data *drv_data)
277 struct spi_message *msg = drv_data->cur_msg;
278 struct spi_transfer *trans = drv_data->cur_transfer;
280 /* Move to next transfer */
281 if (trans->transfer_list.next != &msg->transfers) {
282 drv_data->cur_transfer =
283 list_entry(trans->transfer_list.next,
286 return RUNNING_STATE;
/* Decide whether the current transfer can use DMA and set up the mappings.
 * Returns nonzero on success: requires the chip to allow DMA, then either
 * caller-supplied mappings (msg->is_dma_mapped) or 8-byte-aligned buffers
 * that are stream-mapped here. A missing rx or tx buffer is redirected to
 * the shared 4-byte null_dma_buf scratch word. On tx mapping failure the
 * rx mapping is undone (failure return elided from this chunk).
 * NOTE(review): uses the old single-argument dma_mapping_error() API. */
291 static int map_dma_buffers(struct driver_data *drv_data)
293 struct spi_message *msg = drv_data->cur_msg;
294 struct device *dev = &msg->spi->dev;
296 if (!drv_data->cur_chip->enable_dma)
299 if (msg->is_dma_mapped)
300 return drv_data->rx_dma && drv_data->tx_dma;
302 if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
305 /* Modify setup if rx buffer is null */
306 if (drv_data->rx == NULL) {
307 *drv_data->null_dma_buf = 0;
308 drv_data->rx = drv_data->null_dma_buf;
309 drv_data->rx_map_len = 4;
311 drv_data->rx_map_len = drv_data->len;
314 /* Modify setup if tx buffer is null */
315 if (drv_data->tx == NULL) {
316 *drv_data->null_dma_buf = 0;
317 drv_data->tx = drv_data->null_dma_buf;
318 drv_data->tx_map_len = 4;
320 drv_data->tx_map_len = drv_data->len;
322 /* Stream map the rx buffer */
323 drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
324 drv_data->rx_map_len,
326 if (dma_mapping_error(drv_data->rx_dma))
329 /* Stream map the tx buffer */
330 drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
331 drv_data->tx_map_len,
334 if (dma_mapping_error(drv_data->tx_dma)) {
335 dma_unmap_single(dev, drv_data->rx_dma,
336 drv_data->rx_map_len, DMA_FROM_DEVICE);
/* Undo the stream mappings created by map_dma_buffers(). Buffers mapped by
 * the message's originator (is_dma_mapped) are left alone. The dma_mapped
 * flag makes repeated calls harmless. */
343 static void unmap_dma_buffers(struct driver_data *drv_data)
347 if (!drv_data->dma_mapped)
350 if (!drv_data->cur_msg->is_dma_mapped) {
351 dev = &drv_data->cur_msg->spi->dev;
352 dma_unmap_single(dev, drv_data->rx_dma,
353 drv_data->rx_map_len, DMA_FROM_DEVICE);
354 dma_unmap_single(dev, drv_data->tx_dma,
355 drv_data->tx_map_len, DMA_TO_DEVICE);
358 drv_data->dma_mapped = 0;
361 /* caller already set message->status; dma and pio irqs are blocked */
/* Complete the current message: deassert chip select unless the final
 * transfer asked to keep it active (cs_change), run the message's
 * completion callback, clear the current-state pointers and kick the
 * message pump to fetch the next queued message. */
362 static void giveback(struct spi_message *message, struct driver_data *drv_data)
364 struct spi_transfer* last_transfer;
366 last_transfer = list_entry(message->transfers.prev,
370 if (!last_transfer->cs_change)
371 drv_data->cs_control(PXA2XX_CS_DEASSERT);
373 message->state = NULL;
374 if (message->complete)
375 message->complete(message->context);
377 drv_data->cur_msg = NULL;
378 drv_data->cur_transfer = NULL;
379 drv_data->cur_chip = NULL;
380 queue_work(drv_data->workqueue, &drv_data->pump_messages);
/* Spin until the SSP drops SSSR_BSY, bounded by ~2x loops_per_jiffy.
 * Call sites treat a return of 0 as a stall-wait timeout. */
383 static int wait_ssp_rx_stall(void *ioaddr)
385 unsigned long limit = loops_per_jiffy << 1;
387 while ((read_SSSR(ioaddr) & SSSR_BSY) && limit--)
/* Spin until the DMA channel reports DCSR_STOPSTATE, bounded by ~2x
 * loops_per_jiffy. Call sites treat a return of 0 as a timeout. */
393 static int wait_dma_channel_stop(int channel)
395 unsigned long limit = loops_per_jiffy << 1;
397 while (!(DCSR(channel) & DCSR_STOPSTATE) && limit--)
/* DMA channel interrupt handler. On a bus error: shut down the SSP and
 * both DMA channels, flush the port and push the message to ERROR_STATE.
 * On PXA25x tx completion (that SSP has no timeout interrupt): wait for rx
 * to stall, stop everything, read any trailing bytes left in the FIFO by
 * PIO, account the transfer and advance to the next one. All follow-up
 * work is handed to the pump_transfers tasklet. */
403 static void dma_handler(int channel, void *data, struct pt_regs *regs)
405 struct driver_data *drv_data = data;
406 struct spi_message *msg = drv_data->cur_msg;
407 void *reg = drv_data->ioaddr;
408 u32 irq_status = DCSR(channel) & DMA_INT_MASK;
409 u32 trailing_sssr = 0;
411 if (irq_status & DCSR_BUSERR) {
413 /* Disable interrupts, clear status and reset DMA */
414 if (drv_data->ssp_type != PXA25x_SSP)
416 write_SSSR(drv_data->clear_sr, reg);
417 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
418 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
419 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
421 if (flush(drv_data) == 0)
422 dev_err(&drv_data->pdev->dev,
423 "dma_handler: flush fail\n");
425 unmap_dma_buffers(drv_data);
427 if (channel == drv_data->tx_channel)
428 dev_err(&drv_data->pdev->dev,
429 "dma_handler: bad bus address on "
430 "tx channel %d, source %x target = %x\n",
431 channel, DSADR(channel), DTADR(channel));
433 dev_err(&drv_data->pdev->dev,
434 "dma_handler: bad bus address on "
435 "rx channel %d, source %x target = %x\n",
436 channel, DSADR(channel), DTADR(channel));
438 msg->state = ERROR_STATE;
439 tasklet_schedule(&drv_data->pump_transfers);
442 /* PXA25x_SSP has no timeout interrupt, wait for trailing bytes */
443 if ((drv_data->ssp_type == PXA25x_SSP)
444 && (channel == drv_data->tx_channel)
445 && (irq_status & DCSR_ENDINTR)) {
447 /* Wait for rx to stall */
448 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
449 dev_err(&drv_data->pdev->dev,
450 "dma_handler: ssp rx stall failed\n");
452 /* Clear and disable interrupts on SSP and DMA channels*/
453 write_SSSR(drv_data->clear_sr, reg);
454 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
455 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
456 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
457 if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
458 dev_err(&drv_data->pdev->dev,
459 "dma_handler: dma rx channel stop failed\n");
461 unmap_dma_buffers(drv_data);
463 /* Read trailing bytes */
464 /* Calculate number of trailing bytes, read them */
465 trailing_sssr = read_SSSR(reg);
/* NOTE(review): 0xf008 masks the rx-FIFO-level field (SSSR[15:12]) plus
 * RNE; anything other than "empty" means trailing words remain — confirm
 * against the SSP SSSR bit layout. */
466 if ((trailing_sssr & 0xf008) != 0xf000) {
467 drv_data->rx = drv_data->rx_end -
468 (((trailing_sssr >> 12) & 0x0f) + 1);
469 drv_data->read(drv_data);
471 msg->actual_length += drv_data->len;
473 /* Release chip select if requested, transfer delays are
474 * handled in pump_transfers */
475 if (drv_data->cs_change)
476 drv_data->cs_control(PXA2XX_CS_DEASSERT);
478 /* Move to next transfer */
479 msg->state = next_transfer(drv_data);
481 /* Schedule transfer tasklet */
482 tasklet_schedule(&drv_data->pump_transfers);
/* SSP interrupt handler used while a DMA transfer is in flight. Handles rx
 * overrun (abort to ERROR_STATE), filters false-positive timeouts while tx
 * DMA is still running, and on a real timeout or rx completion stops the
 * SSP and DMA, drains trailing FIFO words by PIO, accounts the transfer
 * and schedules pump_transfers to move on. */
486 static irqreturn_t dma_transfer(struct driver_data *drv_data)
489 u32 trailing_sssr = 0;
490 struct spi_message *msg = drv_data->cur_msg;
491 void *reg = drv_data->ioaddr;
493 irq_status = read_SSSR(reg) & drv_data->mask_sr;
494 if (irq_status & SSSR_ROR) {
495 /* Clear and disable interrupts on SSP and DMA channels*/
496 if (drv_data->ssp_type != PXA25x_SSP)
498 write_SSSR(drv_data->clear_sr, reg);
499 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
500 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
501 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
502 unmap_dma_buffers(drv_data);
504 if (flush(drv_data) == 0)
505 dev_err(&drv_data->pdev->dev,
506 "dma_transfer: flush fail\n");
508 dev_warn(&drv_data->pdev->dev, "dma_transfer: fifo overun\n");
510 drv_data->cur_msg->state = ERROR_STATE;
511 tasklet_schedule(&drv_data->pump_transfers);
516 /* Check for false positive timeout */
517 if ((irq_status & SSSR_TINT) && DCSR(drv_data->tx_channel) & DCSR_RUN) {
518 write_SSSR(SSSR_TINT, reg);
522 if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
524 /* Clear and disable interrupts on SSP and DMA channels*/
525 if (drv_data->ssp_type != PXA25x_SSP)
527 write_SSSR(drv_data->clear_sr, reg);
528 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
529 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
530 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
532 if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
533 dev_err(&drv_data->pdev->dev,
534 "dma_transfer: dma rx channel stop failed\n");
536 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
537 dev_err(&drv_data->pdev->dev,
538 "dma_transfer: ssp rx stall failed\n");
540 unmap_dma_buffers(drv_data);
542 /* Calculate number of trailing bytes, read them */
543 trailing_sssr = read_SSSR(reg);
544 if ((trailing_sssr & 0xf008) != 0xf000) {
545 drv_data->rx = drv_data->rx_end -
546 (((trailing_sssr >> 12) & 0x0f) + 1);
547 drv_data->read(drv_data);
549 msg->actual_length += drv_data->len;
551 /* Release chip select if requested, transfer delays are
552 * handled in pump_transfers */
553 if (drv_data->cs_change)
554 drv_data->cs_control(PXA2XX_CS_DEASSERT);
556 /* Move to next transfer */
557 msg->state = next_transfer(drv_data);
559 /* Schedule transfer tasklet */
560 tasklet_schedule(&drv_data->pump_transfers);
565 /* Oops, problem detected: unexpected interrupt state */
/* SSP interrupt handler for PIO transfers. Loops while masked status bits
 * are set: aborts to ERROR_STATE on rx overrun, pumps the FIFOs via the
 * chip's read/write hooks, disables the tx interrupt once all data is
 * queued, polls out trailing bytes on PXA25x (no timeout irq there), and
 * on timeout/rx-complete accounts the transfer and schedules
 * pump_transfers. Returns IRQ_HANDLED once any status was serviced. */
569 static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
572 struct spi_message *msg = drv_data->cur_msg;
573 void *reg = drv_data->ioaddr;
574 irqreturn_t handled = IRQ_NONE;
575 unsigned long limit = loops_per_jiffy << 1;
577 while ((irq_status = (read_SSSR(reg) & drv_data->mask_sr))) {
579 if (irq_status & SSSR_ROR) {
581 /* Clear and disable interrupts */
582 if (drv_data->ssp_type != PXA25x_SSP)
584 write_SSSR(drv_data->clear_sr, reg);
585 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
587 if (flush(drv_data) == 0)
588 dev_err(&drv_data->pdev->dev,
589 "interrupt_transfer: flush fail\n");
591 dev_warn(&drv_data->pdev->dev,
592 "interrupt_transfer: fifo overun\n");
594 msg->state = ERROR_STATE;
595 tasklet_schedule(&drv_data->pump_transfers);
600 /* Look for false positive timeout */
601 if ((irq_status & SSSR_TINT)
602 && (drv_data->rx < drv_data->rx_end))
603 write_SSSR(SSSR_TINT, reg);
606 drv_data->read(drv_data);
607 drv_data->write(drv_data);
609 if (drv_data->tx == drv_data->tx_end) {
610 /* Disable tx interrupt */
611 write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg);
613 /* PXA25x_SSP has no timeout, read trailing bytes */
614 if (drv_data->ssp_type == PXA25x_SSP) {
615 while ((read_SSSR(reg) & SSSR_BSY) && limit--)
616 drv_data->read(drv_data);
619 dev_err(&drv_data->pdev->dev,
620 "interrupt_transfer: "
621 "trailing byte read failed\n");
625 if ((irq_status & SSSR_TINT)
626 || (drv_data->rx == drv_data->rx_end)) {
629 if (drv_data->ssp_type != PXA25x_SSP)
631 write_SSSR(drv_data->clear_sr, reg);
632 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
634 /* Update total byte transfered */
635 msg->actual_length += drv_data->len;
637 /* Release chip select if requested, transfer delays are
638 * handled in pump_transfers */
639 if (drv_data->cs_change)
640 drv_data->cs_control(PXA2XX_CS_DEASSERT);
642 /* Move to next transfer */
643 msg->state = next_transfer(drv_data);
645 /* Schedule transfer tasklet */
646 tasklet_schedule(&drv_data->pump_transfers);
651 /* We did something */
652 handled = IRQ_HANDLED;
/* Top-level SSP irq entry point: guard against an interrupt arriving with
 * no message in flight, then dispatch to the handler selected for the
 * current transfer mode (interrupt_transfer or dma_transfer). */
658 static irqreturn_t ssp_int(int irq, void *dev_id, struct pt_regs *regs)
660 struct driver_data *drv_data = (struct driver_data *)dev_id;
662 if (!drv_data->cur_msg) {
663 dev_err(&drv_data->pdev->dev, "bad message state "
664 "in interrupt handler\n");
669 return drv_data->transfer_handler(drv_data);
/* Tasklet body driving one spi_transfer at a time. Handles message abort
 * (ERROR_STATE) and completion (DONE_STATE) via giveback(), honors the
 * previous transfer's delay_usecs, flushes the port, loads the rx/tx
 * cursors for the new transfer, then starts it either via DMA (both
 * channels programmed here, handler set to dma_transfer) or via PIO
 * interrupts (handler set to interrupt_transfer). */
672 static void pump_transfers(unsigned long data)
674 struct driver_data *drv_data = (struct driver_data *)data;
675 struct spi_message *message = NULL;
676 struct spi_transfer *transfer = NULL;
677 struct spi_transfer *previous = NULL;
678 struct chip_data *chip = NULL;
679 void *reg = drv_data->ioaddr;
681 /* Get current state information */
682 message = drv_data->cur_msg;
683 transfer = drv_data->cur_transfer;
684 chip = drv_data->cur_chip;
686 /* Handle for abort */
687 if (message->state == ERROR_STATE) {
688 message->status = -EIO;
689 giveback(message, drv_data);
693 /* Handle end of message */
694 if (message->state == DONE_STATE) {
696 giveback(message, drv_data);
700 /* Delay if requested at end of transfer*/
701 if (message->state == RUNNING_STATE) {
702 previous = list_entry(transfer->transfer_list.prev,
705 if (previous->delay_usecs)
706 udelay(previous->delay_usecs);
709 /* Setup the transfer state based on the type of transfer */
710 if (flush(drv_data) == 0) {
711 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
712 message->status = -EIO;
713 giveback(message, drv_data);
716 drv_data->cs_control = chip->cs_control;
717 drv_data->tx = (void *)transfer->tx_buf;
718 drv_data->tx_end = drv_data->tx + transfer->len;
719 drv_data->rx = transfer->rx_buf;
720 drv_data->rx_end = drv_data->rx + transfer->len;
721 drv_data->rx_dma = transfer->rx_dma;
722 drv_data->tx_dma = transfer->tx_dma;
723 drv_data->len = transfer->len;
724 drv_data->write = drv_data->tx ? chip->write : null_writer;
725 drv_data->read = drv_data->rx ? chip->read : null_reader;
726 drv_data->cs_change = transfer->cs_change;
727 message->state = RUNNING_STATE;
729 /* Try to map dma buffer and do a dma transfer if successful */
730 if ((drv_data->dma_mapped = map_dma_buffers(drv_data))) {
732 /* Ensure we have the correct interrupt handler */
733 drv_data->transfer_handler = dma_transfer;
735 /* Setup rx DMA Channel */
736 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
737 DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
738 DTADR(drv_data->rx_channel) = drv_data->rx_dma;
739 if (drv_data->rx == drv_data->null_dma_buf)
740 /* No target address increment */
741 DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
743 | chip->dma_burst_size
746 DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
749 | chip->dma_burst_size
752 /* Setup tx DMA Channel */
753 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
754 DSADR(drv_data->tx_channel) = drv_data->tx_dma;
755 DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
756 if (drv_data->tx == drv_data->null_dma_buf)
757 /* No source address increment */
758 DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
760 | chip->dma_burst_size
763 DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
766 | chip->dma_burst_size
769 /* Enable dma end irqs on SSP to detect end of transfer */
770 if (drv_data->ssp_type == PXA25x_SSP)
771 DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
773 /* Fix me, need to handle cs polarity */
774 drv_data->cs_control(PXA2XX_CS_ASSERT);
777 write_SSSR(drv_data->clear_sr, reg);
778 DCSR(drv_data->rx_channel) |= DCSR_RUN;
779 DCSR(drv_data->tx_channel) |= DCSR_RUN;
780 if (drv_data->ssp_type != PXA25x_SSP)
781 write_SSTO(chip->timeout, reg);
782 write_SSCR1(chip->cr1
783 | chip->dma_threshold
787 /* Ensure we have the correct interrupt handler */
788 drv_data->transfer_handler = interrupt_transfer;
790 /* Fix me, need to handle cs polarity */
791 drv_data->cs_control(PXA2XX_CS_ASSERT);
794 write_SSSR(drv_data->clear_sr, reg);
795 if (drv_data->ssp_type != PXA25x_SSP)
796 write_SSTO(chip->timeout, reg);
797 write_SSCR1(chip->cr1
/* Workqueue body: pull the next spi_message off the queue (unless the
 * queue is empty, stopped, or a message is already in flight), set its
 * initial state and first transfer, restore the per-chip SSP register
 * image and kick the pump_transfers tasklet. The lock is dropped before
 * any hardware access. */
804 static void pump_messages(void *data)
806 struct driver_data *drv_data = data;
809 /* Lock queue and check for queue work */
810 spin_lock_irqsave(&drv_data->lock, flags);
811 if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
813 spin_unlock_irqrestore(&drv_data->lock, flags);
817 /* Make sure we are not already running a message */
818 if (drv_data->cur_msg) {
819 spin_unlock_irqrestore(&drv_data->lock, flags);
823 /* Extract head of queue */
824 drv_data->cur_msg = list_entry(drv_data->queue.next,
825 struct spi_message, queue);
826 list_del_init(&drv_data->cur_msg->queue);
828 spin_unlock_irqrestore(&drv_data->lock, flags);
830 /* Initial message state*/
831 drv_data->cur_msg->state = START_STATE;
832 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
836 /* Setup the SSP using the per chip configuration */
837 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
838 restore_state(drv_data);
840 /* Mark as busy and launch transfers */
841 tasklet_schedule(&drv_data->pump_transfers);
/* spi_master->transfer hook: queue a message under the driver lock and
 * wake the message pump if the queue is running and idle. Messages are
 * rejected while the queue is stopped (error return elided here). */
844 static int transfer(struct spi_device *spi, struct spi_message *msg)
846 struct driver_data *drv_data = spi_master_get_devdata(spi->master);
849 spin_lock_irqsave(&drv_data->lock, flags);
851 if (drv_data->run == QUEUE_STOPPED) {
852 spin_unlock_irqrestore(&drv_data->lock, flags);
856 msg->actual_length = 0;
857 msg->status = -EINPROGRESS;
858 msg->state = START_STATE;
860 list_add_tail(&msg->queue, &drv_data->queue);
862 if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
863 queue_work(drv_data->workqueue, &drv_data->pump_messages);
865 spin_unlock_irqrestore(&drv_data->lock, flags);
/* spi_master->setup hook: build (or update) the per-device chip_data from
 * the board's pxa2xx_spi_chip info — chip-select hook, FIFO thresholds,
 * DMA burst/threshold selection, clock divisor for the SSP port in use,
 * the SSCR0/SSCR1 register images, and the PIO read/write handlers plus
 * DMA width matching bits_per_word. Validates bits_per_word against the
 * SSP type (PXA25x maxes out at 16 bits). */
870 static int setup(struct spi_device *spi)
872 struct pxa2xx_spi_chip *chip_info = NULL;
873 struct chip_data *chip;
874 struct driver_data *drv_data = spi_master_get_devdata(spi->master);
875 unsigned int clk_div;
877 if (!spi->bits_per_word)
878 spi->bits_per_word = 8;
880 if (drv_data->ssp_type != PXA25x_SSP
881 && (spi->bits_per_word < 4 || spi->bits_per_word > 32))
883 else if (spi->bits_per_word < 4 || spi->bits_per_word > 16)
886 /* Only alloc (or use chip_info) on first setup */
887 chip = spi_get_ctldata(spi);
889 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
893 chip->cs_control = null_cs_control;
894 chip->enable_dma = 0;
896 chip->threshold = SSCR1_RxTresh(1) | SSCR1_TxTresh(1);
897 chip->dma_burst_size = drv_data->master_info->enable_dma ?
900 chip_info = spi->controller_data;
903 /* chip_info isn't always needed */
905 if (chip_info->cs_control)
906 chip->cs_control = chip_info->cs_control;
/* Convert microseconds to SSTO timeout ticks — NOTE(review): the
 * 10000/2712 scaling presumably reflects a ~271.2ns (3.6864MHz) timeout
 * clock period; confirm against the SSP manual. */
908 chip->timeout = (chip_info->timeout_microsecs * 10000) / 2712;
910 chip->threshold = SSCR1_RxTresh(chip_info->rx_threshold)
911 | SSCR1_TxTresh(chip_info->tx_threshold);
913 chip->enable_dma = chip_info->dma_burst_size != 0
914 && drv_data->master_info->enable_dma;
915 chip->dma_threshold = 0;
917 if (chip->enable_dma) {
918 if (chip_info->dma_burst_size <= 8) {
919 chip->dma_threshold = SSCR1_RxTresh(8)
921 chip->dma_burst_size = DCMD_BURST8;
922 } else if (chip_info->dma_burst_size <= 16) {
923 chip->dma_threshold = SSCR1_RxTresh(16)
925 chip->dma_burst_size = DCMD_BURST16;
927 chip->dma_threshold = SSCR1_RxTresh(32)
929 chip->dma_burst_size = DCMD_BURST32;
934 if (chip_info->enable_loopback)
935 chip->cr1 = SSCR1_LBM;
938 if (drv_data->ioaddr == SSP1_VIRT)
939 clk_div = SSP1_SerClkDiv(spi->max_speed_hz);
940 else if (drv_data->ioaddr == SSP2_VIRT)
941 clk_div = SSP2_SerClkDiv(spi->max_speed_hz);
942 else if (drv_data->ioaddr == SSP3_VIRT)
943 clk_div = SSP3_SerClkDiv(spi->max_speed_hz);
949 | SSCR0_DataSize(spi->bits_per_word & 0x0f)
951 | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
952 chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) << 4)
953 | (((spi->mode & SPI_CPOL) != 0) << 3);
955 /* NOTE: PXA25x_SSP _could_ use external clocking ... */
956 if (drv_data->ssp_type != PXA25x_SSP)
957 dev_dbg(&spi->dev, "%d bits/word, %d Hz, mode %d\n",
960 / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)),
963 dev_dbg(&spi->dev, "%d bits/word, %d Hz, mode %d\n",
966 / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)),
969 if (spi->bits_per_word <= 8) {
971 chip->dma_width = DCMD_WIDTH1;
972 chip->read = u8_reader;
973 chip->write = u8_writer;
974 } else if (spi->bits_per_word <= 16) {
976 chip->dma_width = DCMD_WIDTH2;
977 chip->read = u16_reader;
978 chip->write = u16_writer;
979 } else if (spi->bits_per_word <= 32) {
980 chip->cr0 |= SSCR0_EDSS;
982 chip->dma_width = DCMD_WIDTH4;
983 chip->read = u32_reader;
984 chip->write = u32_writer;
986 dev_err(&spi->dev, "invalid wordsize\n");
991 spi_set_ctldata(spi, chip);
/* spi_master->cleanup hook: fetch and release the per-device chip_data
 * allocated by setup() (the kfree is elided from this chunk). */
996 static void cleanup(const struct spi_device *spi)
998 struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
/* Create the message-queue machinery: list + lock in the stopped state,
 * the pump_transfers tasklet, and a single-threaded workqueue named after
 * the controller's class device (failure return elided here). */
1003 static int init_queue(struct driver_data *drv_data)
1005 INIT_LIST_HEAD(&drv_data->queue);
1006 spin_lock_init(&drv_data->lock);
1008 drv_data->run = QUEUE_STOPPED;
1011 tasklet_init(&drv_data->pump_transfers,
1012 pump_transfers, (unsigned long)drv_data);
1014 INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data);
1015 drv_data->workqueue = create_singlethread_workqueue(
1016 drv_data->master->cdev.dev->bus_id);
1017 if (drv_data->workqueue == NULL)
/* Mark the queue running (error if it is already running or busy — return
 * elided), reset the current message/transfer/chip pointers and kick the
 * message pump. */
1023 static int start_queue(struct driver_data *drv_data)
1025 unsigned long flags;
1027 spin_lock_irqsave(&drv_data->lock, flags);
1029 if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
1030 spin_unlock_irqrestore(&drv_data->lock, flags);
1034 drv_data->run = QUEUE_RUNNING;
1035 drv_data->cur_msg = NULL;
1036 drv_data->cur_transfer = NULL;
1037 drv_data->cur_chip = NULL;
1038 spin_unlock_irqrestore(&drv_data->lock, flags);
1040 queue_work(drv_data->workqueue, &drv_data->pump_messages);
/* Stop accepting new messages and poll (up to 500 iterations, dropping the
 * lock each time) for the queue to drain and the controller to go idle;
 * reports failure if anything is still pending afterwards. */
1045 static int stop_queue(struct driver_data *drv_data)
1047 unsigned long flags;
1048 unsigned limit = 500;
1051 spin_lock_irqsave(&drv_data->lock, flags);
1053 /* This is a bit lame, but is optimized for the common execution path.
1054 * A wait_queue on the drv_data->busy could be used, but then the common
1055 * execution path (pump_messages) would be required to call wake_up or
1056 * friends on every SPI message. Do this instead */
1057 drv_data->run = QUEUE_STOPPED;
/* NOTE(review): the loop only waits while the queue is non-empty AND the
 * controller is busy; a non-empty-but-idle queue falls through at once.
 * Later kernels use '||' here — confirm intended semantics. */
1058 while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
1059 spin_unlock_irqrestore(&drv_data->lock, flags);
1061 spin_lock_irqsave(&drv_data->lock, flags);
1064 if (!list_empty(&drv_data->queue) || drv_data->busy)
1067 spin_unlock_irqrestore(&drv_data->lock, flags);
/* Tear down the message queue: stop it first (failure propagation elided)
 * and then destroy the workqueue. */
1072 static int destroy_queue(struct driver_data *drv_data)
1076 status = stop_queue(drv_data);
1080 destroy_workqueue(drv_data->workqueue);
/* Platform probe: allocate the spi_master (plus 16 trailing bytes used for
 * the 8-byte-aligned null DMA scratch word), map the SSP registers,
 * install the SSP-type-specific irq/dma masks, claim the irq and
 * (optionally) a DMA channel pair with DRCMR routing for the port in use,
 * program a sane default SSP configuration, then start the message queue
 * and register with the SPI core. Error paths unwind in reverse order via
 * the out_error_* labels. */
1085 static int pxa2xx_spi_probe(struct platform_device *pdev)
1087 struct device *dev = &pdev->dev;
1088 struct pxa2xx_spi_master *platform_info;
1089 struct spi_master *master;
1090 struct driver_data *drv_data = 0;
1091 struct resource *memory_resource;
1095 platform_info = dev->platform_data;
1097 if (platform_info->ssp_type == SSP_UNDEFINED) {
1098 dev_err(&pdev->dev, "undefined SSP\n");
1102 /* Allocate master with space for drv_data and null dma buffer */
1103 master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
1105 dev_err(&pdev->dev, "can not alloc spi_master\n");
1108 drv_data = spi_master_get_devdata(master);
1109 drv_data->master = master;
1110 drv_data->master_info = platform_info;
1111 drv_data->pdev = pdev;
1113 master->bus_num = pdev->id;
1114 master->num_chipselect = platform_info->num_chipselect;
1115 master->cleanup = cleanup;
1116 master->setup = setup;
1117 master->transfer = transfer;
1119 drv_data->ssp_type = platform_info->ssp_type;
/* NOTE(review): pointer-arithmetic bug — 'drv_data + sizeof(struct
 * driver_data)' scales by the struct size (element arithmetic), pointing
 * far past the 16 spare bytes allocated above. Should be
 * '(u32)drv_data + sizeof(struct driver_data)'. */
1120 drv_data->null_dma_buf = (u32 *)ALIGN((u32)(drv_data +
1121 sizeof(struct driver_data)), 8);
1123 /* Setup register addresses */
1124 memory_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1125 if (!memory_resource) {
1126 dev_err(&pdev->dev, "memory resources not defined\n");
1128 goto out_error_master_alloc;
1131 drv_data->ioaddr = (void *)io_p2v(memory_resource->start);
1132 drv_data->ssdr_physical = memory_resource->start + 0x00000010;
1133 if (platform_info->ssp_type == PXA25x_SSP) {
1134 drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
1135 drv_data->dma_cr1 = 0;
1136 drv_data->clear_sr = SSSR_ROR;
1137 drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
1139 drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
1140 drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE;
1141 drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
1142 drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
1146 irq = platform_get_irq(pdev, 0);
1148 dev_err(&pdev->dev, "irq resource not defined\n");
1150 goto out_error_master_alloc;
1153 status = request_irq(irq, ssp_int, SA_INTERRUPT, dev->bus_id, drv_data);
1155 dev_err(&pdev->dev, "can not get IRQ\n");
1156 goto out_error_master_alloc;
1159 /* Setup DMA if requested */
1160 drv_data->tx_channel = -1;
1161 drv_data->rx_channel = -1;
1162 if (platform_info->enable_dma) {
1164 /* Get two DMA channels (rx and tx) */
1165 drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
1169 if (drv_data->rx_channel < 0) {
1170 dev_err(dev, "problem (%d) requesting rx channel\n",
1171 drv_data->rx_channel);
1173 goto out_error_irq_alloc;
1175 drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
1179 if (drv_data->tx_channel < 0) {
1180 dev_err(dev, "problem (%d) requesting tx channel\n",
1181 drv_data->tx_channel);
1183 goto out_error_dma_alloc;
1186 if (drv_data->ioaddr == SSP1_VIRT) {
1187 DRCMRRXSSDR = DRCMR_MAPVLD
1188 | drv_data->rx_channel;
1189 DRCMRTXSSDR = DRCMR_MAPVLD
1190 | drv_data->tx_channel;
1191 } else if (drv_data->ioaddr == SSP2_VIRT) {
1192 DRCMRRXSS2DR = DRCMR_MAPVLD
1193 | drv_data->rx_channel;
1194 DRCMRTXSS2DR = DRCMR_MAPVLD
1195 | drv_data->tx_channel;
1196 } else if (drv_data->ioaddr == SSP3_VIRT) {
1197 DRCMRRXSS3DR = DRCMR_MAPVLD
1198 | drv_data->rx_channel;
1199 DRCMRTXSS3DR = DRCMR_MAPVLD
1200 | drv_data->tx_channel;
1202 dev_err(dev, "bad SSP type\n");
1203 goto out_error_dma_alloc;
1207 /* Enable SOC clock */
1208 pxa_set_cken(platform_info->clock_enable, 1);
1210 /* Load default SSP configuration */
1211 write_SSCR0(0, drv_data->ioaddr);
1212 write_SSCR1(SSCR1_RxTresh(4) | SSCR1_TxTresh(12), drv_data->ioaddr);
1213 write_SSCR0(SSCR0_SerClkDiv(2)
1215 | SSCR0_DataSize(8),
1217 if (drv_data->ssp_type != PXA25x_SSP)
1218 write_SSTO(0, drv_data->ioaddr);
1219 write_SSPSP(0, drv_data->ioaddr);
1221 /* Initial and start queue */
1222 status = init_queue(drv_data);
1224 dev_err(&pdev->dev, "problem initializing queue\n");
1225 goto out_error_clock_enabled;
1227 status = start_queue(drv_data);
1229 dev_err(&pdev->dev, "problem starting queue\n");
1230 goto out_error_clock_enabled;
1233 /* Register with the SPI framework */
1234 platform_set_drvdata(pdev, drv_data);
1235 status = spi_register_master(master);
1237 dev_err(&pdev->dev, "problem registering spi master\n");
1238 goto out_error_queue_alloc;
1243 out_error_queue_alloc:
1244 destroy_queue(drv_data);
1246 out_error_clock_enabled:
1247 pxa_set_cken(platform_info->clock_enable, 0);
1249 out_error_dma_alloc:
1250 if (drv_data->tx_channel != -1)
1251 pxa_free_dma(drv_data->tx_channel);
1252 if (drv_data->rx_channel != -1)
1253 pxa_free_dma(drv_data->rx_channel);
1255 out_error_irq_alloc:
1256 free_irq(irq, drv_data);
1258 out_error_master_alloc:
1259 spi_master_put(master);
/* Platform remove: drain and destroy the message queue, quiesce the SSP
 * and gate its clock, release the DMA routing and channels, free the irq
 * and unregister the master. Clears drvdata to guard against re-entry. */
1263 static int pxa2xx_spi_remove(struct platform_device *pdev)
1265 struct driver_data *drv_data = platform_get_drvdata(pdev);
1272 /* Remove the queue */
1273 status = destroy_queue(drv_data);
1277 /* Disable the SSP at the peripheral and SOC level */
1278 write_SSCR0(0, drv_data->ioaddr);
1279 pxa_set_cken(drv_data->master_info->clock_enable, 0);
1282 if (drv_data->master_info->enable_dma) {
1283 if (drv_data->ioaddr == SSP1_VIRT) {
1286 } else if (drv_data->ioaddr == SSP2_VIRT) {
1289 } else if (drv_data->ioaddr == SSP3_VIRT) {
1293 pxa_free_dma(drv_data->tx_channel);
1294 pxa_free_dma(drv_data->rx_channel);
1298 irq = platform_get_irq(pdev, 0);
1300 free_irq(irq, drv_data);
1302 /* Disconnect from the SPI framework */
1303 spi_unregister_master(drv_data->master);
1305 /* Prevent double remove */
1306 platform_set_drvdata(pdev, NULL);
/* Shutdown is implemented as remove, with any failure only logged. */
1311 static void pxa2xx_spi_shutdown(struct platform_device *pdev)
1315 if ((status = pxa2xx_spi_remove(pdev)) != 0)
1316 dev_err(&pdev->dev, "shutdown failed with %d\n", status);
/* device_for_each_child() helper: refuse suspend while any child device is
 * not already in the requested power state. */
1320 static int suspend_devices(struct device *dev, void *pm_message)
1322 pm_message_t *state = pm_message;
1324 if (dev->power.power_state.event != state->event) {
1325 dev_warn(dev, "pm state does not match request\n");
/* Suspend: verify all child devices are quiesced, stop the message queue,
 * then disable the SSP and gate its clock. */
1332 static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
1334 struct driver_data *drv_data = platform_get_drvdata(pdev);
1337 /* Check all children for current power state */
1338 if (device_for_each_child(&pdev->dev, &state, suspend_devices) != 0) {
1339 dev_warn(&pdev->dev, "suspend aborted\n");
1343 status = stop_queue(drv_data);
1346 write_SSCR0(0, drv_data->ioaddr);
1347 pxa_set_cken(drv_data->master_info->clock_enable, 0);
/* Resume: re-enable the SSP clock and restart the message queue. */
1352 static int pxa2xx_spi_resume(struct platform_device *pdev)
1354 struct driver_data *drv_data = platform_get_drvdata(pdev);
1357 /* Enable the SSP clock */
1358 pxa_set_cken(drv_data->master_info->clock_enable, 1);
1360 /* Start the queue running */
1361 status = start_queue(drv_data);
1363 dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
1370 #define pxa2xx_spi_suspend NULL
1371 #define pxa2xx_spi_resume NULL
1372 #endif /* CONFIG_PM */
/* Platform driver glue; without CONFIG_PM the suspend/resume entries
 * resolve to NULL via the #defines earlier in the file. */
1374 static struct platform_driver driver = {
1376 .name = "pxa2xx-spi",
1377 .bus = &platform_bus_type,
1378 .owner = THIS_MODULE,
1380 .probe = pxa2xx_spi_probe,
1381 .remove = __devexit_p(pxa2xx_spi_remove),
1382 .shutdown = pxa2xx_spi_shutdown,
1383 .suspend = pxa2xx_spi_suspend,
1384 .resume = pxa2xx_spi_resume,
/* Module entry point: register the platform driver (return elided). */
1387 static int __init pxa2xx_spi_init(void)
1389 platform_driver_register(&driver);
1393 module_init(pxa2xx_spi_init);
/* Module exit point: unregister the platform driver. */
1395 static void __exit pxa2xx_spi_exit(void)
1397 platform_driver_unregister(&driver);
1399 module_exit(pxa2xx_spi_exit);