MMC: OMAP: Lazy clock shutdown
drivers/mmc/host/omap.c (linux-2.6)
1 /*
2  *  linux/drivers/mmc/host/omap.c
3  *
4  *  Copyright (C) 2004 Nokia Corporation
5  *  Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com>
6  *  Misc hacks here and there by Tony Lindgren <tony@atomide.com>
7  *  Other hacks (DMA, SD, etc) by David Brownell
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/init.h>
17 #include <linux/ioport.h>
18 #include <linux/platform_device.h>
19 #include <linux/interrupt.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/delay.h>
22 #include <linux/spinlock.h>
23 #include <linux/timer.h>
24 #include <linux/mmc/host.h>
25 #include <linux/mmc/card.h>
26 #include <linux/clk.h>
27 #include <linux/scatterlist.h>
28 #include <linux/i2c/tps65010.h>
29
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <asm/mach-types.h>
33
34 #include <asm/arch/board.h>
35 #include <asm/arch/mmc.h>
36 #include <asm/arch/gpio.h>
37 #include <asm/arch/dma.h>
38 #include <asm/arch/mux.h>
39 #include <asm/arch/fpga.h>
40
41 #define OMAP_MMC_REG_CMD        0x00
42 #define OMAP_MMC_REG_ARGL       0x04
43 #define OMAP_MMC_REG_ARGH       0x08
44 #define OMAP_MMC_REG_CON        0x0c
45 #define OMAP_MMC_REG_STAT       0x10
46 #define OMAP_MMC_REG_IE         0x14
47 #define OMAP_MMC_REG_CTO        0x18
48 #define OMAP_MMC_REG_DTO        0x1c
49 #define OMAP_MMC_REG_DATA       0x20
50 #define OMAP_MMC_REG_BLEN       0x24
51 #define OMAP_MMC_REG_NBLK       0x28
52 #define OMAP_MMC_REG_BUF        0x2c
53 #define OMAP_MMC_REG_SDIO       0x34
54 #define OMAP_MMC_REG_REV        0x3c
55 #define OMAP_MMC_REG_RSP0       0x40
56 #define OMAP_MMC_REG_RSP1       0x44
57 #define OMAP_MMC_REG_RSP2       0x48
58 #define OMAP_MMC_REG_RSP3       0x4c
59 #define OMAP_MMC_REG_RSP4       0x50
60 #define OMAP_MMC_REG_RSP5       0x54
61 #define OMAP_MMC_REG_RSP6       0x58
62 #define OMAP_MMC_REG_RSP7       0x5c
63 #define OMAP_MMC_REG_IOSR       0x60
64 #define OMAP_MMC_REG_SYSC       0x64
65 #define OMAP_MMC_REG_SYSS       0x68
66
67 #define OMAP_MMC_STAT_CARD_ERR          (1 << 14)
68 #define OMAP_MMC_STAT_CARD_IRQ          (1 << 13)
69 #define OMAP_MMC_STAT_OCR_BUSY          (1 << 12)
70 #define OMAP_MMC_STAT_A_EMPTY           (1 << 11)
71 #define OMAP_MMC_STAT_A_FULL            (1 << 10)
72 #define OMAP_MMC_STAT_CMD_CRC           (1 <<  8)
73 #define OMAP_MMC_STAT_CMD_TOUT          (1 <<  7)
74 #define OMAP_MMC_STAT_DATA_CRC          (1 <<  6)
75 #define OMAP_MMC_STAT_DATA_TOUT         (1 <<  5)
76 #define OMAP_MMC_STAT_END_BUSY          (1 <<  4)
77 #define OMAP_MMC_STAT_END_OF_DATA       (1 <<  3)
78 #define OMAP_MMC_STAT_CARD_BUSY         (1 <<  2)
79 #define OMAP_MMC_STAT_END_OF_CMD        (1 <<  0)
80
81 #define OMAP_MMC_READ(host, reg)        __raw_readw((host)->virt_base + OMAP_MMC_REG_##reg)
82 #define OMAP_MMC_WRITE(host, reg, val)  __raw_writew((val), (host)->virt_base + OMAP_MMC_REG_##reg)
83
84 /*
85  * Command types
86  */
87 #define OMAP_MMC_CMDTYPE_BC     0
88 #define OMAP_MMC_CMDTYPE_BCR    1
89 #define OMAP_MMC_CMDTYPE_AC     2
90 #define OMAP_MMC_CMDTYPE_ADTC   3
91
92
93 #define DRIVER_NAME "mmci-omap"
94
95 /* Specifies how often in millisecs to poll for card status changes
96  * when the cover switch is open */
97 #define OMAP_MMC_COVER_POLL_DELAY       500
98
99 struct mmc_omap_host;
100
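/*
 * Per-slot state: one controller may serve several slots, selected through
 * the board-specific switch_slot() hook in the platform data.
 */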
101 struct mmc_omap_slot {
102         int                     id;
103         unsigned int            vdd;
104         u16                     saved_con;
105         u16                     bus_mode;
106         unsigned int            fclk_freq;
107         unsigned                powered:1;
108
109         struct tasklet_struct   cover_tasklet;
110         struct timer_list       cover_timer;
111         unsigned                cover_open;
112
113         struct mmc_request      *mrq;
114         struct mmc_omap_host    *host;
115         struct mmc_host         *mmc;
116         struct omap_mmc_slot_data *pdata;
117 };
118
119 struct mmc_omap_host {
120         int                     initialized;
121         int                     suspended;
122         struct mmc_request *    mrq;
123         struct mmc_command *    cmd;
124         struct mmc_data *       data;
125         struct mmc_host *       mmc;
126         struct device *         dev;
127         unsigned char           id; /* 16xx chips have 2 MMC blocks */
128         struct clk *            iclk;
129         struct clk *            fclk;
130         struct resource         *mem_res;
131         void __iomem            *virt_base;
132         unsigned int            phys_base;
133         int                     irq;
134         unsigned char           bus_mode;
135         unsigned char           hw_bus_mode;
136
137         struct work_struct      cmd_abort_work;
138         unsigned                abort:1;
139         struct timer_list       cmd_abort_timer;
140
141         unsigned int            sg_len;
142         int                     sg_idx;
143         u16 *                   buffer;
144         u32                     buffer_bytes_left;
145         u32                     total_bytes_left;
146
147         unsigned                use_dma:1;
148         unsigned                brs_received:1, dma_done:1;
149         unsigned                dma_is_read:1;
150         unsigned                dma_in_use:1;
151         int                     dma_ch;
152         spinlock_t              dma_lock;
153         struct timer_list       dma_timer;
154         unsigned                dma_len;
155
156         short                   power_pin;
157
158         struct mmc_omap_slot    *slots[OMAP_MMC_MAX_SLOTS];
159         struct mmc_omap_slot    *current_slot;
160         spinlock_t              slot_lock;
161         wait_queue_head_t       slot_wq;
162         int                     nr_slots;
163
164         struct timer_list       clk_timer;
165         spinlock_t              clk_lock;     /* for changing enabled state */
166         unsigned int            fclk_enabled:1;
167
168         struct omap_mmc_platform_data *pdata;
169 };
170
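/*
 * Give the card at least eight cycles of the slot's current fclk before the
 * clock is gated or the slot is switched; a no-op if fclk is already off.
 */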
171 void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
172 {
173         unsigned long tick_ns;
174
175         if (slot != NULL && slot->host->fclk_enabled && slot->fclk_freq > 0) {
176                 tick_ns = (1000000000 + slot->fclk_freq - 1) / slot->fclk_freq;
177                 ndelay(8 * tick_ns);
178         }
179 }
180
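/*
 * Turn the controller functional clock on or off, tracking the current state
 * under clk_lock so that clk_enable()/clk_disable() calls stay balanced.
 */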
181 void mmc_omap_fclk_enable(struct mmc_omap_host *host, unsigned int enable)
182 {
183         unsigned long flags;
184
185         spin_lock_irqsave(&host->clk_lock, flags);
186         if (host->fclk_enabled != enable) {
187                 host->fclk_enabled = enable;
188                 if (enable)
189                         clk_enable(host->fclk);
190                 else
191                         clk_disable(host->fclk);
192         }
193         spin_unlock_irqrestore(&host->clk_lock, flags);
194 }
195
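/*
 * Route the controller to the given slot.  Unless the caller has already
 * claimed the controller (claimed != 0), wait until no other slot owns it.
 * When claimed, fclk is enabled and the slot's saved CON value is restored so
 * a request can start immediately; otherwise fclk is gated off again.
 */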
196 static void mmc_omap_select_slot(struct mmc_omap_slot *slot, int claimed)
197 {
198         struct mmc_omap_host *host = slot->host;
199         unsigned long flags;
200
201         if (claimed)
202                 goto no_claim;
203         spin_lock_irqsave(&host->slot_lock, flags);
204         while (host->mmc != NULL) {
205                 spin_unlock_irqrestore(&host->slot_lock, flags);
206                 wait_event(host->slot_wq, host->mmc == NULL);
207                 spin_lock_irqsave(&host->slot_lock, flags);
208         }
209         host->mmc = slot->mmc;
210         spin_unlock_irqrestore(&host->slot_lock, flags);
211 no_claim:
212         del_timer(&host->clk_timer);
213         if (host->current_slot != slot || !claimed)
214                 mmc_omap_fclk_offdelay(host->current_slot);
215
216         if (host->current_slot != slot) {
217                 OMAP_MMC_WRITE(host, CON, slot->saved_con & 0xFC00);
218                 if (host->pdata->switch_slot != NULL)
219                         host->pdata->switch_slot(mmc_dev(slot->mmc), slot->id);
220                 host->current_slot = slot;
221         }
222
223         if (claimed) {
224                 mmc_omap_fclk_enable(host, 1);
225
226                 /* Doing the dummy read here seems to work around some bug
227                  * at least in OMAP24xx silicon where the command would not
228                  * start after writing the CMD register. Sigh. */
229                 OMAP_MMC_READ(host, CON);
230
231                 OMAP_MMC_WRITE(host, CON, slot->saved_con);
232         } else
233                 mmc_omap_fclk_enable(host, 0);
234 }
235
236 static void mmc_omap_start_request(struct mmc_omap_host *host,
237                                    struct mmc_request *req);
238
239 static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
240 {
241         struct mmc_omap_host *host = slot->host;
242         unsigned long flags;
243         int i;
244
245         BUG_ON(slot == NULL || host->mmc == NULL);
246
247         if (clk_enabled)
248                 /* Keeps clock running for at least 8 cycles on valid freq */
249                 mod_timer(&host->clk_timer, jiffies  + HZ/10);
250         else {
251                 del_timer(&host->clk_timer);
252                 mmc_omap_fclk_offdelay(slot);
253                 mmc_omap_fclk_enable(host, 0);
254         }
255
256         spin_lock_irqsave(&host->slot_lock, flags);
257         /* Check for any pending requests */
258         for (i = 0; i < host->nr_slots; i++) {
259                 struct mmc_omap_slot *new_slot;
260                 struct mmc_request *rq;
261
262                 if (host->slots[i] == NULL || host->slots[i]->mrq == NULL)
263                         continue;
264
265                 new_slot = host->slots[i];
266                 /* The current slot should not have a request in queue */
267                 BUG_ON(new_slot == host->current_slot);
268
269                 host->mmc = new_slot->mmc;
270                 spin_unlock_irqrestore(&host->slot_lock, flags);
271                 mmc_omap_select_slot(new_slot, 1);
272                 rq = new_slot->mrq;
273                 new_slot->mrq = NULL;
274                 mmc_omap_start_request(host, rq);
275                 return;
276         }
277
278         host->mmc = NULL;
279         wake_up(&host->slot_wq);
280         spin_unlock_irqrestore(&host->slot_lock, flags);
281 }
282
283 static inline
284 int mmc_omap_cover_is_open(struct mmc_omap_slot *slot)
285 {
286         if (slot->pdata->get_cover_state)
287                 return slot->pdata->get_cover_state(mmc_dev(slot->mmc),
288                                                     slot->id);
289         return 0;
290 }
291
292 static ssize_t
293 mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr,
294                            char *buf)
295 {
296         struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
297         struct mmc_omap_slot *slot = mmc_priv(mmc);
298
299         return sprintf(buf, "%s\n", mmc_omap_cover_is_open(slot) ? "open" :
300                        "closed");
301 }
302
303 static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
304
305 static ssize_t
306 mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr,
307                         char *buf)
308 {
309         struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
310         struct mmc_omap_slot *slot = mmc_priv(mmc);
311
312         return sprintf(buf, "%s\n", slot->pdata->name);
313 }
314
315 static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL);
316
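/*
 * Build and issue the CMD register value: opcode in the low bits, response
 * type shifted by 8, command type shifted by 12, plus flags for open-drain
 * bus mode (bit 6), busy-expected (bit 11) and read transfers (bit 15).
 * A half-second abort timer is armed in case the command never completes.
 */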
317 static void
318 mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
319 {
320         u32 cmdreg;
321         u32 resptype;
322         u32 cmdtype;
323
324         host->cmd = cmd;
325
326         resptype = 0;
327         cmdtype = 0;
328
329         /* Our hardware needs to know the exact type */
330         switch (mmc_resp_type(cmd)) {
331         case MMC_RSP_NONE:
332                 break;
333         case MMC_RSP_R1:
334         case MMC_RSP_R1B:
335                 /* resp 1, 1b, 6, 7 */
336                 resptype = 1;
337                 break;
338         case MMC_RSP_R2:
339                 resptype = 2;
340                 break;
341         case MMC_RSP_R3:
342                 resptype = 3;
343                 break;
344         default:
345                 dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n", mmc_resp_type(cmd));
346                 break;
347         }
348
349         if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) {
350                 cmdtype = OMAP_MMC_CMDTYPE_ADTC;
351         } else if (mmc_cmd_type(cmd) == MMC_CMD_BC) {
352                 cmdtype = OMAP_MMC_CMDTYPE_BC;
353         } else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) {
354                 cmdtype = OMAP_MMC_CMDTYPE_BCR;
355         } else {
356                 cmdtype = OMAP_MMC_CMDTYPE_AC;
357         }
358
359         cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);
360
361         if (host->current_slot->bus_mode == MMC_BUSMODE_OPENDRAIN)
362                 cmdreg |= 1 << 6;
363
364         if (cmd->flags & MMC_RSP_BUSY)
365                 cmdreg |= 1 << 11;
366
367         if (host->data && !(host->data->flags & MMC_DATA_WRITE))
368                 cmdreg |= 1 << 15;
369
370         mod_timer(&host->cmd_abort_timer, jiffies + HZ/2);
371
372         OMAP_MMC_WRITE(host, CTO, 200);
373         OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff);
374         OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16);
375         OMAP_MMC_WRITE(host, IE,
376                        OMAP_MMC_STAT_A_EMPTY    | OMAP_MMC_STAT_A_FULL    |
377                        OMAP_MMC_STAT_CMD_CRC    | OMAP_MMC_STAT_CMD_TOUT  |
378                        OMAP_MMC_STAT_DATA_CRC   | OMAP_MMC_STAT_DATA_TOUT |
379                        OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR  |
380                        OMAP_MMC_STAT_END_OF_DATA);
381         OMAP_MMC_WRITE(host, CMD, cmdreg);
382 }
383
384 static void
385 mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
386                      int abort)
387 {
388         enum dma_data_direction dma_data_dir;
389
390         BUG_ON(host->dma_ch < 0);
391         if (data->error)
392                 omap_stop_dma(host->dma_ch);
393         /* Release DMA channel lazily */
394         mod_timer(&host->dma_timer, jiffies + HZ);
395         if (data->flags & MMC_DATA_WRITE)
396                 dma_data_dir = DMA_TO_DEVICE;
397         else
398                 dma_data_dir = DMA_FROM_DEVICE;
399         dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
400                      dma_data_dir);
401 }
402
403 static void
404 mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
405 {
406         if (host->dma_in_use)
407                 mmc_omap_release_dma(host, data, data->error);
408
409         host->data = NULL;
410         host->sg_len = 0;
411
412         /* NOTE:  MMC layer will sometimes poll-wait CMD13 next, issuing
413          * dozens of requests until the card finishes writing data.
414          * It'd be cheaper to just wait till an EOFB interrupt arrives...
415          */
416
417         if (!data->stop) {
418                 struct mmc_host *mmc;
419
420                 host->mrq = NULL;
421                 mmc = host->mmc;
422                 mmc_omap_release_slot(host->current_slot, 1);
423                 mmc_request_done(mmc, data->mrq);
424                 return;
425         }
426
427         mmc_omap_start_command(host, data->stop);
428 }
429
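/*
 * Force the controller to abort the current transfer: clear STAT, issue the
 * abort command and poll for end-of-command, retrying the whole sequence up
 * to 'maxloops' times before giving up.
 */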
430 static void
431 mmc_omap_send_abort(struct mmc_omap_host *host, int maxloops)
432 {
433         struct mmc_omap_slot *slot = host->current_slot;
434         unsigned int restarts, passes, timeout;
435         u16 stat = 0;
436
437         /* Sending abort takes 80 clocks. Have some extra and round up */
438         timeout = (120*1000000 + slot->fclk_freq - 1)/slot->fclk_freq;
439         restarts = 0;
440         while (restarts < maxloops) {
441                 OMAP_MMC_WRITE(host, STAT, 0xFFFF);
442                 OMAP_MMC_WRITE(host, CMD, (3 << 12) | (1 << 7));
443
444                 passes = 0;
445                 while (passes < timeout) {
446                         stat = OMAP_MMC_READ(host, STAT);
447                         if (stat & OMAP_MMC_STAT_END_OF_CMD)
448                                 goto out;
449                         udelay(1);
450                         passes++;
451                 }
452
453                 restarts++;
454         }
455 out:
456         OMAP_MMC_WRITE(host, STAT, stat);
457 }
458
459 static void
460 mmc_omap_abort_xfer(struct mmc_omap_host *host, struct mmc_data *data)
461 {
462         if (host->dma_in_use)
463                 mmc_omap_release_dma(host, data, 1);
464
465         host->data = NULL;
466         host->sg_len = 0;
467
468         mmc_omap_send_abort(host, 10000);
469 }
470
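/*
 * For DMA transfers the block-received (BRS) interrupt and the DMA completion
 * callback may arrive in either order; whichever of the two happens last
 * (tracked under dma_lock) completes the request via mmc_omap_xfer_done().
 */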
471 static void
472 mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
473 {
474         unsigned long flags;
475         int done;
476
477         if (!host->dma_in_use) {
478                 mmc_omap_xfer_done(host, data);
479                 return;
480         }
481         done = 0;
482         spin_lock_irqsave(&host->dma_lock, flags);
483         if (host->dma_done)
484                 done = 1;
485         else
486                 host->brs_received = 1;
487         spin_unlock_irqrestore(&host->dma_lock, flags);
488         if (done)
489                 mmc_omap_xfer_done(host, data);
490 }
491
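/*
 * DMA channels are released lazily: mmc_omap_release_dma() only re-arms this
 * timer, so that back-to-back transfers in the same direction can reuse the
 * channel; it is actually freed here after about a second of idleness.
 */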
492 static void
493 mmc_omap_dma_timer(unsigned long data)
494 {
495         struct mmc_omap_host *host = (struct mmc_omap_host *) data;
496
497         BUG_ON(host->dma_ch < 0);
498         omap_free_dma(host->dma_ch);
499         host->dma_ch = -1;
500 }
501
502 static void
503 mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
504 {
505         unsigned long flags;
506         int done;
507
508         done = 0;
509         spin_lock_irqsave(&host->dma_lock, flags);
510         if (host->brs_received)
511                 done = 1;
512         else
513                 host->dma_done = 1;
514         spin_unlock_irqrestore(&host->dma_lock, flags);
515         if (done)
516                 mmc_omap_xfer_done(host, data);
517 }
518
519 static void
520 mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
521 {
522         host->cmd = NULL;
523
524         del_timer(&host->cmd_abort_timer);
525
526         if (cmd->flags & MMC_RSP_PRESENT) {
527                 if (cmd->flags & MMC_RSP_136) {
528                         /* response type 2 */
529                         cmd->resp[3] =
530                                 OMAP_MMC_READ(host, RSP0) |
531                                 (OMAP_MMC_READ(host, RSP1) << 16);
532                         cmd->resp[2] =
533                                 OMAP_MMC_READ(host, RSP2) |
534                                 (OMAP_MMC_READ(host, RSP3) << 16);
535                         cmd->resp[1] =
536                                 OMAP_MMC_READ(host, RSP4) |
537                                 (OMAP_MMC_READ(host, RSP5) << 16);
538                         cmd->resp[0] =
539                                 OMAP_MMC_READ(host, RSP6) |
540                                 (OMAP_MMC_READ(host, RSP7) << 16);
541                 } else {
542                         /* response types 1, 1b, 3, 4, 5, 6 */
543                         cmd->resp[0] =
544                                 OMAP_MMC_READ(host, RSP6) |
545                                 (OMAP_MMC_READ(host, RSP7) << 16);
546                 }
547         }
548
549         if (host->data == NULL || cmd->error) {
550                 struct mmc_host *mmc;
551
552                 if (host->data != NULL)
553                         mmc_omap_abort_xfer(host, host->data);
554                 host->mrq = NULL;
555                 mmc = host->mmc;
556                 mmc_omap_release_slot(host->current_slot, 1);
557                 mmc_request_done(mmc, cmd->mrq);
558         }
559 }
560
561 /*
562  * Abort stuck command. Can occur when card is removed while it is being
563  * read.
564  */
565 static void mmc_omap_abort_command(struct work_struct *work)
566 {
567         struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
568                                                   cmd_abort_work);
569         BUG_ON(!host->cmd);
570
571         dev_dbg(mmc_dev(host->mmc), "Aborting stuck command CMD%d\n",
572                 host->cmd->opcode);
573
574         if (host->cmd->error == 0)
575                 host->cmd->error = -ETIMEDOUT;
576
577         if (host->data == NULL) {
578                 struct mmc_command *cmd;
579                 struct mmc_host    *mmc;
580
581                 cmd = host->cmd;
582                 host->cmd = NULL;
583                 mmc_omap_send_abort(host, 10000);
584
585                 host->mrq = NULL;
586                 mmc = host->mmc;
587                 mmc_omap_release_slot(host->current_slot, 1);
588                 mmc_request_done(mmc, cmd->mrq);
589         } else
590                 mmc_omap_cmd_done(host, host->cmd);
591
592         host->abort = 0;
593         enable_irq(host->irq);
594 }
595
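/*
 * Fires when a command has not completed within the 500 ms window armed in
 * mmc_omap_start_command(); masks the controller interrupt and defers the
 * actual recovery to cmd_abort_work.
 */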
596 static void
597 mmc_omap_cmd_timer(unsigned long data)
598 {
599         struct mmc_omap_host *host = (struct mmc_omap_host *) data;
600         unsigned long flags;
601
602         spin_lock_irqsave(&host->slot_lock, flags);
603         if (host->cmd != NULL && !host->abort) {
604                 OMAP_MMC_WRITE(host, IE, 0);
605                 disable_irq(host->irq);
606                 host->abort = 1;
607                 schedule_work(&host->cmd_abort_work);
608         }
609         spin_unlock_irqrestore(&host->slot_lock, flags);
610 }
611
612 /* PIO only */
613 static void
614 mmc_omap_sg_to_buf(struct mmc_omap_host *host)
615 {
616         struct scatterlist *sg;
617
618         sg = host->data->sg + host->sg_idx;
619         host->buffer_bytes_left = sg->length;
620         host->buffer = sg_virt(sg);
621         if (host->buffer_bytes_left > host->total_bytes_left)
622                 host->buffer_bytes_left = host->total_bytes_left;
623 }
624
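/*
 * Lazy clock shutdown: armed by mmc_omap_release_slot() when a slot is
 * released with the clock left running; if no new request claims the
 * controller within ~100 ms, the functional clock is gated off here.
 */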
625 static void
626 mmc_omap_clk_timer(unsigned long data)
627 {
628         struct mmc_omap_host *host = (struct mmc_omap_host *) data;
629
630         mmc_omap_fclk_enable(host, 0);
631 }
632
633 /* PIO only */
634 static void
635 mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
636 {
637         int n;
638
639         if (host->buffer_bytes_left == 0) {
640                 host->sg_idx++;
641                 BUG_ON(host->sg_idx == host->sg_len);
642                 mmc_omap_sg_to_buf(host);
643         }
644         n = 64;
645         if (n > host->buffer_bytes_left)
646                 n = host->buffer_bytes_left;
647         host->buffer_bytes_left -= n;
648         host->total_bytes_left -= n;
649         host->data->bytes_xfered += n;
650
651         if (write) {
652                 __raw_writesw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
653         } else {
654                 __raw_readsw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n);
655         }
656 }
657
658 static inline void mmc_omap_report_irq(u16 status)
659 {
660         static const char *mmc_omap_status_bits[] = {
661                 "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
662                 "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
663         };
664         int i, c = 0;
665
666         for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
667                 if (status & (1 << i)) {
668                         if (c)
669                                 printk(" ");
670                         printk("%s", mmc_omap_status_bits[i]);
671                         c++;
672                 }
673 }
674
675 static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
676 {
677         struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id;
678         u16 status;
679         int end_command;
680         int end_transfer;
681         int transfer_error, cmd_error;
682
683         if (host->cmd == NULL && host->data == NULL) {
684                 status = OMAP_MMC_READ(host, STAT);
685                 dev_info(mmc_dev(host->slots[0]->mmc),
686                          "Spurious IRQ 0x%04x\n", status);
687                 if (status != 0) {
688                         OMAP_MMC_WRITE(host, STAT, status);
689                         OMAP_MMC_WRITE(host, IE, 0);
690                 }
691                 return IRQ_HANDLED;
692         }
693
694         end_command = 0;
695         end_transfer = 0;
696         transfer_error = 0;
697         cmd_error = 0;
698
699         while ((status = OMAP_MMC_READ(host, STAT)) != 0) {
700                 int cmd;
701
702                 OMAP_MMC_WRITE(host, STAT, status);
703                 if (host->cmd != NULL)
704                         cmd = host->cmd->opcode;
705                 else
706                         cmd = -1;
707 #ifdef CONFIG_MMC_DEBUG
708                 dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
709                         status, cmd);
710                 mmc_omap_report_irq(status);
711                 printk("\n");
712 #endif
713                 if (host->total_bytes_left) {
714                         if ((status & OMAP_MMC_STAT_A_FULL) ||
715                             (status & OMAP_MMC_STAT_END_OF_DATA))
716                                 mmc_omap_xfer_data(host, 0);
717                         if (status & OMAP_MMC_STAT_A_EMPTY)
718                                 mmc_omap_xfer_data(host, 1);
719                 }
720
721                 if (status & OMAP_MMC_STAT_END_OF_DATA)
722                         end_transfer = 1;
723
724                 if (status & OMAP_MMC_STAT_DATA_TOUT) {
725                         dev_dbg(mmc_dev(host->mmc), "data timeout (CMD%d)\n",
726                                 cmd);
727                         if (host->data) {
728                                 host->data->error = -ETIMEDOUT;
729                                 transfer_error = 1;
730                         }
731                 }
732
733                 if (status & OMAP_MMC_STAT_DATA_CRC) {
734                         if (host->data) {
735                                 host->data->error = -EILSEQ;
736                                 dev_dbg(mmc_dev(host->mmc),
737                                          "data CRC error, bytes left %d\n",
738                                         host->total_bytes_left);
739                                 transfer_error = 1;
740                         } else {
741                                 dev_dbg(mmc_dev(host->mmc), "data CRC error\n");
742                         }
743                 }
744
745                 if (status & OMAP_MMC_STAT_CMD_TOUT) {
746                         /* Timeouts are routine with some commands */
747                         if (host->cmd) {
748                                 struct mmc_omap_slot *slot =
749                                         host->current_slot;
750                                 if (slot == NULL ||
751                                     !mmc_omap_cover_is_open(slot))
752                                         dev_err(mmc_dev(host->mmc),
753                                                 "command timeout (CMD%d)\n",
754                                                 cmd);
755                                 host->cmd->error = -ETIMEDOUT;
756                                 end_command = 1;
757                                 cmd_error = 1;
758                         }
759                 }
760
761                 if (status & OMAP_MMC_STAT_CMD_CRC) {
762                         if (host->cmd) {
763                                 dev_err(mmc_dev(host->mmc),
764                                         "command CRC error (CMD%d, arg 0x%08x)\n",
765                                         cmd, host->cmd->arg);
766                                 host->cmd->error = -EILSEQ;
767                                 end_command = 1;
768                                 cmd_error = 1;
769                         } else
770                                 dev_err(mmc_dev(host->mmc),
771                                         "command CRC error without cmd?\n");
772                 }
773
774                 if (status & OMAP_MMC_STAT_CARD_ERR) {
775                         dev_dbg(mmc_dev(host->mmc),
776                                 "ignoring card status error (CMD%d)\n",
777                                 cmd);
778                         end_command = 1;
779                 }
780
781                 /*
782                  * NOTE: On 1610 the END_OF_CMD may come too early when
783                  * starting a write
784                  */
785                 if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
786                     (!(status & OMAP_MMC_STAT_A_EMPTY))) {
787                         end_command = 1;
788                 }
789         }
790
791         if (cmd_error && host->data) {
792                 del_timer(&host->cmd_abort_timer);
793                 host->abort = 1;
794                 OMAP_MMC_WRITE(host, IE, 0);
795                 disable_irq(host->irq);
796                 schedule_work(&host->cmd_abort_work);
797                 return IRQ_HANDLED;
798         }
799
800         if (end_command)
801                 mmc_omap_cmd_done(host, host->cmd);
802         if (host->data != NULL) {
803                 if (transfer_error)
804                         mmc_omap_xfer_done(host, host->data);
805                 else if (end_transfer)
806                         mmc_omap_end_of_data(host, host->data);
807         }
808
809         return IRQ_HANDLED;
810 }
811
812 void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed)
813 {
814         int cover_open;
815         struct mmc_omap_host *host = dev_get_drvdata(dev);
816         struct mmc_omap_slot *slot = host->slots[num];
817
818         BUG_ON(num >= host->nr_slots);
819
820         /* Other subsystems can call in here before we're initialised. */
821         if (host->nr_slots == 0 || !host->slots[num])
822                 return;
823
824         cover_open = mmc_omap_cover_is_open(slot);
825         if (cover_open != slot->cover_open) {
826                 slot->cover_open = cover_open;
827                 sysfs_notify(&slot->mmc->class_dev.kobj, NULL, "cover_switch");
828         }
829
830         tasklet_hi_schedule(&slot->cover_tasklet);
831 }
832
833 static void mmc_omap_cover_timer(unsigned long arg)
834 {
835         struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg;
836         tasklet_schedule(&slot->cover_tasklet);
837 }
838
839 static void mmc_omap_cover_handler(unsigned long param)
840 {
841         struct mmc_omap_slot *slot = (struct mmc_omap_slot *)param;
842         int cover_open = mmc_omap_cover_is_open(slot);
843
844         mmc_detect_change(slot->mmc, 0);
845         if (!cover_open)
846                 return;
847
848         /*
849          * If no card is inserted, we postpone polling until
850          * the cover has been closed.
851          */
852         if (slot->mmc->card == NULL || !mmc_card_present(slot->mmc->card))
853                 return;
854
855         mod_timer(&slot->cover_timer,
856                   jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY));
857 }
858
859 /* Prepare to transfer the next segment of a scatterlist */
860 static void
861 mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
862 {
863         int dma_ch = host->dma_ch;
864         unsigned long data_addr;
865         u16 buf, frame;
866         u32 count;
867         struct scatterlist *sg = &data->sg[host->sg_idx];
868         int src_port = 0;
869         int dst_port = 0;
870         int sync_dev = 0;
871
872         data_addr = host->phys_base + OMAP_MMC_REG_DATA;
873         frame = data->blksz;
874         count = sg_dma_len(sg);
875
876         if ((data->blocks == 1) && (count > data->blksz))
877                 count = frame;
878
879         host->dma_len = count;
880
881         /* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
882          * Use 16 or 32 word frames when the blocksize is at least that large.
883          * Blocksize is usually 512 bytes; but not for some SD reads.
884          */
885         if (cpu_is_omap15xx() && frame > 32)
886                 frame = 32;
887         else if (frame > 64)
888                 frame = 64;
889         count /= frame;
890         frame >>= 1;
891
892         if (!(data->flags & MMC_DATA_WRITE)) {
893                 buf = 0x800f | ((frame - 1) << 8);
894
895                 if (cpu_class_is_omap1()) {
896                         src_port = OMAP_DMA_PORT_TIPB;
897                         dst_port = OMAP_DMA_PORT_EMIFF;
898                 }
899                 if (cpu_is_omap24xx())
900                         sync_dev = OMAP24XX_DMA_MMC1_RX;
901
902                 omap_set_dma_src_params(dma_ch, src_port,
903                                         OMAP_DMA_AMODE_CONSTANT,
904                                         data_addr, 0, 0);
905                 omap_set_dma_dest_params(dma_ch, dst_port,
906                                          OMAP_DMA_AMODE_POST_INC,
907                                          sg_dma_address(sg), 0, 0);
908                 omap_set_dma_dest_data_pack(dma_ch, 1);
909                 omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
910         } else {
911                 buf = 0x0f80 | ((frame - 1) << 0);
912
913                 if (cpu_class_is_omap1()) {
914                         src_port = OMAP_DMA_PORT_EMIFF;
915                         dst_port = OMAP_DMA_PORT_TIPB;
916                 }
917                 if (cpu_is_omap24xx())
918                         sync_dev = OMAP24XX_DMA_MMC1_TX;
919
920                 omap_set_dma_dest_params(dma_ch, dst_port,
921                                          OMAP_DMA_AMODE_CONSTANT,
922                                          data_addr, 0, 0);
923                 omap_set_dma_src_params(dma_ch, src_port,
924                                         OMAP_DMA_AMODE_POST_INC,
925                                         sg_dma_address(sg), 0, 0);
926                 omap_set_dma_src_data_pack(dma_ch, 1);
927                 omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
928         }
929
930         /* Max limit for DMA frame count is 0xffff */
931         BUG_ON(count > 0xffff);
932
933         OMAP_MMC_WRITE(host, BUF, buf);
934         omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
935                                      frame, count, OMAP_DMA_SYNC_FRAME,
936                                      sync_dev, 0);
937 }
938
939 /* A scatterlist segment completed */
940 static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
941 {
942         struct mmc_omap_host *host = (struct mmc_omap_host *) data;
943         struct mmc_data *mmcdat = host->data;
944
945         if (unlikely(host->dma_ch < 0)) {
946                 dev_err(mmc_dev(host->mmc),
947                         "DMA callback while DMA not enabled\n");
948                 return;
949         }
950         /* FIXME: We really should do something to _handle_ the errors */
951         if (ch_status & OMAP1_DMA_TOUT_IRQ) {
952                 dev_err(mmc_dev(host->mmc),"DMA timeout\n");
953                 return;
954         }
955         if (ch_status & OMAP_DMA_DROP_IRQ) {
956                 dev_err(mmc_dev(host->mmc), "DMA sync error\n");
957                 return;
958         }
959         if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
960                 return;
961         }
962         mmcdat->bytes_xfered += host->dma_len;
963         host->sg_idx++;
964         if (host->sg_idx < host->sg_len) {
965                 mmc_omap_prepare_dma(host, host->data);
966                 omap_start_dma(host->dma_ch);
967         } else
968                 mmc_omap_dma_done(host, host->data);
969 }
970
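/*
 * Obtain a DMA channel for the transfer: a channel kept around from a
 * previous transfer is reused if the direction matches, otherwise it is
 * freed and a new one requested on the appropriate MMC RX/TX request line.
 */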
971 static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
972 {
973         const char *dev_name;
974         int sync_dev, dma_ch, is_read, r;
975
976         is_read = !(data->flags & MMC_DATA_WRITE);
977         del_timer_sync(&host->dma_timer);
978         if (host->dma_ch >= 0) {
979                 if (is_read == host->dma_is_read)
980                         return 0;
981                 omap_free_dma(host->dma_ch);
982                 host->dma_ch = -1;
983         }
984
985         if (is_read) {
986                 if (host->id == 1) {
987                         sync_dev = OMAP_DMA_MMC_RX;
988                         dev_name = "MMC1 read";
989                 } else {
990                         sync_dev = OMAP_DMA_MMC2_RX;
991                         dev_name = "MMC2 read";
992                 }
993         } else {
994                 if (host->id == 1) {
995                         sync_dev = OMAP_DMA_MMC_TX;
996                         dev_name = "MMC1 write";
997                 } else {
998                         sync_dev = OMAP_DMA_MMC2_TX;
999                         dev_name = "MMC2 write";
1000                 }
1001         }
1002         r = omap_request_dma(sync_dev, dev_name, mmc_omap_dma_cb,
1003                              host, &dma_ch);
1004         if (r != 0) {
1005                 dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
1006                 return r;
1007         }
1008         host->dma_ch = dma_ch;
1009         host->dma_is_read = is_read;
1010
1011         return 0;
1012 }
1013
1014 static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
1015 {
1016         u16 reg;
1017
1018         reg = OMAP_MMC_READ(host, SDIO);
1019         reg &= ~(1 << 5);
1020         OMAP_MMC_WRITE(host, SDIO, reg);
1021         /* Set maximum timeout */
1022         OMAP_MMC_WRITE(host, CTO, 0xff);
1023 }
1024
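/*
 * Convert the request's data timeout (nanoseconds plus clocks) into fclk
 * cycles; if the result does not fit in the 16-bit DTO register, enable the
 * timeout multiplier bit in SDIO and scale the count down by 1024.
 */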
1025 static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
1026 {
1027         unsigned int timeout, cycle_ns;
1028         u16 reg;
1029
1030         cycle_ns = 1000000000 / host->current_slot->fclk_freq;
1031         timeout = req->data->timeout_ns / cycle_ns;
1032         timeout += req->data->timeout_clks;
1033
1034         /* Check if we need to use timeout multiplier register */
1035         reg = OMAP_MMC_READ(host, SDIO);
1036         if (timeout > 0xffff) {
1037                 reg |= (1 << 5);
1038                 timeout /= 1024;
1039         } else
1040                 reg &= ~(1 << 5);
1041         OMAP_MMC_WRITE(host, SDIO, reg);
1042         OMAP_MMC_WRITE(host, DTO, timeout);
1043 }
1044
1045 static void
1046 mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
1047 {
1048         struct mmc_data *data = req->data;
1049         int i, use_dma, block_size;
1050         unsigned sg_len;
1051
1052         host->data = data;
1053         if (data == NULL) {
1054                 OMAP_MMC_WRITE(host, BLEN, 0);
1055                 OMAP_MMC_WRITE(host, NBLK, 0);
1056                 OMAP_MMC_WRITE(host, BUF, 0);
1057                 host->dma_in_use = 0;
1058                 set_cmd_timeout(host, req);
1059                 return;
1060         }
1061
1062         block_size = data->blksz;
1063
1064         OMAP_MMC_WRITE(host, NBLK, data->blocks - 1);
1065         OMAP_MMC_WRITE(host, BLEN, block_size - 1);
1066         set_data_timeout(host, req);
1067
1068         /* cope with calling layer confusion; it issues "single
1069          * block" writes using multi-block scatterlists.
1070          */
1071         sg_len = (data->blocks == 1) ? 1 : data->sg_len;
1072
1073         /* Only do DMA for entire blocks */
1074         use_dma = host->use_dma;
1075         if (use_dma) {
1076                 for (i = 0; i < sg_len; i++) {
1077                         if ((data->sg[i].length % block_size) != 0) {
1078                                 use_dma = 0;
1079                                 break;
1080                         }
1081                 }
1082         }
1083
1084         host->sg_idx = 0;
1085         if (use_dma) {
1086                 if (mmc_omap_get_dma_channel(host, data) == 0) {
1087                         enum dma_data_direction dma_data_dir;
1088
1089                         if (data->flags & MMC_DATA_WRITE)
1090                                 dma_data_dir = DMA_TO_DEVICE;
1091                         else
1092                                 dma_data_dir = DMA_FROM_DEVICE;
1093
1094                         host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
1095                                                 sg_len, dma_data_dir);
1096                         host->total_bytes_left = 0;
1097                         mmc_omap_prepare_dma(host, req->data);
1098                         host->brs_received = 0;
1099                         host->dma_done = 0;
1100                         host->dma_in_use = 1;
1101                 } else
1102                         use_dma = 0;
1103         }
1104
1105         /* Revert to PIO? */
1106         if (!use_dma) {
1107                 OMAP_MMC_WRITE(host, BUF, 0x1f1f);
1108                 host->total_bytes_left = data->blocks * block_size;
1109                 host->sg_len = sg_len;
1110                 mmc_omap_sg_to_buf(host);
1111                 host->dma_in_use = 0;
1112         }
1113 }
1114
1115 static void mmc_omap_start_request(struct mmc_omap_host *host,
1116                                    struct mmc_request *req)
1117 {
1118         BUG_ON(host->mrq != NULL);
1119
1120         host->mrq = req;
1121
1122         /* only touch fifo AFTER the controller readies it */
1123         mmc_omap_prepare_data(host, req);
1124         mmc_omap_start_command(host, req->cmd);
1125         if (host->dma_in_use)
1126                 omap_start_dma(host->dma_ch);
1127         BUG_ON(irqs_disabled());
1128 }
1129
1130 static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
1131 {
1132         struct mmc_omap_slot *slot = mmc_priv(mmc);
1133         struct mmc_omap_host *host = slot->host;
1134         unsigned long flags;
1135
1136         spin_lock_irqsave(&host->slot_lock, flags);
1137         if (host->mmc != NULL) {
1138                 BUG_ON(slot->mrq != NULL);
1139                 slot->mrq = req;
1140                 spin_unlock_irqrestore(&host->slot_lock, flags);
1141                 return;
1142         } else
1143                 host->mmc = mmc;
1144         spin_unlock_irqrestore(&host->slot_lock, flags);
1145         mmc_omap_select_slot(slot, 1);
1146         mmc_omap_start_request(host, req);
1147 }
1148
1149 static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
1150                                 int vdd)
1151 {
1152         struct mmc_omap_host *host;
1153
1154         host = slot->host;
1155
1156         if (slot->pdata->set_power != NULL)
1157                 slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
1158                                         vdd);
1159
1160         if (cpu_is_omap24xx()) {
1161                 u16 w;
1162
1163                 if (power_on) {
1164                         w = OMAP_MMC_READ(host, CON);
1165                         OMAP_MMC_WRITE(host, CON, w | (1 << 11));
1166                 } else {
1167                         w = OMAP_MMC_READ(host, CON);
1168                         OMAP_MMC_WRITE(host, CON, w & ~(1 << 11));
1169                 }
1170         }
1171 }
1172
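/*
 * Compute the CON divisor for the requested clock: the smallest divisor
 * giving a rate that does not exceed ios->clock, capped at 250.  Bit 15 of
 * the returned value additionally selects 4-bit bus width.
 */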
1173 static int mmc_omap_calc_divisor(struct mmc_host *mmc, struct mmc_ios *ios)
1174 {
1175         struct mmc_omap_slot *slot = mmc_priv(mmc);
1176         struct mmc_omap_host *host = slot->host;
1177         int func_clk_rate = clk_get_rate(host->fclk);
1178         int dsor;
1179
1180         if (ios->clock == 0)
1181                 return 0;
1182
1183         dsor = func_clk_rate / ios->clock;
1184         if (dsor < 1)
1185                 dsor = 1;
1186
1187         if (func_clk_rate / dsor > ios->clock)
1188                 dsor++;
1189
1190         if (dsor > 250)
1191                 dsor = 250;
1192
1193         slot->fclk_freq = func_clk_rate / dsor;
1194
1195         if (ios->bus_width == MMC_BUS_WIDTH_4)
1196                 dsor |= 1 << 15;
1197
1198         return dsor;
1199 }
1200
1201 static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1202 {
1203         struct mmc_omap_slot *slot = mmc_priv(mmc);
1204         struct mmc_omap_host *host = slot->host;
1205         int i, dsor;
1206         int clk_enabled;
1207
1208         mmc_omap_select_slot(slot, 0);
1209
1210         dsor = mmc_omap_calc_divisor(mmc, ios);
1211
1212         if (ios->vdd != slot->vdd)
1213                 slot->vdd = ios->vdd;
1214
1215         clk_enabled = 0;
1216         switch (ios->power_mode) {
1217         case MMC_POWER_OFF:
1218                 mmc_omap_set_power(slot, 0, ios->vdd);
1219                 break;
1220         case MMC_POWER_UP:
1221                 /* Cannot touch dsor yet, just power up MMC */
1222                 mmc_omap_set_power(slot, 1, ios->vdd);
1223                 goto exit;
1224         case MMC_POWER_ON:
1225                 mmc_omap_fclk_enable(host, 1);
1226                 clk_enabled = 1;
1227                 dsor |= 1 << 11;
1228                 break;
1229         }
1230
1231         if (slot->bus_mode != ios->bus_mode) {
1232                 if (slot->pdata->set_bus_mode != NULL)
1233                         slot->pdata->set_bus_mode(mmc_dev(mmc), slot->id,
1234                                                   ios->bus_mode);
1235                 slot->bus_mode = ios->bus_mode;
1236         }
1237
1238         /* On insanely high arm_per frequencies something sometimes
1239          * goes somehow out of sync, and the POW bit is not being set,
1240          * which results in the while loop below getting stuck.
1241          * Writing to the CON register twice seems to do the trick. */
1242         for (i = 0; i < 2; i++)
1243                 OMAP_MMC_WRITE(host, CON, dsor);
1244         slot->saved_con = dsor;
1245         if (ios->power_mode == MMC_POWER_ON) {
1246                 /* Send clock cycles, poll completion */
1247                 OMAP_MMC_WRITE(host, IE, 0);
1248                 OMAP_MMC_WRITE(host, STAT, 0xffff);
1249                 OMAP_MMC_WRITE(host, CMD, 1 << 7);
1250                 while ((OMAP_MMC_READ(host, STAT) & 1) == 0);
1251                 OMAP_MMC_WRITE(host, STAT, 1);
1252         }
1253
1254 exit:
1255         mmc_omap_release_slot(slot, clk_enabled);
1256 }
1257
1258 static const struct mmc_host_ops mmc_omap_ops = {
1259         .request        = mmc_omap_request,
1260         .set_ios        = mmc_omap_set_ios,
1261 };
1262
1263 static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
1264 {
1265         struct mmc_omap_slot *slot = NULL;
1266         struct mmc_host *mmc;
1267         int r;
1268
1269         mmc = mmc_alloc_host(sizeof(struct mmc_omap_slot), host->dev);
1270         if (mmc == NULL)
1271                 return -ENOMEM;
1272
1273         slot = mmc_priv(mmc);
1274         slot->host = host;
1275         slot->mmc = mmc;
1276         slot->id = id;
1277         slot->pdata = &host->pdata->slots[id];
1278
1279         host->slots[id] = slot;
1280
1281         mmc->caps = MMC_CAP_MULTIWRITE;
1282         if (host->pdata->conf.wire4)
1283                 mmc->caps |= MMC_CAP_4_BIT_DATA;
1284
1285         mmc->ops = &mmc_omap_ops;
1286         mmc->f_min = 400000;
1287
1288         if (cpu_class_is_omap2())
1289                 mmc->f_max = 48000000;
1290         else
1291                 mmc->f_max = 24000000;
1292         if (host->pdata->max_freq)
1293                 mmc->f_max = min(host->pdata->max_freq, mmc->f_max);
1294         mmc->ocr_avail = slot->pdata->ocr_mask;
1295
1296         /* Use scatterlist DMA to reduce per-transfer costs.
1297          * NOTE max_seg_size assumption that small blocks aren't
1298          * normally used (except e.g. for reading SD registers).
1299          */
1300         mmc->max_phys_segs = 32;
1301         mmc->max_hw_segs = 32;
1302         mmc->max_blk_size = 2048;       /* BLEN is 11 bits (+1) */
1303         mmc->max_blk_count = 2048;      /* NBLK is 11 bits (+1) */
1304         mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1305         mmc->max_seg_size = mmc->max_req_size;
1306
1307         r = mmc_add_host(mmc);
1308         if (r < 0)
1309                 goto err_remove_host;
1310
1311         if (slot->pdata->name != NULL) {
1312                 r = device_create_file(&mmc->class_dev,
1313                                         &dev_attr_slot_name);
1314                 if (r < 0)
1315                         goto err_remove_host;
1316         }
1317
1318         if (slot->pdata->get_cover_state != NULL) {
1319                 r = device_create_file(&mmc->class_dev,
1320                                         &dev_attr_cover_switch);
1321                 if (r < 0)
1322                         goto err_remove_slot_name;
1323
1324                 setup_timer(&slot->cover_timer, mmc_omap_cover_timer,
1325                             (unsigned long)slot);
1326                 tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
1327                              (unsigned long)slot);
1328                 tasklet_schedule(&slot->cover_tasklet);
1329         }
1330
1331         return 0;
1332
1333 err_remove_slot_name:
1334         if (slot->pdata->name != NULL)
1335                 device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
1336 err_remove_host:
1337         mmc_remove_host(mmc);
1338         mmc_free_host(mmc);
1339         return r;
1340 }
1341
1342 static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
1343 {
1344         struct mmc_host *mmc = slot->mmc;
1345
1346         if (slot->pdata->name != NULL)
1347                 device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
1348         if (slot->pdata->get_cover_state != NULL)
1349                 device_remove_file(&mmc->class_dev, &dev_attr_cover_switch);
1350
1351         tasklet_kill(&slot->cover_tasklet);
1352         del_timer_sync(&slot->cover_timer);
1353         flush_scheduled_work();
1354
1355         mmc_remove_host(mmc);
1356         mmc_free_host(mmc);
1357 }
1358
1359 static int __init mmc_omap_probe(struct platform_device *pdev)
1360 {
1361         struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
1362         struct mmc_omap_host *host = NULL;
1363         struct resource *res;
1364         int i, ret = 0;
1365         int irq;
1366
1367         if (pdata == NULL) {
1368                 dev_err(&pdev->dev, "platform data missing\n");
1369                 return -ENXIO;
1370         }
1371         if (pdata->nr_slots == 0) {
1372                 dev_err(&pdev->dev, "no slots\n");
1373                 return -ENXIO;
1374         }
1375
1376         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1377         irq = platform_get_irq(pdev, 0);
1378         if (res == NULL || irq < 0)
1379                 return -ENXIO;
1380
1381         res = request_mem_region(res->start, res->end - res->start + 1,
1382                                  pdev->name);
1383         if (res == NULL)
1384                 return -EBUSY;
1385
1386         host = kzalloc(sizeof(struct mmc_omap_host), GFP_KERNEL);
1387         if (host == NULL) {
1388                 ret = -ENOMEM;
1389                 goto err_free_mem_region;
1390         }
1391
1392         INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
1393         setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
1394                     (unsigned long) host);
1395
1396         spin_lock_init(&host->clk_lock);
1397         setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
1398
1399         spin_lock_init(&host->dma_lock);
1400         setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host);
1401         spin_lock_init(&host->slot_lock);
1402         init_waitqueue_head(&host->slot_wq);
1403
1404         host->pdata = pdata;
1405         host->dev = &pdev->dev;
1406         platform_set_drvdata(pdev, host);
1407
1408         host->id = pdev->id;
1409         host->mem_res = res;
1410         host->irq = irq;
1411
1412         host->use_dma = 1;
1413         host->dma_ch = -1;
1414
1416         host->phys_base = host->mem_res->start;
1417         host->virt_base = (void __iomem *) IO_ADDRESS(host->phys_base);
1418
1419         if (cpu_is_omap24xx()) {
1420                 host->iclk = clk_get(&pdev->dev, "mmc_ick");
1421                 if (IS_ERR(host->iclk)) {
                             ret = PTR_ERR(host->iclk);
1422                         goto err_free_mmc_host;
                     }
1423                 clk_enable(host->iclk);
1424         }
1425
1426         if (!cpu_is_omap24xx())
1427                 host->fclk = clk_get(&pdev->dev, "mmc_ck");
1428         else
1429                 host->fclk = clk_get(&pdev->dev, "mmc_fck");
1430
1431         if (IS_ERR(host->fclk)) {
1432                 ret = PTR_ERR(host->fclk);
1433                 goto err_free_iclk;
1434         }
1435
1436         ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
1437         if (ret)
1438                 goto err_free_fclk;
1439
1440         if (pdata->init != NULL) {
1441                 ret = pdata->init(&pdev->dev);
1442                 if (ret < 0)
1443                         goto err_free_irq;
1444         }
1445
1446         host->nr_slots = pdata->nr_slots;
1447         for (i = 0; i < pdata->nr_slots; i++) {
1448                 ret = mmc_omap_new_slot(host, i);
1449                 if (ret < 0) {
1450                         while (--i >= 0)
1451                                 mmc_omap_remove_slot(host->slots[i]);
1452
1453                         goto err_plat_cleanup;
1454                 }
1455         }
1456
1457         return 0;
1458
1459 err_plat_cleanup:
1460         if (pdata->cleanup)
1461                 pdata->cleanup(&pdev->dev);
1462 err_free_irq:
1463         free_irq(host->irq, host);
1464 err_free_fclk:
1465         clk_put(host->fclk);
1466 err_free_iclk:
1467         if (host->iclk != NULL) {
1468                 clk_disable(host->iclk);
1469                 clk_put(host->iclk);
1470         }
1471 err_free_mmc_host:
1472         kfree(host);
1473 err_free_mem_region:
1474         release_mem_region(res->start, res->end - res->start + 1);
1475         return ret;
1476 }
1477
1478 static int mmc_omap_remove(struct platform_device *pdev)
1479 {
1480         struct mmc_omap_host *host = platform_get_drvdata(pdev);
1481         int i;
1482
1483         platform_set_drvdata(pdev, NULL);
1484
1485         BUG_ON(host == NULL);
1486
1487         for (i = 0; i < host->nr_slots; i++)
1488                 mmc_omap_remove_slot(host->slots[i]);
1489
1490         if (host->pdata->cleanup)
1491                 host->pdata->cleanup(&pdev->dev);
1492
1493         if (host->iclk && !IS_ERR(host->iclk))
1494                 clk_put(host->iclk);
1495         if (host->fclk && !IS_ERR(host->fclk))
1496                 clk_put(host->fclk);
1497
1498         release_mem_region(pdev->resource[0].start,
1499                            pdev->resource[0].end - pdev->resource[0].start + 1);
1500
1501         kfree(host);
1502
1503         return 0;
1504 }
1505
1506 #ifdef CONFIG_PM
1507 static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
1508 {
1509         int i, ret = 0;
1510         struct mmc_omap_host *host = platform_get_drvdata(pdev);
1511
1512         if (host == NULL || host->suspended)
1513                 return 0;
1514
1515         for (i = 0; i < host->nr_slots; i++) {
1516                 struct mmc_omap_slot *slot;
1517
1518                 slot = host->slots[i];
1519                 ret = mmc_suspend_host(slot->mmc, mesg);
1520                 if (ret < 0) {
1521                         while (--i >= 0) {
1522                                 slot = host->slots[i];
1523                                 mmc_resume_host(slot->mmc);
1524                         }
1525                         return ret;
1526                 }
1527         }
1528         host->suspended = 1;
1529         return 0;
1530 }
1531
1532 static int mmc_omap_resume(struct platform_device *pdev)
1533 {
1534         int i, ret = 0;
1535         struct mmc_omap_host *host = platform_get_drvdata(pdev);
1536
1537         if (host == NULL || !host->suspended)
1538                 return 0;
1539
1540         for (i = 0; i < host->nr_slots; i++) {
1541                 struct mmc_omap_slot *slot;
1542                 slot = host->slots[i];
1543                 ret = mmc_resume_host(slot->mmc);
1544                 if (ret < 0)
1545                         return ret;
1546
1547                 host->suspended = 0;
1548         }
1549         return 0;
1550 }
1551 #else
1552 #define mmc_omap_suspend        NULL
1553 #define mmc_omap_resume         NULL
1554 #endif
1555
1556 static struct platform_driver mmc_omap_driver = {
1557         .probe          = mmc_omap_probe,
1558         .remove         = mmc_omap_remove,
1559         .suspend        = mmc_omap_suspend,
1560         .resume         = mmc_omap_resume,
1561         .driver         = {
1562                 .name   = DRIVER_NAME,
1563                 .owner  = THIS_MODULE,
1564         },
1565 };
1566
1567 static int __init mmc_omap_init(void)
1568 {
1569         return platform_driver_register(&mmc_omap_driver);
1570 }
1571
1572 static void __exit mmc_omap_exit(void)
1573 {
1574         platform_driver_unregister(&mmc_omap_driver);
1575 }
1576
1577 module_init(mmc_omap_init);
1578 module_exit(mmc_omap_exit);
1579
1580 MODULE_DESCRIPTION("OMAP Multimedia Card driver");
1581 MODULE_LICENSE("GPL");
1582 MODULE_ALIAS("platform:" DRIVER_NAME);
1583 MODULE_AUTHOR("Juha Yrjölä");