X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fmmc%2Fhost%2Fsdhci.c;h=e3a8133560a2b41a19f15dd193a83c80d5727e4f;hb=b7ac2cf1cdf346b34cbc2104d386a9d29d12aa4c;hp=95b081a9967bf01b08cbfb233985a228a319994c;hpb=ee53ab5d73998e502801c024a08de2c39a92c52a;p=linux-2.6 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 95b081a996..e3a8133560 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -124,7 +124,8 @@ static void sdhci_init(struct sdhci_host *host) SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT | SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | - SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE; + SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE | + SDHCI_INT_ADMA_ERROR; writel(intmask, host->ioaddr + SDHCI_INT_ENABLE); writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE); @@ -172,119 +173,95 @@ static void sdhci_led_control(struct led_classdev *led, * * \*****************************************************************************/ -static inline char* sdhci_sg_to_buffer(struct sdhci_host* host) -{ - return sg_virt(host->cur_sg); -} - -static inline int sdhci_next_sg(struct sdhci_host* host) -{ - /* - * Skip to next SG entry. - */ - host->cur_sg++; - host->num_sg--; - - /* - * Any entries left? - */ - if (host->num_sg > 0) { - host->offset = 0; - host->remain = host->cur_sg->length; - } - - return host->num_sg; -} - static void sdhci_read_block_pio(struct sdhci_host *host) { - int blksize, chunk_remain; - u32 data; - char *buffer; - int size; + unsigned long flags; + size_t blksize, len, chunk; + u32 scratch; + u8 *buf; DBG("PIO reading\n"); blksize = host->data->blksz; - chunk_remain = 0; - data = 0; + chunk = 0; - buffer = sdhci_sg_to_buffer(host) + host->offset; + local_irq_save(flags); while (blksize) { - if (chunk_remain == 0) { - data = readl(host->ioaddr + SDHCI_BUFFER); - chunk_remain = min(blksize, 4); - } + if (!sg_miter_next(&host->sg_miter)) + BUG(); - size = min(host->remain, chunk_remain); + len = min(host->sg_miter.length, blksize); - chunk_remain -= size; - blksize -= size; - host->offset += size; - host->remain -= size; + blksize -= len; + host->sg_miter.consumed = len; - while (size) { - *buffer = data & 0xFF; - buffer++; - data >>= 8; - size--; - } + buf = host->sg_miter.addr; - if (host->remain == 0) { - if (sdhci_next_sg(host) == 0) { - BUG_ON(blksize != 0); - return; + while (len) { + if (chunk == 0) { + scratch = readl(host->ioaddr + SDHCI_BUFFER); + chunk = 4; } - buffer = sdhci_sg_to_buffer(host); + + *buf = scratch & 0xFF; + + buf++; + scratch >>= 8; + chunk--; + len--; } } + + sg_miter_stop(&host->sg_miter); + + local_irq_restore(flags); } static void sdhci_write_block_pio(struct sdhci_host *host) { - int blksize, chunk_remain; - u32 data; - char *buffer; - int bytes, size; + unsigned long flags; + size_t blksize, len, chunk; + u32 scratch; + u8 *buf; DBG("PIO writing\n"); blksize = host->data->blksz; - chunk_remain = 4; - data = 0; + chunk = 0; + scratch = 0; - bytes = 0; - buffer = sdhci_sg_to_buffer(host) + host->offset; + local_irq_save(flags); while (blksize) { - size = min(host->remain, chunk_remain); - - chunk_remain -= size; - blksize -= size; - host->offset += size; - host->remain -= size; - - while (size) { - data >>= 8; - data |= (u32)*buffer << 24; - buffer++; - size--; - } + if (!sg_miter_next(&host->sg_miter)) + BUG(); - if (chunk_remain == 0) { - writel(data, host->ioaddr + SDHCI_BUFFER); - chunk_remain = 
min(blksize, 4); - } + len = min(host->sg_miter.length, blksize); - if (host->remain == 0) { - if (sdhci_next_sg(host) == 0) { - BUG_ON(blksize != 0); - return; + blksize -= len; + host->sg_miter.consumed = len; + + buf = host->sg_miter.addr; + + while (len) { + scratch |= (u32)*buf << (chunk * 8); + + buf++; + chunk++; + len--; + + if ((chunk == 4) || ((len == 0) && (blksize == 0))) { + writel(scratch, host->ioaddr + SDHCI_BUFFER); + chunk = 0; + scratch = 0; } - buffer = sdhci_sg_to_buffer(host); } } + + sg_miter_stop(&host->sg_miter); + + local_irq_restore(flags); } static void sdhci_transfer_pio(struct sdhci_host *host) @@ -293,7 +270,7 @@ static void sdhci_transfer_pio(struct sdhci_host *host) BUG_ON(!host->data); - if (host->num_sg == 0) + if (host->blocks == 0) return; if (host->data->flags & MMC_DATA_READ) @@ -301,19 +278,238 @@ static void sdhci_transfer_pio(struct sdhci_host *host) else mask = SDHCI_SPACE_AVAILABLE; + /* + * Some controllers (JMicron JMB38x) mess up the buffer bits + * for transfers < 4 bytes. As long as it is just one block, + * we can ignore the bits. + */ + if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) && + (host->data->blocks == 1)) + mask = ~0; + while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) { if (host->data->flags & MMC_DATA_READ) sdhci_read_block_pio(host); else sdhci_write_block_pio(host); - if (host->num_sg == 0) + host->blocks--; + if (host->blocks == 0) break; } DBG("PIO transfer complete.\n"); } +static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags) +{ + local_irq_save(*flags); + return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; +} + +static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) +{ + kunmap_atomic(buffer, KM_BIO_SRC_IRQ); + local_irq_restore(*flags); +} + +static int sdhci_adma_table_pre(struct sdhci_host *host, + struct mmc_data *data) +{ + int direction; + + u8 *desc; + u8 *align; + dma_addr_t addr; + dma_addr_t align_addr; + int len, offset; + + struct scatterlist *sg; + int i; + char *buffer; + unsigned long flags; + + /* + * The spec does not specify endianness of descriptor table. + * We currently guess that it is LE. + */ + + if (data->flags & MMC_DATA_READ) + direction = DMA_FROM_DEVICE; + else + direction = DMA_TO_DEVICE; + + /* + * The ADMA descriptor table is mapped further down as we + * need to fill it with data first. + */ + + host->align_addr = dma_map_single(mmc_dev(host->mmc), + host->align_buffer, 128 * 4, direction); + if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) + goto fail; + BUG_ON(host->align_addr & 0x3); + + host->sg_count = dma_map_sg(mmc_dev(host->mmc), + data->sg, data->sg_len, direction); + if (host->sg_count == 0) + goto unmap_align; + + desc = host->adma_desc; + align = host->align_buffer; + + align_addr = host->align_addr; + + for_each_sg(data->sg, sg, host->sg_count, i) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + + /* + * The SDHCI specification states that ADMA + * addresses must be 32-bit aligned. If they + * aren't, then we use a bounce buffer for + * the (up to three) bytes that screw up the + * alignment. 
+ */ + offset = (4 - (addr & 0x3)) & 0x3; + if (offset) { + if (data->flags & MMC_DATA_WRITE) { + buffer = sdhci_kmap_atomic(sg, &flags); + WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); + memcpy(align, buffer, offset); + sdhci_kunmap_atomic(buffer, &flags); + } + + desc[7] = (align_addr >> 24) & 0xff; + desc[6] = (align_addr >> 16) & 0xff; + desc[5] = (align_addr >> 8) & 0xff; + desc[4] = (align_addr >> 0) & 0xff; + + BUG_ON(offset > 65536); + + desc[3] = (offset >> 8) & 0xff; + desc[2] = (offset >> 0) & 0xff; + + desc[1] = 0x00; + desc[0] = 0x21; /* tran, valid */ + + align += 4; + align_addr += 4; + + desc += 8; + + addr += offset; + len -= offset; + } + + desc[7] = (addr >> 24) & 0xff; + desc[6] = (addr >> 16) & 0xff; + desc[5] = (addr >> 8) & 0xff; + desc[4] = (addr >> 0) & 0xff; + + BUG_ON(len > 65536); + + desc[3] = (len >> 8) & 0xff; + desc[2] = (len >> 0) & 0xff; + + desc[1] = 0x00; + desc[0] = 0x21; /* tran, valid */ + + desc += 8; + + /* + * If this triggers then we have a calculation bug + * somewhere. :/ + */ + WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); + } + + /* + * Add a terminating entry. + */ + desc[7] = 0; + desc[6] = 0; + desc[5] = 0; + desc[4] = 0; + + desc[3] = 0; + desc[2] = 0; + + desc[1] = 0x00; + desc[0] = 0x03; /* nop, end, valid */ + + /* + * Resync align buffer as we might have changed it. + */ + if (data->flags & MMC_DATA_WRITE) { + dma_sync_single_for_device(mmc_dev(host->mmc), + host->align_addr, 128 * 4, direction); + } + + host->adma_addr = dma_map_single(mmc_dev(host->mmc), + host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE); + if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr)) + goto unmap_entries; + BUG_ON(host->adma_addr & 0x3); + + return 0; + +unmap_entries: + dma_unmap_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, direction); +unmap_align: + dma_unmap_single(mmc_dev(host->mmc), host->align_addr, + 128 * 4, direction); +fail: + return -EINVAL; +} + +static void sdhci_adma_table_post(struct sdhci_host *host, + struct mmc_data *data) +{ + int direction; + + struct scatterlist *sg; + int i, size; + u8 *align; + char *buffer; + unsigned long flags; + + if (data->flags & MMC_DATA_READ) + direction = DMA_FROM_DEVICE; + else + direction = DMA_TO_DEVICE; + + dma_unmap_single(mmc_dev(host->mmc), host->adma_addr, + (128 * 2 + 1) * 4, DMA_TO_DEVICE); + + dma_unmap_single(mmc_dev(host->mmc), host->align_addr, + 128 * 4, direction); + + if (data->flags & MMC_DATA_READ) { + dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, + data->sg_len, direction); + + align = host->align_buffer; + + for_each_sg(data->sg, sg, host->sg_count, i) { + if (sg_dma_address(sg) & 0x3) { + size = 4 - (sg_dma_address(sg) & 0x3); + + buffer = sdhci_kmap_atomic(sg, &flags); + WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); + memcpy(buffer, align, size); + sdhci_kunmap_atomic(buffer, &flags); + + align += 4; + } + } + } + + dma_unmap_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, direction); +} + static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) { u8 count; @@ -363,6 +559,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) { u8 count; + u8 ctrl; + int ret; WARN_ON(host->data); @@ -383,41 +581,127 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) if (host->flags & SDHCI_USE_DMA) host->flags |= SDHCI_REQ_USE_DMA; - if (unlikely((host->flags & SDHCI_REQ_USE_DMA) && - (host->quirks & 
SDHCI_QUIRK_32BIT_DMA_SIZE) && - ((data->blksz * data->blocks) & 0x3))) { - DBG("Reverting to PIO because of transfer size (%d)\n", - data->blksz * data->blocks); - host->flags &= ~SDHCI_REQ_USE_DMA; + /* + * FIXME: This doesn't account for merging when mapping the + * scatterlist. + */ + if (host->flags & SDHCI_REQ_USE_DMA) { + int broken, i; + struct scatterlist *sg; + + broken = 0; + if (host->flags & SDHCI_USE_ADMA) { + if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) + broken = 1; + } else { + if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) + broken = 1; + } + + if (unlikely(broken)) { + for_each_sg(data->sg, sg, data->sg_len, i) { + if (sg->length & 0x3) { + DBG("Reverting to PIO because of " + "transfer size (%d)\n", + sg->length); + host->flags &= ~SDHCI_REQ_USE_DMA; + break; + } + } + } } /* * The assumption here being that alignment is the same after * translation to device address space. */ - if (unlikely((host->flags & SDHCI_REQ_USE_DMA) && - (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && - (data->sg->offset & 0x3))) { - DBG("Reverting to PIO because of bad alignment\n"); - host->flags &= ~SDHCI_REQ_USE_DMA; + if (host->flags & SDHCI_REQ_USE_DMA) { + int broken, i; + struct scatterlist *sg; + + broken = 0; + if (host->flags & SDHCI_USE_ADMA) { + /* + * As we use 3 byte chunks to work around + * alignment problems, we need to check this + * quirk. + */ + if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) + broken = 1; + } else { + if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) + broken = 1; + } + + if (unlikely(broken)) { + for_each_sg(data->sg, sg, data->sg_len, i) { + if (sg->offset & 0x3) { + DBG("Reverting to PIO because of " + "bad alignment\n"); + host->flags &= ~SDHCI_REQ_USE_DMA; + break; + } + } + } } if (host->flags & SDHCI_REQ_USE_DMA) { - int count; - - count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, - (data->flags & MMC_DATA_READ) ? - DMA_FROM_DEVICE : DMA_TO_DEVICE); - WARN_ON(count != 1); + if (host->flags & SDHCI_USE_ADMA) { + ret = sdhci_adma_table_pre(host, data); + if (ret) { + /* + * This only happens when someone fed + * us an invalid request. + */ + WARN_ON(1); + host->flags &= ~SDHCI_REQ_USE_DMA; + } else { + writel(host->adma_addr, + host->ioaddr + SDHCI_ADMA_ADDRESS); + } + } else { + int sg_cnt; + + sg_cnt = dma_map_sg(mmc_dev(host->mmc), + data->sg, data->sg_len, + (data->flags & MMC_DATA_READ) ? + DMA_FROM_DEVICE : + DMA_TO_DEVICE); + if (sg_cnt == 0) { + /* + * This only happens when someone fed + * us an invalid request. + */ + WARN_ON(1); + host->flags &= ~SDHCI_REQ_USE_DMA; + } else { + WARN_ON(sg_cnt != 1); + writel(sg_dma_address(data->sg), + host->ioaddr + SDHCI_DMA_ADDRESS); + } + } + } - writel(sg_dma_address(data->sg), - host->ioaddr + SDHCI_DMA_ADDRESS); - } else { - host->cur_sg = data->sg; - host->num_sg = data->sg_len; + /* + * Always adjust the DMA selection as some controllers + * (e.g. JMicron) can't do PIO properly when the selection + * is ADMA. 
+ */ + if (host->version >= SDHCI_SPEC_200) { + ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL); + ctrl &= ~SDHCI_CTRL_DMA_MASK; + if ((host->flags & SDHCI_REQ_USE_DMA) && + (host->flags & SDHCI_USE_ADMA)) + ctrl |= SDHCI_CTRL_ADMA32; + else + ctrl |= SDHCI_CTRL_SDMA; + writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); + } - host->offset = 0; - host->remain = host->cur_sg->length; + if (!(host->flags & SDHCI_REQ_USE_DMA)) { + sg_miter_start(&host->sg_miter, + data->sg, data->sg_len, SG_MITER_ATOMIC); + host->blocks = data->blocks; } /* We do not handle DMA boundaries, so set it to max (512 KiB) */ @@ -457,9 +741,13 @@ static void sdhci_finish_data(struct sdhci_host *host) host->data = NULL; if (host->flags & SDHCI_REQ_USE_DMA) { - dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, - (data->flags & MMC_DATA_READ) ? - DMA_FROM_DEVICE : DMA_TO_DEVICE); + if (host->flags & SDHCI_USE_ADMA) + sdhci_adma_table_post(host, data); + else { + dma_unmap_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, (data->flags & MMC_DATA_READ) ? + DMA_FROM_DEVICE : DMA_TO_DEVICE); + } } /* @@ -676,7 +964,7 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power) } /* - * At least the CaFe chip gets confused if we set the voltage + * At least the Marvell CaFe chip gets confused if we set the voltage * and set turn on power at the same time, so set the voltage first. */ if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)) @@ -712,7 +1000,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) host->mrq = mrq; - if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) { + if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT) + || (host->flags & SDHCI_DEVICE_DEAD)) { host->mrq->cmd->error = -ENOMEDIUM; tasklet_schedule(&host->finish_tasklet); } else @@ -732,6 +1021,9 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) spin_lock_irqsave(&host->lock, flags); + if (host->flags & SDHCI_DEVICE_DEAD) + goto out; + /* * Reset the chip on each power off. * Should clear out any weird states. @@ -770,6 +1062,7 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); +out: mmiowb(); spin_unlock_irqrestore(&host->lock, flags); } @@ -784,7 +1077,10 @@ static int sdhci_get_ro(struct mmc_host *mmc) spin_lock_irqsave(&host->lock, flags); - present = readl(host->ioaddr + SDHCI_PRESENT_STATE); + if (host->flags & SDHCI_DEVICE_DEAD) + present = 0; + else + present = readl(host->ioaddr + SDHCI_PRESENT_STATE); spin_unlock_irqrestore(&host->lock, flags); @@ -801,6 +1097,9 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) spin_lock_irqsave(&host->lock, flags); + if (host->flags & SDHCI_DEVICE_DEAD) + goto out; + ier = readl(host->ioaddr + SDHCI_INT_ENABLE); ier &= ~SDHCI_INT_CARD_INT; @@ -810,6 +1109,7 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) writel(ier, host->ioaddr + SDHCI_INT_ENABLE); writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE); +out: mmiowb(); spin_unlock_irqrestore(&host->lock, flags); @@ -875,10 +1175,11 @@ static void sdhci_tasklet_finish(unsigned long param) * The controller needs a reset of internal state machines * upon error conditions. 
*/ - if (mrq->cmd->error || - (mrq->data && (mrq->data->error || - (mrq->data->stop && mrq->data->stop->error))) || - (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) { + if (!(host->flags & SDHCI_DEVICE_DEAD) && + (mrq->cmd->error || + (mrq->data && (mrq->data->error || + (mrq->data->stop && mrq->data->stop->error))) || + (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { /* Some controllers need this kick or reset won't work here */ if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { @@ -995,6 +1296,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) host->data->error = -ETIMEDOUT; else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) host->data->error = -EILSEQ; + else if (intmask & SDHCI_INT_ADMA_ERROR) + host->data->error = -EIO; if (host->data->error) sdhci_finish_data(host); @@ -1186,7 +1489,6 @@ int sdhci_add_host(struct sdhci_host *host) { struct mmc_host *mmc; unsigned int caps; - unsigned int version; int ret; WARN_ON(host == NULL); @@ -1200,12 +1502,13 @@ int sdhci_add_host(struct sdhci_host *host) sdhci_reset(host, SDHCI_RESET_ALL); - version = readw(host->ioaddr + SDHCI_HOST_VERSION); - version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; - if (version > 1) { + host->version = readw(host->ioaddr + SDHCI_HOST_VERSION); + host->version = (host->version & SDHCI_SPEC_VER_MASK) + >> SDHCI_SPEC_VER_SHIFT; + if (host->version > SDHCI_SPEC_200) { printk(KERN_ERR "%s: Unknown controller version (%d). " "You may experience problems.\n", mmc_hostname(mmc), - version); + host->version); } caps = readl(host->ioaddr + SDHCI_CAPABILITIES); @@ -1223,20 +1526,56 @@ int sdhci_add_host(struct sdhci_host *host) host->flags &= ~SDHCI_USE_DMA; } + if (host->flags & SDHCI_USE_DMA) { + if ((host->version >= SDHCI_SPEC_200) && + (caps & SDHCI_CAN_DO_ADMA2)) + host->flags |= SDHCI_USE_ADMA; + } + + if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && + (host->flags & SDHCI_USE_ADMA)) { + DBG("Disabling ADMA as it is marked broken\n"); + host->flags &= ~SDHCI_USE_ADMA; + } + if (host->flags & SDHCI_USE_DMA) { if (host->ops->enable_dma) { if (host->ops->enable_dma(host)) { printk(KERN_WARNING "%s: No suitable DMA " "available. Falling back to PIO.\n", mmc_hostname(mmc)); - host->flags &= ~SDHCI_USE_DMA; + host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA); } } } - /* XXX: Hack to get MMC layer to avoid highmem */ - if (!(host->flags & SDHCI_USE_DMA)) - mmc_dev(host->mmc)->dma_mask = 0; + if (host->flags & SDHCI_USE_ADMA) { + /* + * We need to allocate descriptors for all sg entries + * (128) and potentially one alignment transfer for + * each of those entries. + */ + host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL); + host->align_buffer = kmalloc(128 * 4, GFP_KERNEL); + if (!host->adma_desc || !host->align_buffer) { + kfree(host->adma_desc); + kfree(host->align_buffer); + printk(KERN_WARNING "%s: Unable to allocate ADMA " + "buffers. Falling back to standard DMA.\n", + mmc_hostname(mmc)); + host->flags &= ~SDHCI_USE_ADMA; + } + } + + /* + * If we use DMA, then it's up to the caller to set the DMA + * mask, but PIO does not need the hw shim so we set a new + * mask here in that case. + */ + if (!(host->flags & SDHCI_USE_DMA)) { + host->dma_mask = DMA_BIT_MASK(64); + mmc_dev(host->mmc)->dma_mask = &host->dma_mask; + } host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; @@ -1285,13 +1624,16 @@ int sdhci_add_host(struct sdhci_host *host) spin_lock_init(&host->lock); /* - * Maximum number of segments. Hardware cannot do scatter lists. 
+ * Maximum number of segments. Depends on if the hardware + * can do scatter/gather or not. */ - if (host->flags & SDHCI_USE_DMA) + if (host->flags & SDHCI_USE_ADMA) + mmc->max_hw_segs = 128; + else if (host->flags & SDHCI_USE_DMA) mmc->max_hw_segs = 1; - else - mmc->max_hw_segs = 16; - mmc->max_phys_segs = 16; + else /* PIO */ + mmc->max_hw_segs = 128; + mmc->max_phys_segs = 128; /* * Maximum number of sectors in one transfer. Limited by DMA boundary @@ -1301,9 +1643,13 @@ int sdhci_add_host(struct sdhci_host *host) /* * Maximum segment size. Could be one segment with the maximum number - * of bytes. + * of bytes. When doing hardware scatter/gather, each entry cannot + * be larger than 64 KiB though. */ - mmc->max_seg_size = mmc->max_req_size; + if (host->flags & SDHCI_USE_ADMA) + mmc->max_seg_size = 65536; + else + mmc->max_seg_size = mmc->max_req_size; /* * Maximum block size. This varies from controller to controller and @@ -1358,8 +1704,9 @@ int sdhci_add_host(struct sdhci_host *host) mmc_add_host(mmc); - printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n", + printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n", mmc_hostname(mmc), host->hw_name, mmc_dev(mmc)->bus_id, + (host->flags & SDHCI_USE_ADMA)?"A":"", (host->flags & SDHCI_USE_DMA)?"DMA":"PIO"); return 0; @@ -1378,15 +1725,34 @@ untasklet: EXPORT_SYMBOL_GPL(sdhci_add_host); -void sdhci_remove_host(struct sdhci_host *host) +void sdhci_remove_host(struct sdhci_host *host, int dead) { + unsigned long flags; + + if (dead) { + spin_lock_irqsave(&host->lock, flags); + + host->flags |= SDHCI_DEVICE_DEAD; + + if (host->mrq) { + printk(KERN_ERR "%s: Controller removed during " + " transfer!\n", mmc_hostname(host->mmc)); + + host->mrq->cmd->error = -ENOMEDIUM; + tasklet_schedule(&host->finish_tasklet); + } + + spin_unlock_irqrestore(&host->lock, flags); + } + mmc_remove_host(host->mmc); #ifdef CONFIG_LEDS_CLASS led_classdev_unregister(&host->led); #endif - sdhci_reset(host, SDHCI_RESET_ALL); + if (!dead) + sdhci_reset(host, SDHCI_RESET_ALL); free_irq(host->irq, host); @@ -1394,6 +1760,12 @@ void sdhci_remove_host(struct sdhci_host *host) tasklet_kill(&host->card_tasklet); tasklet_kill(&host->finish_tasklet); + + kfree(host->adma_desc); + kfree(host->align_buffer); + + host->adma_desc = NULL; + host->align_buffer = NULL; } EXPORT_SYMBOL_GPL(sdhci_remove_host);
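
For reference, the byte-at-a-time stores in sdhci_adma_table_pre() above are encoding 32-bit ADMA2 descriptors: 8 bytes per entry, holding an attribute byte, a reserved byte, a 16-bit length and a 32-bit DMA address, all little-endian. The standalone sketch below restates that layout with named fields. It is illustrative only: adma2_fill_desc(), the ADMA2_* constants and the addresses used in main() are invented here and do not exist in the driver, which open-codes the same bytes (0x21 for a "tran, valid" entry, 0x03 for the terminating "nop, end, valid" entry; a full 65536-byte segment, the max_seg_size set above, ends up with a length field of 0, which the ADMA2 engine reads as 64 KiB).

#include <stdint.h>
#include <stdio.h>

#define ADMA2_VALID	0x01	/* bit 0: descriptor is valid */
#define ADMA2_END	0x02	/* bit 1: last descriptor in the table */
#define ADMA2_ACT_TRAN	0x20	/* "tran" action: transfer data */

/*
 * Fill one 8-byte, little-endian 32-bit ADMA2 descriptor:
 * attribute byte, reserved byte, 16-bit length, 32-bit address.
 */
static void adma2_fill_desc(uint8_t *desc, uint32_t dma_addr,
			    uint32_t len, uint8_t attr)
{
	desc[0] = attr;			/* e.g. 0x21 = ADMA2_ACT_TRAN | ADMA2_VALID */
	desc[1] = 0x00;			/* reserved */
	desc[2] = len & 0xff;		/* length, little-endian (0 means 64 KiB) */
	desc[3] = (len >> 8) & 0xff;
	desc[4] = dma_addr & 0xff;	/* DMA address, little-endian */
	desc[5] = (dma_addr >> 8) & 0xff;
	desc[6] = (dma_addr >> 16) & 0xff;
	desc[7] = (dma_addr >> 24) & 0xff;
}

int main(void)
{
	uint8_t table[3 * 8];
	uint32_t addr = 0x12345671;		/* hypothetical, unaligned segment address */
	uint32_t len = 4096;			/* hypothetical segment length */
	uint32_t align_addr = 0x00ffe000;	/* hypothetical bounce (align) buffer address */
	uint32_t offset = (4 - (addr & 0x3)) & 0x3;	/* leading bytes to bounce: 3 here */
	int i;

	/* 1) up to three unaligned leading bytes, supplied from the bounce buffer */
	adma2_fill_desc(table + 0, align_addr, offset,
			ADMA2_ACT_TRAN | ADMA2_VALID);
	/* 2) the remainder of the segment, now 32-bit aligned */
	adma2_fill_desc(table + 8, addr + offset, len - offset,
			ADMA2_ACT_TRAN | ADMA2_VALID);
	/* 3) terminating entry, matching desc[0] = 0x03 ("nop, end, valid") in the patch */
	adma2_fill_desc(table + 16, 0, 0, ADMA2_END | ADMA2_VALID);

	for (i = 0; i < 3; i++)
		printf("desc %d: attr 0x%02x\n", i, (unsigned)table[i * 8]);

	return 0;
}

The first descriptor in the sketch mirrors the bounce-buffer path in the patch: for a segment whose DMA address is not 32-bit aligned, offset = (4 - (addr & 0x3)) & 0x3 gives the one to three leading bytes that are staged through align_buffer, after which addr + offset is aligned and can be described directly.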