Diffstat (limited to 'drivers/mmc/host')
-rw-r--r-- | drivers/mmc/host/Kconfig       |   29
-rw-r--r-- | drivers/mmc/host/Makefile      |    1
-rw-r--r-- | drivers/mmc/host/atmel-mci.c   |   42
-rw-r--r-- | drivers/mmc/host/mmc_spi.c     |    1
-rw-r--r-- | drivers/mmc/host/msm_sdcc.c    | 1287
-rw-r--r-- | drivers/mmc/host/msm_sdcc.h    |  238
-rw-r--r-- | drivers/mmc/host/mxcmmc.c      |    2
-rw-r--r-- | drivers/mmc/host/omap_hsmmc.c  | 1083
-rw-r--r-- | drivers/mmc/host/sdhci-of.c    |   49
-rw-r--r-- | drivers/mmc/host/sdhci-pci.c   |    5
-rw-r--r-- | drivers/mmc/host/sdhci.c       |   54
-rw-r--r-- | drivers/mmc/host/sdhci.h       |    6
12 files changed, 2541 insertions, 256 deletions
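
The largest part of this change is the new msm_sdcc driver added below. For orientation only, the following is a minimal, hypothetical sketch of the platform-driver registration pattern that driver follows (a probe routine bound by driver name, registered at module init); the "example_sdcc" identifiers are placeholders and are not part of the patch.

/* Minimal sketch of the platform-driver pattern used by msm_sdcc.c. */
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_sdcc_probe(struct platform_device *pdev)
{
	/* A real driver claims its MEM/IRQ/DMA resources and clocks here,
	 * then allocates and registers an mmc_host. */
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

static struct platform_driver example_sdcc_driver = {
	.probe	= example_sdcc_probe,
	.driver	= {
		.name	= "example_sdcc",	/* must match the platform device name */
	},
};

static int __init example_sdcc_init(void)
{
	return platform_driver_register(&example_sdcc_driver);
}

static void __exit example_sdcc_exit(void)
{
	platform_driver_unregister(&example_sdcc_driver);
}

module_init(example_sdcc_init);
module_exit(example_sdcc_exit);
MODULE_LICENSE("GPL");

In the actual patch, msmsdcc_probe() additionally fetches the memory, DMA and named IRQ resources, sets up the "sdc_pclk"/"sdc_clk" clocks, and calls mmc_alloc_host()/mmc_add_host(), as can be seen in the hunks that follow.
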
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 891ef18bd77..7cb057f3f88 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -132,11 +132,11 @@ config MMC_OMAP config MMC_OMAP_HS tristate "TI OMAP High Speed Multimedia Card Interface support" - depends on ARCH_OMAP2430 || ARCH_OMAP3 + depends on ARCH_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4 help This selects the TI OMAP High Speed Multimedia card Interface. - If you have an OMAP2430 or OMAP3 board with a Multimedia Card slot, - say Y or M here. + If you have an OMAP2430 or OMAP3 board or OMAP4 board with a + Multimedia Card slot, say Y or M here. If unsure, say N. @@ -160,6 +160,12 @@ config MMC_AU1X If unsure, say N. +choice + prompt "Atmel SD/MMC Driver" + default MMC_ATMELMCI if AVR32 + help + Choose which driver to use for the Atmel MCI Silicon + config MMC_AT91 tristate "AT91 SD/MMC Card Interface support" depends on ARCH_AT91 @@ -170,17 +176,19 @@ config MMC_AT91 config MMC_ATMELMCI tristate "Atmel Multimedia Card Interface support" - depends on AVR32 + depends on AVR32 || ARCH_AT91 help This selects the Atmel Multimedia Card Interface driver. If - you have an AT32 (AVR32) platform with a Multimedia Card - slot, say Y or M here. + you have an AT32 (AVR32) or AT91 platform with a Multimedia + Card slot, say Y or M here. If unsure, say N. +endchoice + config MMC_ATMELMCI_DMA bool "Atmel MCI DMA support (EXPERIMENTAL)" - depends on MMC_ATMELMCI && DMA_ENGINE && EXPERIMENTAL + depends on MMC_ATMELMCI && AVR32 && DMA_ENGINE && EXPERIMENTAL help Say Y here to have the Atmel MCI driver use a DMA engine to do data transfers and thus increase the throughput and @@ -199,6 +207,13 @@ config MMC_IMX If unsure, say N. +config MMC_MSM7X00A + tristate "Qualcomm MSM 7X00A SDCC Controller Support" + depends on MMC && ARCH_MSM + help + This provides support for the SD/MMC cell found in the + MSM 7X00A controllers from Qualcomm. + config MMC_MXC tristate "Freescale i.MX2/3 Multimedia Card Interface support" depends on ARCH_MXC diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index cf153f62845..abcb0400e06 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o obj-$(CONFIG_MMC_AT91) += at91_mci.o obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o +obj-$(CONFIG_MMC_MSM7X00A) += msm_sdcc.o obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o obj-$(CONFIG_MMC_SPI) += mmc_spi.o ifeq ($(CONFIG_OF),y) diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 7b603e4b41d..fc25586b7ee 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -30,6 +30,7 @@ #include <asm/io.h> #include <asm/unaligned.h> +#include <mach/cpu.h> #include <mach/board.h> #include "atmel-mci-regs.h" @@ -210,6 +211,18 @@ struct atmel_mci_slot { set_bit(event, &host->pending_events) /* + * Enable or disable features/registers based on + * whether the processor supports them + */ +static bool mci_has_rwproof(void) +{ + if (cpu_is_at91sam9261() || cpu_is_at91rm9200()) + return false; + else + return true; +} + +/* * The debugfs stuff below is mostly optimized away when * CONFIG_DEBUG_FS is not set. 
*/ @@ -276,8 +289,13 @@ static void atmci_show_status_reg(struct seq_file *s, [3] = "BLKE", [4] = "DTIP", [5] = "NOTBUSY", + [6] = "ENDRX", + [7] = "ENDTX", [8] = "SDIOIRQA", [9] = "SDIOIRQB", + [12] = "SDIOWAIT", + [14] = "RXBUFF", + [15] = "TXBUFE", [16] = "RINDE", [17] = "RDIRE", [18] = "RCRCE", @@ -285,6 +303,11 @@ static void atmci_show_status_reg(struct seq_file *s, [20] = "RTOE", [21] = "DCRCE", [22] = "DTOE", + [23] = "CSTOE", + [24] = "BLKOVRE", + [25] = "DMADONE", + [26] = "FIFOEMPTY", + [27] = "XFRDONE", [30] = "OVRE", [31] = "UNRE", }; @@ -576,6 +599,7 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) struct scatterlist *sg; unsigned int i; enum dma_data_direction direction; + unsigned int sglen; /* * We don't do DMA on "complex" transfers, i.e. with @@ -605,11 +629,14 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) else direction = DMA_TO_DEVICE; + sglen = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, direction); + if (sglen != data->sg_len) + goto unmap_exit; desc = chan->device->device_prep_slave_sg(chan, data->sg, data->sg_len, direction, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) - return -ENOMEM; + goto unmap_exit; host->dma.data_desc = desc; desc->callback = atmci_dma_complete; @@ -620,6 +647,9 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) chan->device->device_issue_pending(chan); return 0; +unmap_exit: + dma_unmap_sg(&host->pdev->dev, data->sg, sglen, direction); + return -ENOMEM; } #else /* CONFIG_MMC_ATMELMCI_DMA */ @@ -849,13 +879,15 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) clkdiv = 255; } + host->mode_reg = MCI_MR_CLKDIV(clkdiv); + /* * WRPROOF and RDPROOF prevent overruns/underruns by * stopping the clock when the FIFO is full/empty. * This state is not expected to last for long. */ - host->mode_reg = MCI_MR_CLKDIV(clkdiv) | MCI_MR_WRPROOF - | MCI_MR_RDPROOF; + if (mci_has_rwproof()) + host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF); if (list_empty(&host->queue)) mci_writel(host, MR, host->mode_reg); @@ -1648,8 +1680,10 @@ static int __init atmci_probe(struct platform_device *pdev) nr_slots++; } - if (!nr_slots) + if (!nr_slots) { + dev_err(&pdev->dev, "init failed: no slot defined\n"); goto err_init_slot; + } dev_info(&pdev->dev, "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index a461017ce5c..d55fe4fb793 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1562,3 +1562,4 @@ MODULE_AUTHOR("Mike Lavender, David Brownell, " "Hans-Peter Nilsson, Jan Nikitenko"); MODULE_DESCRIPTION("SPI SD/MMC host driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("spi:mmc_spi"); diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c new file mode 100644 index 00000000000..dba4600bcdb --- /dev/null +++ b/drivers/mmc/host/msm_sdcc.c @@ -0,0 +1,1287 @@ +/* + * linux/drivers/mmc/host/msm_sdcc.c - Qualcomm MSM 7X00A SDCC Driver + * + * Copyright (C) 2007 Google Inc, + * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Based on mmci.c + * + * Author: San Mehat (san@android.com) + * + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/highmem.h> +#include <linux/log2.h> +#include <linux/mmc/host.h> +#include <linux/mmc/card.h> +#include <linux/clk.h> +#include <linux/scatterlist.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/debugfs.h> +#include <linux/io.h> +#include <linux/memory.h> + +#include <asm/cacheflush.h> +#include <asm/div64.h> +#include <asm/sizes.h> + +#include <asm/mach/mmc.h> +#include <mach/msm_iomap.h> +#include <mach/dma.h> +#include <mach/htc_pwrsink.h> + +#include "msm_sdcc.h" + +#define DRIVER_NAME "msm-sdcc" + +static unsigned int msmsdcc_fmin = 144000; +static unsigned int msmsdcc_fmax = 50000000; +static unsigned int msmsdcc_4bit = 1; +static unsigned int msmsdcc_pwrsave = 1; +static unsigned int msmsdcc_piopoll = 1; +static unsigned int msmsdcc_sdioirq; + +#define PIO_SPINMAX 30 +#define CMD_SPINMAX 20 + +static void +msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, + u32 c); + +static void +msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq) +{ + writel(0, host->base + MMCICOMMAND); + + BUG_ON(host->curr.data); + + host->curr.mrq = NULL; + host->curr.cmd = NULL; + + if (mrq->data) + mrq->data->bytes_xfered = host->curr.data_xfered; + if (mrq->cmd->error == -ETIMEDOUT) + mdelay(5); + + /* + * Need to drop the host lock here; mmc_request_done may call + * back into the driver... + */ + spin_unlock(&host->lock); + mmc_request_done(host->mmc, mrq); + spin_lock(&host->lock); +} + +static void +msmsdcc_stop_data(struct msmsdcc_host *host) +{ + writel(0, host->base + MMCIDATACTRL); + host->curr.data = NULL; + host->curr.got_dataend = host->curr.got_datablkend = 0; +} + +uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host) +{ + switch (host->pdev_id) { + case 1: + return MSM_SDC1_PHYS + MMCIFIFO; + case 2: + return MSM_SDC2_PHYS + MMCIFIFO; + case 3: + return MSM_SDC3_PHYS + MMCIFIFO; + case 4: + return MSM_SDC4_PHYS + MMCIFIFO; + } + BUG(); + return 0; +} + +static void +msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd, + unsigned int result, + struct msm_dmov_errdata *err) +{ + struct msmsdcc_dma_data *dma_data = + container_of(cmd, struct msmsdcc_dma_data, hdr); + struct msmsdcc_host *host = dma_data->host; + unsigned long flags; + struct mmc_request *mrq; + + spin_lock_irqsave(&host->lock, flags); + mrq = host->curr.mrq; + BUG_ON(!mrq); + + if (!(result & DMOV_RSLT_VALID)) { + pr_err("msmsdcc: Invalid DataMover result\n"); + goto out; + } + + if (result & DMOV_RSLT_DONE) { + host->curr.data_xfered = host->curr.xfer_size; + } else { + /* Error or flush */ + if (result & DMOV_RSLT_ERROR) + pr_err("%s: DMA error (0x%.8x)\n", + mmc_hostname(host->mmc), result); + if (result & DMOV_RSLT_FLUSH) + pr_err("%s: DMA channel flushed (0x%.8x)\n", + mmc_hostname(host->mmc), result); + if (err) + pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n", + err->flush[0], err->flush[1], err->flush[2], + err->flush[3], err->flush[4], err->flush[5]); + if (!mrq->data->error) + mrq->data->error = -EIO; + } + host->dma.busy = 0; + dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents, + host->dma.dir); + + if (host->curr.user_pages) { + struct scatterlist *sg = host->dma.sg; + int i; + + for (i = 0; i < 
host->dma.num_ents; i++) + flush_dcache_page(sg_page(sg++)); + } + + host->dma.sg = NULL; + + if ((host->curr.got_dataend && host->curr.got_datablkend) + || mrq->data->error) { + + /* + * If we've already gotten our DATAEND / DATABLKEND + * for this request, then complete it through here. + */ + msmsdcc_stop_data(host); + + if (!mrq->data->error) + host->curr.data_xfered = host->curr.xfer_size; + if (!mrq->data->stop || mrq->cmd->error) { + writel(0, host->base + MMCICOMMAND); + host->curr.mrq = NULL; + host->curr.cmd = NULL; + mrq->data->bytes_xfered = host->curr.data_xfered; + + spin_unlock_irqrestore(&host->lock, flags); + mmc_request_done(host->mmc, mrq); + return; + } else + msmsdcc_start_command(host, mrq->data->stop, 0); + } + +out: + spin_unlock_irqrestore(&host->lock, flags); + return; +} + +static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data) +{ + if (host->dma.channel == -1) + return -ENOENT; + + if ((data->blksz * data->blocks) < MCI_FIFOSIZE) + return -EINVAL; + if ((data->blksz * data->blocks) % MCI_FIFOSIZE) + return -EINVAL; + return 0; +} + +static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data) +{ + struct msmsdcc_nc_dmadata *nc; + dmov_box *box; + uint32_t rows; + uint32_t crci; + unsigned int n; + int i, rc; + struct scatterlist *sg = data->sg; + + rc = validate_dma(host, data); + if (rc) + return rc; + + host->dma.sg = data->sg; + host->dma.num_ents = data->sg_len; + + nc = host->dma.nc; + + switch (host->pdev_id) { + case 1: + crci = MSMSDCC_CRCI_SDC1; + break; + case 2: + crci = MSMSDCC_CRCI_SDC2; + break; + case 3: + crci = MSMSDCC_CRCI_SDC3; + break; + case 4: + crci = MSMSDCC_CRCI_SDC4; + break; + default: + host->dma.sg = NULL; + host->dma.num_ents = 0; + return -ENOENT; + } + + if (data->flags & MMC_DATA_READ) + host->dma.dir = DMA_FROM_DEVICE; + else + host->dma.dir = DMA_TO_DEVICE; + + host->curr.user_pages = 0; + + n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg, + host->dma.num_ents, host->dma.dir); + + if (n != host->dma.num_ents) { + pr_err("%s: Unable to map in all sg elements\n", + mmc_hostname(host->mmc)); + host->dma.sg = NULL; + host->dma.num_ents = 0; + return -ENOMEM; + } + + box = &nc->cmd[0]; + for (i = 0; i < host->dma.num_ents; i++) { + box->cmd = CMD_MODE_BOX; + + if (i == (host->dma.num_ents - 1)) + box->cmd |= CMD_LC; + rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ? 
+ (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 : + (sg_dma_len(sg) / MCI_FIFOSIZE) ; + + if (data->flags & MMC_DATA_READ) { + box->src_row_addr = msmsdcc_fifo_addr(host); + box->dst_row_addr = sg_dma_address(sg); + + box->src_dst_len = (MCI_FIFOSIZE << 16) | + (MCI_FIFOSIZE); + box->row_offset = MCI_FIFOSIZE; + + box->num_rows = rows * ((1 << 16) + 1); + box->cmd |= CMD_SRC_CRCI(crci); + } else { + box->src_row_addr = sg_dma_address(sg); + box->dst_row_addr = msmsdcc_fifo_addr(host); + + box->src_dst_len = (MCI_FIFOSIZE << 16) | + (MCI_FIFOSIZE); + box->row_offset = (MCI_FIFOSIZE << 16); + + box->num_rows = rows * ((1 << 16) + 1); + box->cmd |= CMD_DST_CRCI(crci); + } + box++; + sg++; + } + + /* location of command block must be 64 bit aligned */ + BUG_ON(host->dma.cmd_busaddr & 0x07); + + nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP; + host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST | + DMOV_CMD_ADDR(host->dma.cmdptr_busaddr); + host->dma.hdr.complete_func = msmsdcc_dma_complete_func; + + return 0; +} + +static void +msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data) +{ + unsigned int datactrl, timeout; + unsigned long long clks; + void __iomem *base = host->base; + unsigned int pio_irqmask = 0; + + host->curr.data = data; + host->curr.xfer_size = data->blksz * data->blocks; + host->curr.xfer_remain = host->curr.xfer_size; + host->curr.data_xfered = 0; + host->curr.got_dataend = 0; + host->curr.got_datablkend = 0; + + memset(&host->pio, 0, sizeof(host->pio)); + + clks = (unsigned long long)data->timeout_ns * host->clk_rate; + do_div(clks, NSEC_PER_SEC); + timeout = data->timeout_clks + (unsigned int)clks; + writel(timeout, base + MMCIDATATIMER); + + writel(host->curr.xfer_size, base + MMCIDATALENGTH); + + datactrl = MCI_DPSM_ENABLE | (data->blksz << 4); + + if (!msmsdcc_config_dma(host, data)) + datactrl |= MCI_DPSM_DMAENABLE; + else { + host->pio.sg = data->sg; + host->pio.sg_len = data->sg_len; + host->pio.sg_off = 0; + + if (data->flags & MMC_DATA_READ) { + pio_irqmask = MCI_RXFIFOHALFFULLMASK; + if (host->curr.xfer_remain < MCI_FIFOSIZE) + pio_irqmask |= MCI_RXDATAAVLBLMASK; + } else + pio_irqmask = MCI_TXFIFOHALFEMPTYMASK; + } + + if (data->flags & MMC_DATA_READ) + datactrl |= MCI_DPSM_DIRECTION; + + writel(pio_irqmask, base + MMCIMASK1); + writel(datactrl, base + MMCIDATACTRL); + + if (datactrl & MCI_DPSM_DMAENABLE) { + host->dma.busy = 1; + msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr); + } +} + +static void +msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c) +{ + void __iomem *base = host->base; + + if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) { + writel(0, base + MMCICOMMAND); + udelay(2 + ((5 * 1000000) / host->clk_rate)); + } + + c |= cmd->opcode | MCI_CPSM_ENABLE; + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) + c |= MCI_CPSM_LONGRSP; + c |= MCI_CPSM_RESPONSE; + } + + if (cmd->opcode == 17 || cmd->opcode == 18 || + cmd->opcode == 24 || cmd->opcode == 25 || + cmd->opcode == 53) + c |= MCI_CSPM_DATCMD; + + if (cmd == cmd->mrq->stop) + c |= MCI_CSPM_MCIABORT; + + host->curr.cmd = cmd; + + host->stats.cmds++; + + writel(cmd->arg, base + MMCIARGUMENT); + writel(c, base + MMCICOMMAND); +} + +static void +msmsdcc_data_err(struct msmsdcc_host *host, struct mmc_data *data, + unsigned int status) +{ + if (status & MCI_DATACRCFAIL) { + pr_err("%s: Data CRC error\n", mmc_hostname(host->mmc)); + pr_err("%s: opcode 0x%.8x\n", __func__, + data->mrq->cmd->opcode); + pr_err("%s: blksz %d, blocks %d\n", __func__, + 
data->blksz, data->blocks); + data->error = -EILSEQ; + } else if (status & MCI_DATATIMEOUT) { + pr_err("%s: Data timeout\n", mmc_hostname(host->mmc)); + data->error = -ETIMEDOUT; + } else if (status & MCI_RXOVERRUN) { + pr_err("%s: RX overrun\n", mmc_hostname(host->mmc)); + data->error = -EIO; + } else if (status & MCI_TXUNDERRUN) { + pr_err("%s: TX underrun\n", mmc_hostname(host->mmc)); + data->error = -EIO; + } else { + pr_err("%s: Unknown error (0x%.8x)\n", + mmc_hostname(host->mmc), status); + data->error = -EIO; + } +} + + +static int +msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain) +{ + void __iomem *base = host->base; + uint32_t *ptr = (uint32_t *) buffer; + int count = 0; + + while (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL) { + + *ptr = readl(base + MMCIFIFO + (count % MCI_FIFOSIZE)); + ptr++; + count += sizeof(uint32_t); + + remain -= sizeof(uint32_t); + if (remain == 0) + break; + } + return count; +} + +static int +msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer, + unsigned int remain, u32 status) +{ + void __iomem *base = host->base; + char *ptr = buffer; + + do { + unsigned int count, maxcnt; + + maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : + MCI_FIFOHALFSIZE; + count = min(remain, maxcnt); + + writesl(base + MMCIFIFO, ptr, count >> 2); + ptr += count; + remain -= count; + + if (remain == 0) + break; + + status = readl(base + MMCISTATUS); + } while (status & MCI_TXFIFOHALFEMPTY); + + return ptr - buffer; +} + +static int +msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin) +{ + while (maxspin) { + if ((readl(host->base + MMCISTATUS) & mask)) + return 0; + udelay(1); + --maxspin; + } + return -ETIMEDOUT; +} + +static int +msmsdcc_pio_irq(int irq, void *dev_id) +{ + struct msmsdcc_host *host = dev_id; + void __iomem *base = host->base; + uint32_t status; + + status = readl(base + MMCISTATUS); + + do { + unsigned long flags; + unsigned int remain, len; + char *buffer; + + if (!(status & (MCI_TXFIFOHALFEMPTY | MCI_RXDATAAVLBL))) { + if (host->curr.xfer_remain == 0 || !msmsdcc_piopoll) + break; + + if (msmsdcc_spin_on_status(host, + (MCI_TXFIFOHALFEMPTY | + MCI_RXDATAAVLBL), + PIO_SPINMAX)) { + break; + } + } + + /* Map the current scatter buffer */ + local_irq_save(flags); + buffer = kmap_atomic(sg_page(host->pio.sg), + KM_BIO_SRC_IRQ) + host->pio.sg->offset; + buffer += host->pio.sg_off; + remain = host->pio.sg->length - host->pio.sg_off; + len = 0; + if (status & MCI_RXACTIVE) + len = msmsdcc_pio_read(host, buffer, remain); + if (status & MCI_TXACTIVE) + len = msmsdcc_pio_write(host, buffer, remain, status); + + /* Unmap the buffer */ + kunmap_atomic(buffer, KM_BIO_SRC_IRQ); + local_irq_restore(flags); + + host->pio.sg_off += len; + host->curr.xfer_remain -= len; + host->curr.data_xfered += len; + remain -= len; + + if (remain == 0) { + /* This sg page is full - do some housekeeping */ + if (status & MCI_RXACTIVE && host->curr.user_pages) + flush_dcache_page(sg_page(host->pio.sg)); + + if (!--host->pio.sg_len) { + memset(&host->pio, 0, sizeof(host->pio)); + break; + } + + /* Advance to next sg */ + host->pio.sg++; + host->pio.sg_off = 0; + } + + status = readl(base + MMCISTATUS); + } while (1); + + if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE) + writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1); + + if (!host->curr.xfer_remain) + writel(0, base + MMCIMASK1); + + return IRQ_HANDLED; +} + +static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status) +{ + struct mmc_command 
*cmd = host->curr.cmd; + void __iomem *base = host->base; + + host->curr.cmd = NULL; + cmd->resp[0] = readl(base + MMCIRESPONSE0); + cmd->resp[1] = readl(base + MMCIRESPONSE1); + cmd->resp[2] = readl(base + MMCIRESPONSE2); + cmd->resp[3] = readl(base + MMCIRESPONSE3); + + del_timer(&host->command_timer); + if (status & MCI_CMDTIMEOUT) { + cmd->error = -ETIMEDOUT; + } else if (status & MCI_CMDCRCFAIL && + cmd->flags & MMC_RSP_CRC) { + pr_err("%s: Command CRC error\n", mmc_hostname(host->mmc)); + cmd->error = -EILSEQ; + } + + if (!cmd->data || cmd->error) { + if (host->curr.data && host->dma.sg) + msm_dmov_stop_cmd(host->dma.channel, + &host->dma.hdr, 0); + else if (host->curr.data) { /* Non DMA */ + msmsdcc_stop_data(host); + msmsdcc_request_end(host, cmd->mrq); + } else /* host->data == NULL */ + msmsdcc_request_end(host, cmd->mrq); + } else if (!(cmd->data->flags & MMC_DATA_READ)) + msmsdcc_start_data(host, cmd->data); +} + +static void +msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status, + void __iomem *base) +{ + struct mmc_data *data = host->curr.data; + + if (!data) + return; + + /* Check for data errors */ + if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT | + MCI_TXUNDERRUN | MCI_RXOVERRUN)) { + msmsdcc_data_err(host, data, status); + host->curr.data_xfered = 0; + if (host->dma.sg) + msm_dmov_stop_cmd(host->dma.channel, + &host->dma.hdr, 0); + else { + msmsdcc_stop_data(host); + if (!data->stop) + msmsdcc_request_end(host, data->mrq); + else + msmsdcc_start_command(host, data->stop, 0); + } + } + + /* Check for data done */ + if (!host->curr.got_dataend && (status & MCI_DATAEND)) + host->curr.got_dataend = 1; + + if (!host->curr.got_datablkend && (status & MCI_DATABLOCKEND)) + host->curr.got_datablkend = 1; + + /* + * If DMA is still in progress, we complete via the completion handler + */ + if (host->curr.got_dataend && host->curr.got_datablkend && + !host->dma.busy) { + /* + * There appears to be an issue in the controller where + * if you request a small block transfer (< fifo size), + * you may get your DATAEND/DATABLKEND irq without the + * PIO data irq. + * + * Check to see if there is still data to be read, + * and simulate a PIO irq. + */ + if (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL) + msmsdcc_pio_irq(1, host); + + msmsdcc_stop_data(host); + if (!data->error) + host->curr.data_xfered = host->curr.xfer_size; + + if (!data->stop) + msmsdcc_request_end(host, data->mrq); + else + msmsdcc_start_command(host, data->stop, 0); + } +} + +static irqreturn_t +msmsdcc_irq(int irq, void *dev_id) +{ + struct msmsdcc_host *host = dev_id; + void __iomem *base = host->base; + u32 status; + int ret = 0; + int cardint = 0; + + spin_lock(&host->lock); + + do { + status = readl(base + MMCISTATUS); + + status &= (readl(base + MMCIMASK0) | MCI_DATABLOCKENDMASK); + writel(status, base + MMCICLEAR); + + msmsdcc_handle_irq_data(host, status, base); + + if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL | + MCI_CMDTIMEOUT) && host->curr.cmd) { + msmsdcc_do_cmdirq(host, status); + } + + if (status & MCI_SDIOINTOPER) { + cardint = 1; + status &= ~MCI_SDIOINTOPER; + } + ret = 1; + } while (status); + + spin_unlock(&host->lock); + + /* + * We have to delay handling the card interrupt as it calls + * back into the driver. 
+ */ + if (cardint) + mmc_signal_sdio_irq(host->mmc); + + return IRQ_RETVAL(ret); +} + +static void +msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct msmsdcc_host *host = mmc_priv(mmc); + unsigned long flags; + + WARN_ON(host->curr.mrq != NULL); + WARN_ON(host->pwr == 0); + + spin_lock_irqsave(&host->lock, flags); + + host->stats.reqs++; + + if (host->eject) { + if (mrq->data && !(mrq->data->flags & MMC_DATA_READ)) { + mrq->cmd->error = 0; + mrq->data->bytes_xfered = mrq->data->blksz * + mrq->data->blocks; + } else + mrq->cmd->error = -ENOMEDIUM; + + spin_unlock_irqrestore(&host->lock, flags); + mmc_request_done(mmc, mrq); + return; + } + + host->curr.mrq = mrq; + + if (mrq->data && mrq->data->flags & MMC_DATA_READ) + msmsdcc_start_data(host, mrq->data); + + msmsdcc_start_command(host, mrq->cmd, 0); + + if (host->cmdpoll && !msmsdcc_spin_on_status(host, + MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT, + CMD_SPINMAX)) { + uint32_t status = readl(host->base + MMCISTATUS); + msmsdcc_do_cmdirq(host, status); + writel(MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT, + host->base + MMCICLEAR); + host->stats.cmdpoll_hits++; + } else { + host->stats.cmdpoll_misses++; + mod_timer(&host->command_timer, jiffies + HZ); + } + spin_unlock_irqrestore(&host->lock, flags); +} + +static void +msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct msmsdcc_host *host = mmc_priv(mmc); + u32 clk = 0, pwr = 0; + int rc; + + if (ios->clock) { + + if (!host->clks_on) { + clk_enable(host->pclk); + clk_enable(host->clk); + host->clks_on = 1; + } + if (ios->clock != host->clk_rate) { + rc = clk_set_rate(host->clk, ios->clock); + if (rc < 0) + pr_err("%s: Error setting clock rate (%d)\n", + mmc_hostname(host->mmc), rc); + else + host->clk_rate = ios->clock; + } + clk |= MCI_CLK_ENABLE; + } + + if (ios->bus_width == MMC_BUS_WIDTH_4) + clk |= (2 << 10); /* Set WIDEBUS */ + + if (ios->clock > 400000 && msmsdcc_pwrsave) + clk |= (1 << 9); /* PWRSAVE */ + + clk |= (1 << 12); /* FLOW_ENA */ + clk |= (1 << 15); /* feedback clock */ + + if (host->plat->translate_vdd) + pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd); + + switch (ios->power_mode) { + case MMC_POWER_OFF: + htc_pwrsink_set(PWRSINK_SDCARD, 0); + break; + case MMC_POWER_UP: + pwr |= MCI_PWR_UP; + break; + case MMC_POWER_ON: + htc_pwrsink_set(PWRSINK_SDCARD, 100); + pwr |= MCI_PWR_ON; + break; + } + + if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) + pwr |= MCI_OD; + + writel(clk, host->base + MMCICLOCK); + + if (host->pwr != pwr) { + host->pwr = pwr; + writel(pwr, host->base + MMCIPOWER); + } + + if (!(clk & MCI_CLK_ENABLE) && host->clks_on) { + clk_disable(host->clk); + clk_disable(host->pclk); + host->clks_on = 0; + } +} + +static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct msmsdcc_host *host = mmc_priv(mmc); + unsigned long flags; + u32 status; + + spin_lock_irqsave(&host->lock, flags); + if (msmsdcc_sdioirq == 1) { + status = readl(host->base + MMCIMASK0); + if (enable) + status |= MCI_SDIOINTOPERMASK; + else + status &= ~MCI_SDIOINTOPERMASK; + host->saved_irq0mask = status; + writel(status, host->base + MMCIMASK0); + } + spin_unlock_irqrestore(&host->lock, flags); +} + +static const struct mmc_host_ops msmsdcc_ops = { + .request = msmsdcc_request, + .set_ios = msmsdcc_set_ios, + .enable_sdio_irq = msmsdcc_enable_sdio_irq, +}; + +static void +msmsdcc_check_status(unsigned long data) +{ + struct msmsdcc_host *host = (struct msmsdcc_host *)data; + unsigned int status; + + if 
(!host->plat->status) { + mmc_detect_change(host->mmc, 0); + goto out; + } + + status = host->plat->status(mmc_dev(host->mmc)); + host->eject = !status; + if (status ^ host->oldstat) { + pr_info("%s: Slot status change detected (%d -> %d)\n", + mmc_hostname(host->mmc), host->oldstat, status); + if (status) + mmc_detect_change(host->mmc, (5 * HZ) / 2); + else + mmc_detect_change(host->mmc, 0); + } + + host->oldstat = status; + +out: + if (host->timer.function) + mod_timer(&host->timer, jiffies + HZ); +} + +static irqreturn_t +msmsdcc_platform_status_irq(int irq, void *dev_id) +{ + struct msmsdcc_host *host = dev_id; + + printk(KERN_DEBUG "%s: %d\n", __func__, irq); + msmsdcc_check_status((unsigned long) host); + return IRQ_HANDLED; +} + +static void +msmsdcc_status_notify_cb(int card_present, void *dev_id) +{ + struct msmsdcc_host *host = dev_id; + + printk(KERN_DEBUG "%s: card_present %d\n", mmc_hostname(host->mmc), + card_present); + msmsdcc_check_status((unsigned long) host); +} + +/* + * called when a command expires. + * Dump some debugging, and then error + * out the transaction. + */ +static void +msmsdcc_command_expired(unsigned long _data) +{ + struct msmsdcc_host *host = (struct msmsdcc_host *) _data; + struct mmc_request *mrq; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + mrq = host->curr.mrq; + + if (!mrq) { + pr_info("%s: Command expiry misfire\n", + mmc_hostname(host->mmc)); + spin_unlock_irqrestore(&host->lock, flags); + return; + } + + pr_err("%s: Command timeout (%p %p %p %p)\n", + mmc_hostname(host->mmc), mrq, mrq->cmd, + mrq->data, host->dma.sg); + + mrq->cmd->error = -ETIMEDOUT; + msmsdcc_stop_data(host); + + writel(0, host->base + MMCICOMMAND); + + host->curr.mrq = NULL; + host->curr.cmd = NULL; + + spin_unlock_irqrestore(&host->lock, flags); + mmc_request_done(host->mmc, mrq); +} + +static int +msmsdcc_init_dma(struct msmsdcc_host *host) +{ + memset(&host->dma, 0, sizeof(struct msmsdcc_dma_data)); + host->dma.host = host; + host->dma.channel = -1; + + if (!host->dmares) + return -ENODEV; + + host->dma.nc = dma_alloc_coherent(NULL, + sizeof(struct msmsdcc_nc_dmadata), + &host->dma.nc_busaddr, + GFP_KERNEL); + if (host->dma.nc == NULL) { + pr_err("Unable to allocate DMA buffer\n"); + return -ENOMEM; + } + memset(host->dma.nc, 0x00, sizeof(struct msmsdcc_nc_dmadata)); + host->dma.cmd_busaddr = host->dma.nc_busaddr; + host->dma.cmdptr_busaddr = host->dma.nc_busaddr + + offsetof(struct msmsdcc_nc_dmadata, cmdptr); + host->dma.channel = host->dmares->start; + + return 0; +} + +#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ +static void +do_resume_work(struct work_struct *work) +{ + struct msmsdcc_host *host = + container_of(work, struct msmsdcc_host, resume_task); + struct mmc_host *mmc = host->mmc; + + if (mmc) { + mmc_resume_host(mmc); + if (host->stat_irq) + enable_irq(host->stat_irq); + } +} +#endif + +static int +msmsdcc_probe(struct platform_device *pdev) +{ + struct mmc_platform_data *plat = pdev->dev.platform_data; + struct msmsdcc_host *host; + struct mmc_host *mmc; + struct resource *cmd_irqres = NULL; + struct resource *pio_irqres = NULL; + struct resource *stat_irqres = NULL; + struct resource *memres = NULL; + struct resource *dmares = NULL; + int ret; + + /* must have platform data */ + if (!plat) { + pr_err("%s: Platform data not available\n", __func__); + ret = -EINVAL; + goto out; + } + + if (pdev->id < 1 || pdev->id > 4) + return -EINVAL; + + if (pdev->resource == NULL || pdev->num_resources < 2) { + pr_err("%s: Invalid resource\n", 
__func__); + return -ENXIO; + } + + memres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); + cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + "cmd_irq"); + pio_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + "pio_irq"); + stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + "status_irq"); + + if (!cmd_irqres || !pio_irqres || !memres) { + pr_err("%s: Invalid resource\n", __func__); + return -ENXIO; + } + + /* + * Setup our host structure + */ + + mmc = mmc_alloc_host(sizeof(struct msmsdcc_host), &pdev->dev); + if (!mmc) { + ret = -ENOMEM; + goto out; + } + + host = mmc_priv(mmc); + host->pdev_id = pdev->id; + host->plat = plat; + host->mmc = mmc; + + host->cmdpoll = 1; + + host->base = ioremap(memres->start, PAGE_SIZE); + if (!host->base) { + ret = -ENOMEM; + goto out; + } + + host->cmd_irqres = cmd_irqres; + host->pio_irqres = pio_irqres; + host->memres = memres; + host->dmares = dmares; + spin_lock_init(&host->lock); + + /* + * Setup DMA + */ + msmsdcc_init_dma(host); + + /* + * Setup main peripheral bus clock + */ + host->pclk = clk_get(&pdev->dev, "sdc_pclk"); + if (IS_ERR(host->pclk)) { + ret = PTR_ERR(host->pclk); + goto host_free; + } + + ret = clk_enable(host->pclk); + if (ret) + goto pclk_put; + + host->pclk_rate = clk_get_rate(host->pclk); + + /* + * Setup SDC MMC clock + */ + host->clk = clk_get(&pdev->dev, "sdc_clk"); + if (IS_ERR(host->clk)) { + ret = PTR_ERR(host->clk); + goto pclk_disable; + } + + ret = clk_enable(host->clk); + if (ret) + goto clk_put; + + ret = clk_set_rate(host->clk, msmsdcc_fmin); + if (ret) { + pr_err("%s: Clock rate set failed (%d)\n", __func__, ret); + goto clk_disable; + } + + host->clk_rate = clk_get_rate(host->clk); + + host->clks_on = 1; + + /* + * Setup MMC host structure + */ + mmc->ops = &msmsdcc_ops; + mmc->f_min = msmsdcc_fmin; + mmc->f_max = msmsdcc_fmax; + mmc->ocr_avail = plat->ocr_mask; + + if (msmsdcc_4bit) + mmc->caps |= MMC_CAP_4_BIT_DATA; + if (msmsdcc_sdioirq) + mmc->caps |= MMC_CAP_SDIO_IRQ; + mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED; + + mmc->max_phys_segs = NR_SG; + mmc->max_hw_segs = NR_SG; + mmc->max_blk_size = 4096; /* MCI_DATA_CTL BLOCKSIZE up to 4096 */ + mmc->max_blk_count = 65536; + + mmc->max_req_size = 33554432; /* MCI_DATA_LENGTH is 25 bits */ + mmc->max_seg_size = mmc->max_req_size; + + writel(0, host->base + MMCIMASK0); + writel(0x5e007ff, host->base + MMCICLEAR); /* Add: 1 << 25 */ + + writel(MCI_IRQENABLE, host->base + MMCIMASK0); + host->saved_irq0mask = MCI_IRQENABLE; + + /* + * Setup card detect change + */ + + memset(&host->timer, 0, sizeof(host->timer)); + + if (stat_irqres && !(stat_irqres->flags & IORESOURCE_DISABLED)) { + unsigned long irqflags = IRQF_SHARED | + (stat_irqres->flags & IRQF_TRIGGER_MASK); + + host->stat_irq = stat_irqres->start; + ret = request_irq(host->stat_irq, + msmsdcc_platform_status_irq, + irqflags, + DRIVER_NAME " (slot)", + host); + if (ret) { + pr_err("%s: Unable to get slot IRQ %d (%d)\n", + mmc_hostname(mmc), host->stat_irq, ret); + goto clk_disable; + } + } else if (plat->register_status_notify) { + plat->register_status_notify(msmsdcc_status_notify_cb, host); + } else if (!plat->status) + pr_err("%s: No card detect facilities available\n", + mmc_hostname(mmc)); + else { + init_timer(&host->timer); + host->timer.data = (unsigned long)host; + host->timer.function = msmsdcc_check_status; + host->timer.expires = jiffies + HZ; + add_timer(&host->timer); + } + + if 
(plat->status) { + host->oldstat = host->plat->status(mmc_dev(host->mmc)); + host->eject = !host->oldstat; + } + + /* + * Setup a command timer. We currently need this due to + * some 'strange' timeout / error handling situations. + */ + init_timer(&host->command_timer); + host->command_timer.data = (unsigned long) host; + host->command_timer.function = msmsdcc_command_expired; + + ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED, + DRIVER_NAME " (cmd)", host); + if (ret) + goto stat_irq_free; + + ret = request_irq(pio_irqres->start, msmsdcc_pio_irq, IRQF_SHARED, + DRIVER_NAME " (pio)", host); + if (ret) + goto cmd_irq_free; + + mmc_set_drvdata(pdev, mmc); + mmc_add_host(mmc); + + pr_info("%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n", + mmc_hostname(mmc), (unsigned long long)memres->start, + (unsigned int) cmd_irqres->start, + (unsigned int) host->stat_irq, host->dma.channel); + pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc), + (mmc->caps & MMC_CAP_4_BIT_DATA ? "enabled" : "disabled")); + pr_info("%s: MMC clock %u -> %u Hz, PCLK %u Hz\n", + mmc_hostname(mmc), msmsdcc_fmin, msmsdcc_fmax, host->pclk_rate); + pr_info("%s: Slot eject status = %d\n", mmc_hostname(mmc), host->eject); + pr_info("%s: Power save feature enable = %d\n", + mmc_hostname(mmc), msmsdcc_pwrsave); + + if (host->dma.channel != -1) { + pr_info("%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n", + mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr); + pr_info("%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n", + mmc_hostname(mmc), host->dma.cmd_busaddr, + host->dma.cmdptr_busaddr); + } else + pr_info("%s: PIO transfer enabled\n", mmc_hostname(mmc)); + if (host->timer.function) + pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc)); + + return 0; + cmd_irq_free: + free_irq(cmd_irqres->start, host); + stat_irq_free: + if (host->stat_irq) + free_irq(host->stat_irq, host); + clk_disable: + clk_disable(host->clk); + clk_put: + clk_put(host->clk); + pclk_disable: + clk_disable(host->pclk); + pclk_put: + clk_put(host->pclk); + host_free: + mmc_free_host(mmc); + out: + return ret; +} + +static int +msmsdcc_suspend(struct platform_device *dev, pm_message_t state) +{ + struct mmc_host *mmc = mmc_get_drvdata(dev); + int rc = 0; + + if (mmc) { + struct msmsdcc_host *host = mmc_priv(mmc); + + if (host->stat_irq) + disable_irq(host->stat_irq); + + if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) + rc = mmc_suspend_host(mmc, state); + if (!rc) { + writel(0, host->base + MMCIMASK0); + + if (host->clks_on) { + clk_disable(host->clk); + clk_disable(host->pclk); + host->clks_on = 0; + } + } + } + return rc; +} + +static int +msmsdcc_resume(struct platform_device *dev) +{ + struct mmc_host *mmc = mmc_get_drvdata(dev); + unsigned long flags; + + if (mmc) { + struct msmsdcc_host *host = mmc_priv(mmc); + + spin_lock_irqsave(&host->lock, flags); + + if (!host->clks_on) { + clk_enable(host->pclk); + clk_enable(host->clk); + host->clks_on = 1; + } + + writel(host->saved_irq0mask, host->base + MMCIMASK0); + + spin_unlock_irqrestore(&host->lock, flags); + + if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) + mmc_resume_host(mmc); + if (host->stat_irq) + enable_irq(host->stat_irq); + else if (host->stat_irq) + enable_irq(host->stat_irq); + } + return 0; +} + +static struct platform_driver msmsdcc_driver = { + .probe = msmsdcc_probe, + .suspend = msmsdcc_suspend, + .resume = msmsdcc_resume, + .driver = { + .name = "msm_sdcc", + }, +}; + +static int __init msmsdcc_init(void) +{ + return 
platform_driver_register(&msmsdcc_driver); +} + +static void __exit msmsdcc_exit(void) +{ + platform_driver_unregister(&msmsdcc_driver); +} + +module_init(msmsdcc_init); +module_exit(msmsdcc_exit); + +MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h new file mode 100644 index 00000000000..8c844846981 --- /dev/null +++ b/drivers/mmc/host/msm_sdcc.h @@ -0,0 +1,238 @@ +/* + * linux/drivers/mmc/host/msmsdcc.h - QCT MSM7K SDC Controller + * + * Copyright (C) 2008 Google, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * - Based on mmci.h + */ + +#ifndef _MSM_SDCC_H +#define _MSM_SDCC_H + +#define MSMSDCC_CRCI_SDC1 6 +#define MSMSDCC_CRCI_SDC2 7 +#define MSMSDCC_CRCI_SDC3 12 +#define MSMSDCC_CRCI_SDC4 13 + +#define MMCIPOWER 0x000 +#define MCI_PWR_OFF 0x00 +#define MCI_PWR_UP 0x02 +#define MCI_PWR_ON 0x03 +#define MCI_OD (1 << 6) + +#define MMCICLOCK 0x004 +#define MCI_CLK_ENABLE (1 << 8) +#define MCI_CLK_PWRSAVE (1 << 9) +#define MCI_CLK_WIDEBUS (1 << 10) +#define MCI_CLK_FLOWENA (1 << 12) +#define MCI_CLK_INVERTOUT (1 << 13) +#define MCI_CLK_SELECTIN (1 << 14) + +#define MMCIARGUMENT 0x008 +#define MMCICOMMAND 0x00c +#define MCI_CPSM_RESPONSE (1 << 6) +#define MCI_CPSM_LONGRSP (1 << 7) +#define MCI_CPSM_INTERRUPT (1 << 8) +#define MCI_CPSM_PENDING (1 << 9) +#define MCI_CPSM_ENABLE (1 << 10) +#define MCI_CPSM_PROGENA (1 << 11) +#define MCI_CSPM_DATCMD (1 << 12) +#define MCI_CSPM_MCIABORT (1 << 13) +#define MCI_CSPM_CCSENABLE (1 << 14) +#define MCI_CSPM_CCSDISABLE (1 << 15) + + +#define MMCIRESPCMD 0x010 +#define MMCIRESPONSE0 0x014 +#define MMCIRESPONSE1 0x018 +#define MMCIRESPONSE2 0x01c +#define MMCIRESPONSE3 0x020 +#define MMCIDATATIMER 0x024 +#define MMCIDATALENGTH 0x028 + +#define MMCIDATACTRL 0x02c +#define MCI_DPSM_ENABLE (1 << 0) +#define MCI_DPSM_DIRECTION (1 << 1) +#define MCI_DPSM_MODE (1 << 2) +#define MCI_DPSM_DMAENABLE (1 << 3) + +#define MMCIDATACNT 0x030 +#define MMCISTATUS 0x034 +#define MCI_CMDCRCFAIL (1 << 0) +#define MCI_DATACRCFAIL (1 << 1) +#define MCI_CMDTIMEOUT (1 << 2) +#define MCI_DATATIMEOUT (1 << 3) +#define MCI_TXUNDERRUN (1 << 4) +#define MCI_RXOVERRUN (1 << 5) +#define MCI_CMDRESPEND (1 << 6) +#define MCI_CMDSENT (1 << 7) +#define MCI_DATAEND (1 << 8) +#define MCI_DATABLOCKEND (1 << 10) +#define MCI_CMDACTIVE (1 << 11) +#define MCI_TXACTIVE (1 << 12) +#define MCI_RXACTIVE (1 << 13) +#define MCI_TXFIFOHALFEMPTY (1 << 14) +#define MCI_RXFIFOHALFFULL (1 << 15) +#define MCI_TXFIFOFULL (1 << 16) +#define MCI_RXFIFOFULL (1 << 17) +#define MCI_TXFIFOEMPTY (1 << 18) +#define MCI_RXFIFOEMPTY (1 << 19) +#define MCI_TXDATAAVLBL (1 << 20) +#define MCI_RXDATAAVLBL (1 << 21) +#define MCI_SDIOINTR (1 << 22) +#define MCI_PROGDONE (1 << 23) +#define MCI_ATACMDCOMPL (1 << 24) +#define MCI_SDIOINTOPER (1 << 25) +#define MCI_CCSTIMEOUT (1 << 26) + +#define MMCICLEAR 0x038 +#define MCI_CMDCRCFAILCLR (1 << 0) +#define MCI_DATACRCFAILCLR (1 << 1) +#define MCI_CMDTIMEOUTCLR (1 << 2) +#define MCI_DATATIMEOUTCLR (1 << 3) +#define MCI_TXUNDERRUNCLR (1 << 4) +#define MCI_RXOVERRUNCLR (1 << 5) +#define MCI_CMDRESPENDCLR (1 << 6) +#define MCI_CMDSENTCLR (1 << 7) +#define MCI_DATAENDCLR (1 << 8) +#define MCI_DATABLOCKENDCLR (1 << 10) + +#define MMCIMASK0 0x03c +#define MCI_CMDCRCFAILMASK (1 << 0) +#define 
MCI_DATACRCFAILMASK (1 << 1) +#define MCI_CMDTIMEOUTMASK (1 << 2) +#define MCI_DATATIMEOUTMASK (1 << 3) +#define MCI_TXUNDERRUNMASK (1 << 4) +#define MCI_RXOVERRUNMASK (1 << 5) +#define MCI_CMDRESPENDMASK (1 << 6) +#define MCI_CMDSENTMASK (1 << 7) +#define MCI_DATAENDMASK (1 << 8) +#define MCI_DATABLOCKENDMASK (1 << 10) +#define MCI_CMDACTIVEMASK (1 << 11) +#define MCI_TXACTIVEMASK (1 << 12) +#define MCI_RXACTIVEMASK (1 << 13) +#define MCI_TXFIFOHALFEMPTYMASK (1 << 14) +#define MCI_RXFIFOHALFFULLMASK (1 << 15) +#define MCI_TXFIFOFULLMASK (1 << 16) +#define MCI_RXFIFOFULLMASK (1 << 17) +#define MCI_TXFIFOEMPTYMASK (1 << 18) +#define MCI_RXFIFOEMPTYMASK (1 << 19) +#define MCI_TXDATAAVLBLMASK (1 << 20) +#define MCI_RXDATAAVLBLMASK (1 << 21) +#define MCI_SDIOINTMASK (1 << 22) +#define MCI_PROGDONEMASK (1 << 23) +#define MCI_ATACMDCOMPLMASK (1 << 24) +#define MCI_SDIOINTOPERMASK (1 << 25) +#define MCI_CCSTIMEOUTMASK (1 << 26) + +#define MMCIMASK1 0x040 +#define MMCIFIFOCNT 0x044 +#define MCICCSTIMER 0x058 + +#define MMCIFIFO 0x080 /* to 0x0bc */ + +#define MCI_IRQENABLE \ + (MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \ + MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \ + MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK) + +/* + * The size of the FIFO in bytes. + */ +#define MCI_FIFOSIZE (16*4) + +#define MCI_FIFOHALFSIZE (MCI_FIFOSIZE / 2) + +#define NR_SG 32 + +struct clk; + +struct msmsdcc_nc_dmadata { + dmov_box cmd[NR_SG]; + uint32_t cmdptr; +}; + +struct msmsdcc_dma_data { + struct msmsdcc_nc_dmadata *nc; + dma_addr_t nc_busaddr; + dma_addr_t cmd_busaddr; + dma_addr_t cmdptr_busaddr; + + struct msm_dmov_cmd hdr; + enum dma_data_direction dir; + + struct scatterlist *sg; + int num_ents; + + int channel; + struct msmsdcc_host *host; + int busy; /* Set if DM is busy */ +}; + +struct msmsdcc_pio_data { + struct scatterlist *sg; + unsigned int sg_len; + unsigned int sg_off; +}; + +struct msmsdcc_curr_req { + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + unsigned int xfer_size; /* Total data size */ + unsigned int xfer_remain; /* Bytes remaining to send */ + unsigned int data_xfered; /* Bytes acked by BLKEND irq */ + int got_dataend; + int got_datablkend; + int user_pages; +}; + +struct msmsdcc_stats { + unsigned int reqs; + unsigned int cmds; + unsigned int cmdpoll_hits; + unsigned int cmdpoll_misses; +}; + +struct msmsdcc_host { + struct resource *cmd_irqres; + struct resource *pio_irqres; + struct resource *memres; + struct resource *dmares; + void __iomem *base; + int pdev_id; + unsigned int stat_irq; + + struct msmsdcc_curr_req curr; + + struct mmc_host *mmc; + struct clk *clk; /* main MMC bus clock */ + struct clk *pclk; /* SDCC peripheral bus clock */ + unsigned int clks_on; /* set if clocks are enabled */ + struct timer_list command_timer; + + unsigned int eject; /* eject state */ + + spinlock_t lock; + + unsigned int clk_rate; /* Current clock rate */ + unsigned int pclk_rate; + + u32 pwr; + u32 saved_irq0mask; /* MMCIMASK0 reg value */ + struct mmc_platform_data *plat; + + struct timer_list timer; + unsigned int oldstat; + + struct msmsdcc_dma_data dma; + struct msmsdcc_pio_data pio; + int cmdpoll; + struct msmsdcc_stats stats; +}; + +#endif diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index bc14bb1b057..88671529c45 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -512,7 +512,7 @@ static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat) } /* For the DMA case the 
DMA engine handles the data transfer - * automatically. For non DMA we have to to it ourselves. + * automatically. For non DMA we have to do it ourselves. * Don't do it in interrupt context though. */ if (!mxcmci_use_dma(host) && host->data) diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 1cf9cfb3b64..4487cc09791 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -17,6 +17,8 @@ #include <linux/module.h> #include <linux/init.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/dma-mapping.h> @@ -25,6 +27,7 @@ #include <linux/timer.h> #include <linux/clk.h> #include <linux/mmc/host.h> +#include <linux/mmc/core.h> #include <linux/io.h> #include <linux/semaphore.h> #include <mach/dma.h> @@ -35,6 +38,7 @@ /* OMAP HSMMC Host Controller Registers */ #define OMAP_HSMMC_SYSCONFIG 0x0010 +#define OMAP_HSMMC_SYSSTATUS 0x0014 #define OMAP_HSMMC_CON 0x002C #define OMAP_HSMMC_BLK 0x0104 #define OMAP_HSMMC_ARG 0x0108 @@ -70,6 +74,8 @@ #define DTO_MASK 0x000F0000 #define DTO_SHIFT 16 #define INT_EN_MASK 0x307F0033 +#define BWR_ENABLE (1 << 4) +#define BRR_ENABLE (1 << 5) #define INIT_STREAM (1 << 1) #define DP_SELECT (1 << 21) #define DDIR (1 << 4) @@ -92,6 +98,8 @@ #define DUAL_VOLT_OCR_BIT 7 #define SRC (1 << 25) #define SRD (1 << 26) +#define SOFTRESET (1 << 1) +#define RESETDONE (1 << 0) /* * FIXME: Most likely all the data using these _DEVID defines should come @@ -101,11 +109,18 @@ #define OMAP_MMC1_DEVID 0 #define OMAP_MMC2_DEVID 1 #define OMAP_MMC3_DEVID 2 +#define OMAP_MMC4_DEVID 3 +#define OMAP_MMC5_DEVID 4 #define MMC_TIMEOUT_MS 20 #define OMAP_MMC_MASTER_CLOCK 96000000 #define DRIVER_NAME "mmci-omap-hs" +/* Timeouts for entering power saving states on inactivity, msec */ +#define OMAP_MMC_DISABLED_TIMEOUT 100 +#define OMAP_MMC_SLEEP_TIMEOUT 1000 +#define OMAP_MMC_OFF_TIMEOUT 8000 + /* * One controller can have multiple slots, like on some omap boards using * omap.c controller driver. Luckily this is not currently done on any known @@ -122,7 +137,7 @@ #define OMAP_HSMMC_WRITE(base, reg, val) \ __raw_writel((val), (base) + OMAP_HSMMC_##reg) -struct mmc_omap_host { +struct omap_hsmmc_host { struct device *dev; struct mmc_host *mmc; struct mmc_request *mrq; @@ -135,27 +150,35 @@ struct mmc_omap_host { struct work_struct mmc_carddetect_work; void __iomem *base; resource_size_t mapbase; + spinlock_t irq_lock; /* Prevent races with irq handler */ + unsigned long flags; unsigned int id; unsigned int dma_len; unsigned int dma_sg_idx; unsigned char bus_mode; + unsigned char power_mode; u32 *buffer; u32 bytesleft; int suspended; int irq; - int carddetect; int use_dma, dma_ch; int dma_line_tx, dma_line_rx; int slot_id; - int dbclk_enabled; + int got_dbclk; int response_busy; + int context_loss; + int dpm_state; + int vdd; + int protect_card; + int reqs_blocked; + struct omap_mmc_platform_data *pdata; }; /* * Stop clock to the card */ -static void omap_mmc_stop_clock(struct mmc_omap_host *host) +static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host) { OMAP_HSMMC_WRITE(host->base, SYSCTL, OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN); @@ -163,15 +186,178 @@ static void omap_mmc_stop_clock(struct mmc_omap_host *host) dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n"); } +#ifdef CONFIG_PM + +/* + * Restore the MMC host context, if it was lost as result of a + * power state change. 
+ */ +static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) +{ + struct mmc_ios *ios = &host->mmc->ios; + struct omap_mmc_platform_data *pdata = host->pdata; + int context_loss = 0; + u32 hctl, capa, con; + u16 dsor = 0; + unsigned long timeout; + + if (pdata->get_context_loss_count) { + context_loss = pdata->get_context_loss_count(host->dev); + if (context_loss < 0) + return 1; + } + + dev_dbg(mmc_dev(host->mmc), "context was %slost\n", + context_loss == host->context_loss ? "not " : ""); + if (host->context_loss == context_loss) + return 1; + + /* Wait for hardware reset */ + timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); + while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE + && time_before(jiffies, timeout)) + ; + + /* Do software reset */ + OMAP_HSMMC_WRITE(host->base, SYSCONFIG, SOFTRESET); + timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); + while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE + && time_before(jiffies, timeout)) + ; + + OMAP_HSMMC_WRITE(host->base, SYSCONFIG, + OMAP_HSMMC_READ(host->base, SYSCONFIG) | AUTOIDLE); + + if (host->id == OMAP_MMC1_DEVID) { + if (host->power_mode != MMC_POWER_OFF && + (1 << ios->vdd) <= MMC_VDD_23_24) + hctl = SDVS18; + else + hctl = SDVS30; + capa = VS30 | VS18; + } else { + hctl = SDVS18; + capa = VS18; + } + + OMAP_HSMMC_WRITE(host->base, HCTL, + OMAP_HSMMC_READ(host->base, HCTL) | hctl); + + OMAP_HSMMC_WRITE(host->base, CAPA, + OMAP_HSMMC_READ(host->base, CAPA) | capa); + + OMAP_HSMMC_WRITE(host->base, HCTL, + OMAP_HSMMC_READ(host->base, HCTL) | SDBP); + + timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); + while ((OMAP_HSMMC_READ(host->base, HCTL) & SDBP) != SDBP + && time_before(jiffies, timeout)) + ; + + OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); + OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); + OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); + + /* Do not initialize card-specific things if the power is off */ + if (host->power_mode == MMC_POWER_OFF) + goto out; + + con = OMAP_HSMMC_READ(host->base, CON); + switch (ios->bus_width) { + case MMC_BUS_WIDTH_8: + OMAP_HSMMC_WRITE(host->base, CON, con | DW8); + break; + case MMC_BUS_WIDTH_4: + OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); + OMAP_HSMMC_WRITE(host->base, HCTL, + OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT); + break; + case MMC_BUS_WIDTH_1: + OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); + OMAP_HSMMC_WRITE(host->base, HCTL, + OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT); + break; + } + + if (ios->clock) { + dsor = OMAP_MMC_MASTER_CLOCK / ios->clock; + if (dsor < 1) + dsor = 1; + + if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock) + dsor++; + + if (dsor > 250) + dsor = 250; + } + + OMAP_HSMMC_WRITE(host->base, SYSCTL, + OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN); + OMAP_HSMMC_WRITE(host->base, SYSCTL, (dsor << 6) | (DTO << 16)); + OMAP_HSMMC_WRITE(host->base, SYSCTL, + OMAP_HSMMC_READ(host->base, SYSCTL) | ICE); + + timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); + while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS + && time_before(jiffies, timeout)) + ; + + OMAP_HSMMC_WRITE(host->base, SYSCTL, + OMAP_HSMMC_READ(host->base, SYSCTL) | CEN); + + con = OMAP_HSMMC_READ(host->base, CON); + if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) + OMAP_HSMMC_WRITE(host->base, CON, con | OD); + else + OMAP_HSMMC_WRITE(host->base, CON, con & ~OD); +out: + host->context_loss = context_loss; + + dev_dbg(mmc_dev(host->mmc), "context is restored\n"); + return 0; +} + +/* + * Save the MMC host context (store 
the number of power state changes so far). + */ +static void omap_hsmmc_context_save(struct omap_hsmmc_host *host) +{ + struct omap_mmc_platform_data *pdata = host->pdata; + int context_loss; + + if (pdata->get_context_loss_count) { + context_loss = pdata->get_context_loss_count(host->dev); + if (context_loss < 0) + return; + host->context_loss = context_loss; + } +} + +#else + +static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) +{ + return 0; +} + +static void omap_hsmmc_context_save(struct omap_hsmmc_host *host) +{ +} + +#endif + /* * Send init stream sequence to card * before sending IDLE command */ -static void send_init_stream(struct mmc_omap_host *host) +static void send_init_stream(struct omap_hsmmc_host *host) { int reg = 0; unsigned long timeout; + if (host->protect_card) + return; + disable_irq(host->irq); OMAP_HSMMC_WRITE(host->base, CON, OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM); @@ -183,51 +369,53 @@ static void send_init_stream(struct mmc_omap_host *host) OMAP_HSMMC_WRITE(host->base, CON, OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM); + + OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); + OMAP_HSMMC_READ(host->base, STAT); + enable_irq(host->irq); } static inline -int mmc_omap_cover_is_closed(struct mmc_omap_host *host) +int omap_hsmmc_cover_is_closed(struct omap_hsmmc_host *host) { int r = 1; - if (host->pdata->slots[host->slot_id].get_cover_state) - r = host->pdata->slots[host->slot_id].get_cover_state(host->dev, - host->slot_id); + if (mmc_slot(host).get_cover_state) + r = mmc_slot(host).get_cover_state(host->dev, host->slot_id); return r; } static ssize_t -mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr, +omap_hsmmc_show_cover_switch(struct device *dev, struct device_attribute *attr, char *buf) { struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev); - struct mmc_omap_host *host = mmc_priv(mmc); + struct omap_hsmmc_host *host = mmc_priv(mmc); - return sprintf(buf, "%s\n", mmc_omap_cover_is_closed(host) ? "closed" : - "open"); + return sprintf(buf, "%s\n", + omap_hsmmc_cover_is_closed(host) ? "closed" : "open"); } -static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL); +static DEVICE_ATTR(cover_switch, S_IRUGO, omap_hsmmc_show_cover_switch, NULL); static ssize_t -mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr, +omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr, char *buf) { struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev); - struct mmc_omap_host *host = mmc_priv(mmc); - struct omap_mmc_slot_data slot = host->pdata->slots[host->slot_id]; + struct omap_hsmmc_host *host = mmc_priv(mmc); - return sprintf(buf, "%s\n", slot.name); + return sprintf(buf, "%s\n", mmc_slot(host).name); } -static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL); +static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL); /* * Configure the response type and send the cmd. 
*/ static void -mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd, +omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd, struct mmc_data *data) { int cmdreg = 0, resptype = 0, cmdtype = 0; @@ -241,7 +429,12 @@ mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd, */ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); - OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); + + if (host->use_dma) + OMAP_HSMMC_WRITE(host->base, IE, + INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE)); + else + OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); host->response_busy = 0; if (cmd->flags & MMC_RSP_PRESENT) { @@ -275,12 +468,20 @@ mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd, if (host->use_dma) cmdreg |= DMA_EN; + /* + * In an interrupt context (i.e. STOP command), the spinlock is unlocked + * by the interrupt handler, otherwise (i.e. for a new request) it is + * unlocked here. + */ + if (!in_interrupt()) + spin_unlock_irqrestore(&host->irq_lock, host->flags); + OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg); OMAP_HSMMC_WRITE(host->base, CMD, cmdreg); } static int -mmc_omap_get_dma_dir(struct mmc_omap_host *host, struct mmc_data *data) +omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data) { if (data->flags & MMC_DATA_WRITE) return DMA_TO_DEVICE; @@ -292,11 +493,18 @@ mmc_omap_get_dma_dir(struct mmc_omap_host *host, struct mmc_data *data) * Notify the transfer complete to MMC core */ static void -mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data) +omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data) { if (!data) { struct mmc_request *mrq = host->mrq; + /* TC before CC from CMD6 - don't know why, but it happens */ + if (host->cmd && host->cmd->opcode == 6 && + host->response_busy) { + host->response_busy = 0; + return; + } + host->mrq = NULL; mmc_request_done(host->mmc, mrq); return; @@ -306,7 +514,7 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data) if (host->use_dma && host->dma_ch != -1) dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len, - mmc_omap_get_dma_dir(host, data)); + omap_hsmmc_get_dma_dir(host, data)); if (!data->error) data->bytes_xfered += data->blocks * (data->blksz); @@ -318,14 +526,14 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data) mmc_request_done(host->mmc, data->mrq); return; } - mmc_omap_start_command(host, data->stop, NULL); + omap_hsmmc_start_command(host, data->stop, NULL); } /* * Notify the core about command completion */ static void -mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd) +omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd) { host->cmd = NULL; @@ -350,13 +558,13 @@ mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd) /* * DMA clean up for command errors */ -static void mmc_dma_cleanup(struct mmc_omap_host *host, int errno) +static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) { host->data->error = errno; if (host->use_dma && host->dma_ch != -1) { dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len, - mmc_omap_get_dma_dir(host, host->data)); + omap_hsmmc_get_dma_dir(host, host->data)); omap_free_dma(host->dma_ch); host->dma_ch = -1; up(&host->sem); @@ -368,10 +576,10 @@ static void mmc_dma_cleanup(struct mmc_omap_host *host, int errno) * Readable error output */ #ifdef CONFIG_MMC_DEBUG -static void mmc_omap_report_irq(struct 
mmc_omap_host *host, u32 status) +static void omap_hsmmc_report_irq(struct omap_hsmmc_host *host, u32 status) { /* --- means reserved bit without definition at documentation */ - static const char *mmc_omap_status_bits[] = { + static const char *omap_hsmmc_status_bits[] = { "CC", "TC", "BGE", "---", "BWR", "BRR", "---", "---", "CIRQ", "OBI", "---", "---", "---", "---", "---", "ERRI", "CTO", "CCRC", "CEB", "CIE", "DTO", "DCRC", "DEB", "---", "ACE", "---", @@ -384,9 +592,9 @@ static void mmc_omap_report_irq(struct mmc_omap_host *host, u32 status) len = sprintf(buf, "MMC IRQ 0x%x :", status); buf += len; - for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++) + for (i = 0; i < ARRAY_SIZE(omap_hsmmc_status_bits); i++) if (status & (1 << i)) { - len = sprintf(buf, " %s", mmc_omap_status_bits[i]); + len = sprintf(buf, " %s", omap_hsmmc_status_bits[i]); buf += len; } @@ -401,8 +609,8 @@ static void mmc_omap_report_irq(struct mmc_omap_host *host, u32 status) * SRC or SRD bit of SYSCTL register * Can be called from interrupt context */ -static inline void mmc_omap_reset_controller_fsm(struct mmc_omap_host *host, - unsigned long bit) +static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host, + unsigned long bit) { unsigned long i = 0; unsigned long limit = (loops_per_jiffy * @@ -424,17 +632,20 @@ static inline void mmc_omap_reset_controller_fsm(struct mmc_omap_host *host, /* * MMC controller IRQ handler */ -static irqreturn_t mmc_omap_irq(int irq, void *dev_id) +static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id) { - struct mmc_omap_host *host = dev_id; + struct omap_hsmmc_host *host = dev_id; struct mmc_data *data; int end_cmd = 0, end_trans = 0, status; + spin_lock(&host->irq_lock); + if (host->mrq == NULL) { OMAP_HSMMC_WRITE(host->base, STAT, OMAP_HSMMC_READ(host->base, STAT)); /* Flush posted write */ OMAP_HSMMC_READ(host->base, STAT); + spin_unlock(&host->irq_lock); return IRQ_HANDLED; } @@ -444,13 +655,14 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id) if (status & ERR) { #ifdef CONFIG_MMC_DEBUG - mmc_omap_report_irq(host, status); + omap_hsmmc_report_irq(host, status); #endif if ((status & CMD_TIMEOUT) || (status & CMD_CRC)) { if (host->cmd) { if (status & CMD_TIMEOUT) { - mmc_omap_reset_controller_fsm(host, SRC); + omap_hsmmc_reset_controller_fsm(host, + SRC); host->cmd->error = -ETIMEDOUT; } else { host->cmd->error = -EILSEQ; @@ -459,9 +671,10 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id) } if (host->data || host->response_busy) { if (host->data) - mmc_dma_cleanup(host, -ETIMEDOUT); + omap_hsmmc_dma_cleanup(host, + -ETIMEDOUT); host->response_busy = 0; - mmc_omap_reset_controller_fsm(host, SRD); + omap_hsmmc_reset_controller_fsm(host, SRD); } } if ((status & DATA_TIMEOUT) || @@ -471,11 +684,11 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id) -ETIMEDOUT : -EILSEQ; if (host->data) - mmc_dma_cleanup(host, err); + omap_hsmmc_dma_cleanup(host, err); else host->mrq->cmd->error = err; host->response_busy = 0; - mmc_omap_reset_controller_fsm(host, SRD); + omap_hsmmc_reset_controller_fsm(host, SRD); end_trans = 1; } } @@ -494,14 +707,16 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id) OMAP_HSMMC_READ(host->base, STAT); if (end_cmd || ((status & CC) && host->cmd)) - mmc_omap_cmd_done(host, host->cmd); - if (end_trans || (status & TC)) - mmc_omap_xfer_done(host, data); + omap_hsmmc_cmd_done(host, host->cmd); + if ((end_trans || (status & TC)) && host->mrq) + omap_hsmmc_xfer_done(host, data); + + spin_unlock(&host->irq_lock); return 
IRQ_HANDLED; } -static void set_sd_bus_power(struct mmc_omap_host *host) +static void set_sd_bus_power(struct omap_hsmmc_host *host) { unsigned long i; @@ -521,7 +736,7 @@ static void set_sd_bus_power(struct mmc_omap_host *host) * The MMC2 transceiver controls are used instead of DAT4..DAT7. * Some chips, like eMMC ones, use internal transceivers. */ -static int omap_mmc_switch_opcond(struct mmc_omap_host *host, int vdd) +static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd) { u32 reg_val = 0; int ret; @@ -529,22 +744,24 @@ static int omap_mmc_switch_opcond(struct mmc_omap_host *host, int vdd) /* Disable the clocks */ clk_disable(host->fclk); clk_disable(host->iclk); - clk_disable(host->dbclk); + if (host->got_dbclk) + clk_disable(host->dbclk); /* Turn the power off */ ret = mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); - if (ret != 0) - goto err; /* Turn the power ON with given VDD 1.8 or 3.0v */ - ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1, vdd); + if (!ret) + ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1, + vdd); + clk_enable(host->iclk); + clk_enable(host->fclk); + if (host->got_dbclk) + clk_enable(host->dbclk); + if (ret != 0) goto err; - clk_enable(host->fclk); - clk_enable(host->iclk); - clk_enable(host->dbclk); - OMAP_HSMMC_WRITE(host->base, HCTL, OMAP_HSMMC_READ(host->base, HCTL) & SDVSCLR); reg_val = OMAP_HSMMC_READ(host->base, HCTL); @@ -552,7 +769,7 @@ static int omap_mmc_switch_opcond(struct mmc_omap_host *host, int vdd) /* * If a MMC dual voltage card is detected, the set_ios fn calls * this fn with VDD bit set for 1.8V. Upon card removal from the - * slot, omap_mmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF. + * slot, omap_hsmmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF. * * Cope with a bit of slop in the range ... 
per data sheets: * - "1.8V" for vdds_mmc1/vdds_mmc1a can be up to 2.45V max, @@ -578,25 +795,59 @@ err: return ret; } +/* Protect the card while the cover is open */ +static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host) +{ + if (!mmc_slot(host).get_cover_state) + return; + + host->reqs_blocked = 0; + if (mmc_slot(host).get_cover_state(host->dev, host->slot_id)) { + if (host->protect_card) { + printk(KERN_INFO "%s: cover is closed, " + "card is now accessible\n", + mmc_hostname(host->mmc)); + host->protect_card = 0; + } + } else { + if (!host->protect_card) { + printk(KERN_INFO "%s: cover is open, " + "card is now inaccessible\n", + mmc_hostname(host->mmc)); + host->protect_card = 1; + } + } +} + /* * Work Item to notify the core about card insertion/removal */ -static void mmc_omap_detect(struct work_struct *work) +static void omap_hsmmc_detect(struct work_struct *work) { - struct mmc_omap_host *host = container_of(work, struct mmc_omap_host, - mmc_carddetect_work); + struct omap_hsmmc_host *host = + container_of(work, struct omap_hsmmc_host, mmc_carddetect_work); struct omap_mmc_slot_data *slot = &mmc_slot(host); + int carddetect; - if (mmc_slot(host).card_detect) - host->carddetect = slot->card_detect(slot->card_detect_irq); - else - host->carddetect = -ENOSYS; + if (host->suspended) + return; sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch"); - if (host->carddetect) { + + if (slot->card_detect) + carddetect = slot->card_detect(slot->card_detect_irq); + else { + omap_hsmmc_protect_card(host); + carddetect = -ENOSYS; + } + + if (carddetect) { mmc_detect_change(host->mmc, (HZ * 200) / 1000); } else { - mmc_omap_reset_controller_fsm(host, SRD); + mmc_host_enable(host->mmc); + omap_hsmmc_reset_controller_fsm(host, SRD); + mmc_host_lazy_disable(host->mmc); + mmc_detect_change(host->mmc, (HZ * 50) / 1000); } } @@ -604,16 +855,18 @@ static void mmc_omap_detect(struct work_struct *work) /* * ISR for handling card insertion and removal */ -static irqreturn_t omap_mmc_cd_handler(int irq, void *dev_id) +static irqreturn_t omap_hsmmc_cd_handler(int irq, void *dev_id) { - struct mmc_omap_host *host = (struct mmc_omap_host *)dev_id; + struct omap_hsmmc_host *host = (struct omap_hsmmc_host *)dev_id; + if (host->suspended) + return IRQ_HANDLED; schedule_work(&host->mmc_carddetect_work); return IRQ_HANDLED; } -static int mmc_omap_get_dma_sync_dev(struct mmc_omap_host *host, +static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host, struct mmc_data *data) { int sync_dev; @@ -625,7 +878,7 @@ static int mmc_omap_get_dma_sync_dev(struct mmc_omap_host *host, return sync_dev; } -static void mmc_omap_config_dma_params(struct mmc_omap_host *host, +static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host, struct mmc_data *data, struct scatterlist *sgl) { @@ -639,7 +892,7 @@ static void mmc_omap_config_dma_params(struct mmc_omap_host *host, sg_dma_address(sgl), 0, 0); } else { omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - (host->mapbase + OMAP_HSMMC_DATA), 0, 0); + (host->mapbase + OMAP_HSMMC_DATA), 0, 0); omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, sg_dma_address(sgl), 0, 0); } @@ -649,7 +902,7 @@ static void mmc_omap_config_dma_params(struct mmc_omap_host *host, omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32, blksz / 4, nblk, OMAP_DMA_SYNC_FRAME, - mmc_omap_get_dma_sync_dev(host, data), + omap_hsmmc_get_dma_sync_dev(host, data), !(data->flags & MMC_DATA_WRITE)); omap_start_dma(dma_ch); @@ -658,9 +911,9 @@ static void 
mmc_omap_config_dma_params(struct mmc_omap_host *host, /* * DMA call back function */ -static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data) +static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data) { - struct mmc_omap_host *host = data; + struct omap_hsmmc_host *host = data; if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ) dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n"); @@ -671,7 +924,7 @@ static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data) host->dma_sg_idx++; if (host->dma_sg_idx < host->dma_len) { /* Fire up the next transfer. */ - mmc_omap_config_dma_params(host, host->data, + omap_hsmmc_config_dma_params(host, host->data, host->data->sg + host->dma_sg_idx); return; } @@ -688,14 +941,14 @@ static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data) /* * Routine to configure and start DMA for the MMC card */ -static int -mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req) +static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, + struct mmc_request *req) { int dma_ch = 0, ret = 0, err = 1, i; struct mmc_data *data = req->data; /* Sanity check: all the SG entries must be aligned by block size. */ - for (i = 0; i < host->dma_len; i++) { + for (i = 0; i < data->sg_len; i++) { struct scatterlist *sgl; sgl = data->sg + i; @@ -726,8 +979,8 @@ mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req) return err; } - ret = omap_request_dma(mmc_omap_get_dma_sync_dev(host, data), "MMC/SD", - mmc_omap_dma_cb,host, &dma_ch); + ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data), + "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch); if (ret != 0) { dev_err(mmc_dev(host->mmc), "%s: omap_request_dma() failed with %d\n", @@ -736,17 +989,18 @@ mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req) } host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, mmc_omap_get_dma_dir(host, data)); + data->sg_len, omap_hsmmc_get_dma_dir(host, data)); host->dma_ch = dma_ch; host->dma_sg_idx = 0; - mmc_omap_config_dma_params(host, data, data->sg); + omap_hsmmc_config_dma_params(host, data, data->sg); return 0; } -static void set_data_timeout(struct mmc_omap_host *host, - struct mmc_request *req) +static void set_data_timeout(struct omap_hsmmc_host *host, + unsigned int timeout_ns, + unsigned int timeout_clks) { unsigned int timeout, cycle_ns; uint32_t reg, clkd, dto = 0; @@ -757,8 +1011,8 @@ static void set_data_timeout(struct mmc_omap_host *host, clkd = 1; cycle_ns = 1000000000 / (clk_get_rate(host->fclk) / clkd); - timeout = req->data->timeout_ns / cycle_ns; - timeout += req->data->timeout_clks; + timeout = timeout_ns / cycle_ns; + timeout += timeout_clks; if (timeout) { while ((timeout & 0x80000000) == 0) { dto += 1; @@ -785,22 +1039,28 @@ static void set_data_timeout(struct mmc_omap_host *host, * Configure block length for MMC/SD cards and initiate the transfer. */ static int -mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req) +omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req) { int ret; host->data = req->data; if (req->data == NULL) { OMAP_HSMMC_WRITE(host->base, BLK, 0); + /* + * Set an arbitrary 100ms data timeout for commands with + * busy signal. 
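set_data_timeout() above converts the requested timeout into card-clock cycles and then searches for a power-of-two window large enough to hold them. A minimal, runnable model of that search, assuming the usual 2^(DTO+13)-cycle window encoding (the function name and the 13..27 exponent range are assumptions, not taken verbatim from the driver):

#include <stdint.h>

static unsigned int calc_dto_field(uint64_t timeout_ns, unsigned int timeout_clks,
                                   unsigned long clk_hz)
{
        uint64_t cycle_ns = 1000000000ULL / clk_hz;
        uint64_t cycles = timeout_ns / cycle_ns + timeout_clks;
        unsigned int exp = 13;                  /* smallest window: 2^13 cycles */

        while (exp < 27 && ((uint64_t)1 << exp) < cycles)
                exp++;                          /* grow until the window covers 'cycles' */

        return exp - 13;                        /* value for the DTO bit-field */
}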
+ */ + if (req->cmd->flags & MMC_RSP_BUSY) + set_data_timeout(host, 100000000U, 0); return 0; } OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz) | (req->data->blocks << 16)); - set_data_timeout(host, req); + set_data_timeout(host, req->data->timeout_ns, req->data->timeout_clks); if (host->use_dma) { - ret = mmc_omap_start_dma_transfer(host, req); + ret = omap_hsmmc_start_dma_transfer(host, req); if (ret != 0) { dev_dbg(mmc_dev(host->mmc), "MMC start dma failure\n"); return ret; @@ -812,35 +1072,92 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req) /* * Request function. for read/write operation */ -static void omap_mmc_request(struct mmc_host *mmc, struct mmc_request *req) +static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req) { - struct mmc_omap_host *host = mmc_priv(mmc); + struct omap_hsmmc_host *host = mmc_priv(mmc); + int err; + /* + * Prevent races with the interrupt handler because of unexpected + * interrupts, but not if we are already in interrupt context i.e. + * retries. + */ + if (!in_interrupt()) { + spin_lock_irqsave(&host->irq_lock, host->flags); + /* + * Protect the card from I/O if there is a possibility + * it can be removed. + */ + if (host->protect_card) { + if (host->reqs_blocked < 3) { + /* + * Ensure the controller is left in a consistent + * state by resetting the command and data state + * machines. + */ + omap_hsmmc_reset_controller_fsm(host, SRD); + omap_hsmmc_reset_controller_fsm(host, SRC); + host->reqs_blocked += 1; + } + req->cmd->error = -EBADF; + if (req->data) + req->data->error = -EBADF; + spin_unlock_irqrestore(&host->irq_lock, host->flags); + mmc_request_done(mmc, req); + return; + } else if (host->reqs_blocked) + host->reqs_blocked = 0; + } WARN_ON(host->mrq != NULL); host->mrq = req; - mmc_omap_prepare_data(host, req); - mmc_omap_start_command(host, req->cmd, req->data); -} + err = omap_hsmmc_prepare_data(host, req); + if (err) { + req->cmd->error = err; + if (req->data) + req->data->error = err; + host->mrq = NULL; + if (!in_interrupt()) + spin_unlock_irqrestore(&host->irq_lock, host->flags); + mmc_request_done(mmc, req); + return; + } + omap_hsmmc_start_command(host, req->cmd, req->data); +} /* Routine to configure clock values. 
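The set_ios() implementation below derives the bus clock by dividing a fixed functional clock and clamping the divisor to the CLKD field's limit of 250 (visible in the hunk). A compact, runnable model of that selection; the 96 MHz master-clock figure used in the example is an assumption for illustration only:

static unsigned int pick_clk_divisor(unsigned long master_hz, unsigned long target_hz)
{
        unsigned int dsor;

        if (!target_hz)
                return 0;               /* clock will simply be stopped */
        dsor = master_hz / target_hz;
        if (dsor < 1)
                dsor = 1;
        if (master_hz / dsor > target_hz)
                dsor++;                 /* never overshoot the requested rate */
        if (dsor > 250)
                dsor = 250;             /* CLKD field limit */
        return dsor;
}

With master_hz = 96000000 and target_hz = 400000 this yields 240, i.e. an exact 400 kHz identification clock.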
Exposed API to core */ -static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { - struct mmc_omap_host *host = mmc_priv(mmc); + struct omap_hsmmc_host *host = mmc_priv(mmc); u16 dsor = 0; unsigned long regval; unsigned long timeout; u32 con; + int do_send_init_stream = 0; - switch (ios->power_mode) { - case MMC_POWER_OFF: - mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); - break; - case MMC_POWER_UP: - mmc_slot(host).set_power(host->dev, host->slot_id, 1, ios->vdd); - break; + mmc_host_enable(host->mmc); + + if (ios->power_mode != host->power_mode) { + switch (ios->power_mode) { + case MMC_POWER_OFF: + mmc_slot(host).set_power(host->dev, host->slot_id, + 0, 0); + host->vdd = 0; + break; + case MMC_POWER_UP: + mmc_slot(host).set_power(host->dev, host->slot_id, + 1, ios->vdd); + host->vdd = ios->vdd; + break; + case MMC_POWER_ON: + do_send_init_stream = 1; + break; + } + host->power_mode = ios->power_mode; } + /* FIXME: set registers based only on changes to ios */ + con = OMAP_HSMMC_READ(host->base, CON); switch (mmc->ios.bus_width) { case MMC_BUS_WIDTH_8: @@ -870,8 +1187,8 @@ static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) * MMC_POWER_UP upon recalculating the voltage. * vdd 1.8v. */ - if (omap_mmc_switch_opcond(host, ios->vdd) != 0) - dev_dbg(mmc_dev(host->mmc), + if (omap_hsmmc_switch_opcond(host, ios->vdd) != 0) + dev_dbg(mmc_dev(host->mmc), "Switch operation failed\n"); } } @@ -887,7 +1204,7 @@ static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (dsor > 250) dsor = 250; } - omap_mmc_stop_clock(host); + omap_hsmmc_stop_clock(host); regval = OMAP_HSMMC_READ(host->base, SYSCTL); regval = regval & ~(CLKD_MASK); regval = regval | (dsor << 6) | (DTO << 16); @@ -897,42 +1214,47 @@ static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) /* Wait till the ICS bit is set */ timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); - while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != 0x2 + while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS && time_before(jiffies, timeout)) msleep(1); OMAP_HSMMC_WRITE(host->base, SYSCTL, OMAP_HSMMC_READ(host->base, SYSCTL) | CEN); - if (ios->power_mode == MMC_POWER_ON) + if (do_send_init_stream) send_init_stream(host); + con = OMAP_HSMMC_READ(host->base, CON); if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) - OMAP_HSMMC_WRITE(host->base, CON, - OMAP_HSMMC_READ(host->base, CON) | OD); + OMAP_HSMMC_WRITE(host->base, CON, con | OD); + else + OMAP_HSMMC_WRITE(host->base, CON, con & ~OD); + + if (host->power_mode == MMC_POWER_OFF) + mmc_host_disable(host->mmc); + else + mmc_host_lazy_disable(host->mmc); } static int omap_hsmmc_get_cd(struct mmc_host *mmc) { - struct mmc_omap_host *host = mmc_priv(mmc); - struct omap_mmc_platform_data *pdata = host->pdata; + struct omap_hsmmc_host *host = mmc_priv(mmc); - if (!pdata->slots[0].card_detect) + if (!mmc_slot(host).card_detect) return -ENOSYS; - return pdata->slots[0].card_detect(pdata->slots[0].card_detect_irq); + return mmc_slot(host).card_detect(mmc_slot(host).card_detect_irq); } static int omap_hsmmc_get_ro(struct mmc_host *mmc) { - struct mmc_omap_host *host = mmc_priv(mmc); - struct omap_mmc_platform_data *pdata = host->pdata; + struct omap_hsmmc_host *host = mmc_priv(mmc); - if (!pdata->slots[0].get_ro) + if (!mmc_slot(host).get_ro) return -ENOSYS; - return pdata->slots[0].get_ro(host->dev, 0); + return mmc_slot(host).get_ro(host->dev, 0); } -static void 
omap_hsmmc_init(struct mmc_omap_host *host) +static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host) { u32 hctl, capa, value; @@ -959,19 +1281,340 @@ static void omap_hsmmc_init(struct mmc_omap_host *host) set_sd_bus_power(host); } -static struct mmc_host_ops mmc_omap_ops = { - .request = omap_mmc_request, - .set_ios = omap_mmc_set_ios, +/* + * Dynamic power saving handling, FSM: + * ENABLED -> DISABLED -> CARDSLEEP / REGSLEEP -> OFF + * ^___________| | | + * |______________________|______________________| + * + * ENABLED: mmc host is fully functional + * DISABLED: fclk is off + * CARDSLEEP: fclk is off, card is asleep, voltage regulator is asleep + * REGSLEEP: fclk is off, voltage regulator is asleep + * OFF: fclk is off, voltage regulator is off + * + * Transition handlers return the timeout for the next state transition + * or negative error. + */ + +enum {ENABLED = 0, DISABLED, CARDSLEEP, REGSLEEP, OFF}; + +/* Handler for [ENABLED -> DISABLED] transition */ +static int omap_hsmmc_enabled_to_disabled(struct omap_hsmmc_host *host) +{ + omap_hsmmc_context_save(host); + clk_disable(host->fclk); + host->dpm_state = DISABLED; + + dev_dbg(mmc_dev(host->mmc), "ENABLED -> DISABLED\n"); + + if (host->power_mode == MMC_POWER_OFF) + return 0; + + return msecs_to_jiffies(OMAP_MMC_SLEEP_TIMEOUT); +} + +/* Handler for [DISABLED -> REGSLEEP / CARDSLEEP] transition */ +static int omap_hsmmc_disabled_to_sleep(struct omap_hsmmc_host *host) +{ + int err, new_state; + + if (!mmc_try_claim_host(host->mmc)) + return 0; + + clk_enable(host->fclk); + omap_hsmmc_context_restore(host); + if (mmc_card_can_sleep(host->mmc)) { + err = mmc_card_sleep(host->mmc); + if (err < 0) { + clk_disable(host->fclk); + mmc_release_host(host->mmc); + return err; + } + new_state = CARDSLEEP; + } else { + new_state = REGSLEEP; + } + if (mmc_slot(host).set_sleep) + mmc_slot(host).set_sleep(host->dev, host->slot_id, 1, 0, + new_state == CARDSLEEP); + /* FIXME: turn off bus power and perhaps interrupts too */ + clk_disable(host->fclk); + host->dpm_state = new_state; + + mmc_release_host(host->mmc); + + dev_dbg(mmc_dev(host->mmc), "DISABLED -> %s\n", + host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP"); + + if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) || + mmc_slot(host).card_detect || + (mmc_slot(host).get_cover_state && + mmc_slot(host).get_cover_state(host->dev, host->slot_id))) + return msecs_to_jiffies(OMAP_MMC_OFF_TIMEOUT); + + return 0; +} + +/* Handler for [REGSLEEP / CARDSLEEP -> OFF] transition */ +static int omap_hsmmc_sleep_to_off(struct omap_hsmmc_host *host) +{ + if (!mmc_try_claim_host(host->mmc)) + return 0; + + if (!((host->mmc->caps & MMC_CAP_NONREMOVABLE) || + mmc_slot(host).card_detect || + (mmc_slot(host).get_cover_state && + mmc_slot(host).get_cover_state(host->dev, host->slot_id)))) { + mmc_release_host(host->mmc); + return 0; + } + + mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); + host->vdd = 0; + host->power_mode = MMC_POWER_OFF; + + dev_dbg(mmc_dev(host->mmc), "%s -> OFF\n", + host->dpm_state == CARDSLEEP ? 
"CARDSLEEP" : "REGSLEEP"); + + host->dpm_state = OFF; + + mmc_release_host(host->mmc); + + return 0; +} + +/* Handler for [DISABLED -> ENABLED] transition */ +static int omap_hsmmc_disabled_to_enabled(struct omap_hsmmc_host *host) +{ + int err; + + err = clk_enable(host->fclk); + if (err < 0) + return err; + + omap_hsmmc_context_restore(host); + host->dpm_state = ENABLED; + + dev_dbg(mmc_dev(host->mmc), "DISABLED -> ENABLED\n"); + + return 0; +} + +/* Handler for [SLEEP -> ENABLED] transition */ +static int omap_hsmmc_sleep_to_enabled(struct omap_hsmmc_host *host) +{ + if (!mmc_try_claim_host(host->mmc)) + return 0; + + clk_enable(host->fclk); + omap_hsmmc_context_restore(host); + if (mmc_slot(host).set_sleep) + mmc_slot(host).set_sleep(host->dev, host->slot_id, 0, + host->vdd, host->dpm_state == CARDSLEEP); + if (mmc_card_can_sleep(host->mmc)) + mmc_card_awake(host->mmc); + + dev_dbg(mmc_dev(host->mmc), "%s -> ENABLED\n", + host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP"); + + host->dpm_state = ENABLED; + + mmc_release_host(host->mmc); + + return 0; +} + +/* Handler for [OFF -> ENABLED] transition */ +static int omap_hsmmc_off_to_enabled(struct omap_hsmmc_host *host) +{ + clk_enable(host->fclk); + + omap_hsmmc_context_restore(host); + omap_hsmmc_conf_bus_power(host); + mmc_power_restore_host(host->mmc); + + host->dpm_state = ENABLED; + + dev_dbg(mmc_dev(host->mmc), "OFF -> ENABLED\n"); + + return 0; +} + +/* + * Bring MMC host to ENABLED from any other PM state. + */ +static int omap_hsmmc_enable(struct mmc_host *mmc) +{ + struct omap_hsmmc_host *host = mmc_priv(mmc); + + switch (host->dpm_state) { + case DISABLED: + return omap_hsmmc_disabled_to_enabled(host); + case CARDSLEEP: + case REGSLEEP: + return omap_hsmmc_sleep_to_enabled(host); + case OFF: + return omap_hsmmc_off_to_enabled(host); + default: + dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n"); + return -EINVAL; + } +} + +/* + * Bring MMC host in PM state (one level deeper). 
+ */ +static int omap_hsmmc_disable(struct mmc_host *mmc, int lazy) +{ + struct omap_hsmmc_host *host = mmc_priv(mmc); + + switch (host->dpm_state) { + case ENABLED: { + int delay; + + delay = omap_hsmmc_enabled_to_disabled(host); + if (lazy || delay < 0) + return delay; + return 0; + } + case DISABLED: + return omap_hsmmc_disabled_to_sleep(host); + case CARDSLEEP: + case REGSLEEP: + return omap_hsmmc_sleep_to_off(host); + default: + dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n"); + return -EINVAL; + } +} + +static int omap_hsmmc_enable_fclk(struct mmc_host *mmc) +{ + struct omap_hsmmc_host *host = mmc_priv(mmc); + int err; + + err = clk_enable(host->fclk); + if (err) + return err; + dev_dbg(mmc_dev(host->mmc), "mmc_fclk: enabled\n"); + omap_hsmmc_context_restore(host); + return 0; +} + +static int omap_hsmmc_disable_fclk(struct mmc_host *mmc, int lazy) +{ + struct omap_hsmmc_host *host = mmc_priv(mmc); + + omap_hsmmc_context_save(host); + clk_disable(host->fclk); + dev_dbg(mmc_dev(host->mmc), "mmc_fclk: disabled\n"); + return 0; +} + +static const struct mmc_host_ops omap_hsmmc_ops = { + .enable = omap_hsmmc_enable_fclk, + .disable = omap_hsmmc_disable_fclk, + .request = omap_hsmmc_request, + .set_ios = omap_hsmmc_set_ios, .get_cd = omap_hsmmc_get_cd, .get_ro = omap_hsmmc_get_ro, /* NYET -- enable_sdio_irq */ }; -static int __init omap_mmc_probe(struct platform_device *pdev) +static const struct mmc_host_ops omap_hsmmc_ps_ops = { + .enable = omap_hsmmc_enable, + .disable = omap_hsmmc_disable, + .request = omap_hsmmc_request, + .set_ios = omap_hsmmc_set_ios, + .get_cd = omap_hsmmc_get_cd, + .get_ro = omap_hsmmc_get_ro, + /* NYET -- enable_sdio_irq */ +}; + +#ifdef CONFIG_DEBUG_FS + +static int omap_hsmmc_regs_show(struct seq_file *s, void *data) +{ + struct mmc_host *mmc = s->private; + struct omap_hsmmc_host *host = mmc_priv(mmc); + int context_loss = 0; + + if (host->pdata->get_context_loss_count) + context_loss = host->pdata->get_context_loss_count(host->dev); + + seq_printf(s, "mmc%d:\n" + " enabled:\t%d\n" + " dpm_state:\t%d\n" + " nesting_cnt:\t%d\n" + " ctx_loss:\t%d:%d\n" + "\nregs:\n", + mmc->index, mmc->enabled ? 
1 : 0, + host->dpm_state, mmc->nesting_cnt, + host->context_loss, context_loss); + + if (host->suspended || host->dpm_state == OFF) { + seq_printf(s, "host suspended, can't read registers\n"); + return 0; + } + + if (clk_enable(host->fclk) != 0) { + seq_printf(s, "can't read the regs\n"); + return 0; + } + + seq_printf(s, "SYSCONFIG:\t0x%08x\n", + OMAP_HSMMC_READ(host->base, SYSCONFIG)); + seq_printf(s, "CON:\t\t0x%08x\n", + OMAP_HSMMC_READ(host->base, CON)); + seq_printf(s, "HCTL:\t\t0x%08x\n", + OMAP_HSMMC_READ(host->base, HCTL)); + seq_printf(s, "SYSCTL:\t\t0x%08x\n", + OMAP_HSMMC_READ(host->base, SYSCTL)); + seq_printf(s, "IE:\t\t0x%08x\n", + OMAP_HSMMC_READ(host->base, IE)); + seq_printf(s, "ISE:\t\t0x%08x\n", + OMAP_HSMMC_READ(host->base, ISE)); + seq_printf(s, "CAPA:\t\t0x%08x\n", + OMAP_HSMMC_READ(host->base, CAPA)); + + clk_disable(host->fclk); + + return 0; +} + +static int omap_hsmmc_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, omap_hsmmc_regs_show, inode->i_private); +} + +static const struct file_operations mmc_regs_fops = { + .open = omap_hsmmc_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void omap_hsmmc_debugfs(struct mmc_host *mmc) +{ + if (mmc->debugfs_root) + debugfs_create_file("regs", S_IRUSR, mmc->debugfs_root, + mmc, &mmc_regs_fops); +} + +#else + +static void omap_hsmmc_debugfs(struct mmc_host *mmc) +{ +} + +#endif + +static int __init omap_hsmmc_probe(struct platform_device *pdev) { struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; struct mmc_host *mmc; - struct mmc_omap_host *host = NULL; + struct omap_hsmmc_host *host = NULL; struct resource *res; int ret = 0, irq; @@ -995,7 +1638,7 @@ static int __init omap_mmc_probe(struct platform_device *pdev) if (res == NULL) return -EBUSY; - mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev); + mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto err; @@ -1013,15 +1656,21 @@ static int __init omap_mmc_probe(struct platform_device *pdev) host->slot_id = 0; host->mapbase = res->start; host->base = ioremap(host->mapbase, SZ_4K); + host->power_mode = -1; platform_set_drvdata(pdev, host); - INIT_WORK(&host->mmc_carddetect_work, mmc_omap_detect); + INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect); + + if (mmc_slot(host).power_saving) + mmc->ops = &omap_hsmmc_ps_ops; + else + mmc->ops = &omap_hsmmc_ops; - mmc->ops = &mmc_omap_ops; mmc->f_min = 400000; mmc->f_max = 52000000; sema_init(&host->sem, 1); + spin_lock_init(&host->irq_lock); host->iclk = clk_get(&pdev->dev, "ick"); if (IS_ERR(host->iclk)) { @@ -1037,31 +1686,42 @@ static int __init omap_mmc_probe(struct platform_device *pdev) goto err1; } - if (clk_enable(host->fclk) != 0) { + omap_hsmmc_context_save(host); + + mmc->caps |= MMC_CAP_DISABLE; + mmc_set_disable_delay(mmc, OMAP_MMC_DISABLED_TIMEOUT); + /* we start off in DISABLED state */ + host->dpm_state = DISABLED; + + if (mmc_host_enable(host->mmc) != 0) { clk_put(host->iclk); clk_put(host->fclk); goto err1; } if (clk_enable(host->iclk) != 0) { - clk_disable(host->fclk); + mmc_host_disable(host->mmc); clk_put(host->iclk); clk_put(host->fclk); goto err1; } - host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); - /* - * MMC can still work without debounce clock. 
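omap_hsmmc_context_save(), called from the probe path above, snapshots the platform's context-loss counter so that later wake-ups can tell whether the MMC power domain really went through OFF and needs its registers reprogrammed. A minimal sketch of that bookkeeping, with a hypothetical read_loss_count() callback standing in for pdata->get_context_loss_count():

#include <stdbool.h>

struct ctx_tracker {
        int saved_count;        /* counter value while the registers were known-good */
};

static void ctx_save(struct ctx_tracker *t, int (*read_loss_count)(void))
{
        if (read_loss_count)
                t->saved_count = read_loss_count();
}

static bool ctx_needs_restore(const struct ctx_tracker *t, int (*read_loss_count)(void))
{
        if (!read_loss_count)
                return true;    /* no counter available: always reprogram */
        /* state was lost iff the OFF-mode counter moved on since ctx_save() */
        return read_loss_count() != t->saved_count;
}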
- */ - if (IS_ERR(host->dbclk)) - dev_warn(mmc_dev(host->mmc), "Failed to get debounce clock\n"); - else - if (clk_enable(host->dbclk) != 0) - dev_dbg(mmc_dev(host->mmc), "Enabling debounce" - " clk failed\n"); + if (cpu_is_omap2430()) { + host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); + /* + * MMC can still work without debounce clock. + */ + if (IS_ERR(host->dbclk)) + dev_warn(mmc_dev(host->mmc), + "Failed to get debounce clock\n"); else - host->dbclk_enabled = 1; + host->got_dbclk = 1; + + if (host->got_dbclk) + if (clk_enable(host->dbclk) != 0) + dev_dbg(mmc_dev(host->mmc), "Enabling debounce" + " clk failed\n"); + } /* Since we do only SG emulation, we can have as many segs * as we want. */ @@ -1073,14 +1733,18 @@ static int __init omap_mmc_probe(struct platform_device *pdev) mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_seg_size = mmc->max_req_size; - mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED; + mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | + MMC_CAP_WAIT_WHILE_BUSY; - if (pdata->slots[host->slot_id].wires >= 8) + if (mmc_slot(host).wires >= 8) mmc->caps |= MMC_CAP_8_BIT_DATA; - else if (pdata->slots[host->slot_id].wires >= 4) + else if (mmc_slot(host).wires >= 4) mmc->caps |= MMC_CAP_4_BIT_DATA; - omap_hsmmc_init(host); + if (mmc_slot(host).nonremovable) + mmc->caps |= MMC_CAP_NONREMOVABLE; + + omap_hsmmc_conf_bus_power(host); /* Select DMA lines */ switch (host->id) { @@ -1096,13 +1760,21 @@ static int __init omap_mmc_probe(struct platform_device *pdev) host->dma_line_tx = OMAP34XX_DMA_MMC3_TX; host->dma_line_rx = OMAP34XX_DMA_MMC3_RX; break; + case OMAP_MMC4_DEVID: + host->dma_line_tx = OMAP44XX_DMA_MMC4_TX; + host->dma_line_rx = OMAP44XX_DMA_MMC4_RX; + break; + case OMAP_MMC5_DEVID: + host->dma_line_tx = OMAP44XX_DMA_MMC5_TX; + host->dma_line_rx = OMAP44XX_DMA_MMC5_RX; + break; default: dev_err(mmc_dev(host->mmc), "Invalid MMC id\n"); goto err_irq; } /* Request IRQ for MMC operations */ - ret = request_irq(host->irq, mmc_omap_irq, IRQF_DISABLED, + ret = request_irq(host->irq, omap_hsmmc_irq, IRQF_DISABLED, mmc_hostname(mmc), host); if (ret) { dev_dbg(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n"); @@ -1112,7 +1784,8 @@ static int __init omap_mmc_probe(struct platform_device *pdev) /* initialize power supplies, gpios, etc */ if (pdata->init != NULL) { if (pdata->init(&pdev->dev) != 0) { - dev_dbg(mmc_dev(host->mmc), "late init error\n"); + dev_dbg(mmc_dev(host->mmc), + "Unable to configure MMC IRQs\n"); goto err_irq_cd_init; } } @@ -1121,7 +1794,7 @@ static int __init omap_mmc_probe(struct platform_device *pdev) /* Request IRQ for card detect */ if ((mmc_slot(host).card_detect_irq)) { ret = request_irq(mmc_slot(host).card_detect_irq, - omap_mmc_cd_handler, + omap_hsmmc_cd_handler, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_DISABLED, mmc_hostname(mmc), host); @@ -1135,21 +1808,26 @@ static int __init omap_mmc_probe(struct platform_device *pdev) OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); + mmc_host_lazy_disable(host->mmc); + + omap_hsmmc_protect_card(host); + mmc_add_host(mmc); - if (host->pdata->slots[host->slot_id].name != NULL) { + if (mmc_slot(host).name != NULL) { ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name); if (ret < 0) goto err_slot_name; } - if (mmc_slot(host).card_detect_irq && - host->pdata->slots[host->slot_id].get_cover_state) { + if (mmc_slot(host).card_detect_irq && mmc_slot(host).get_cover_state) { ret = 
device_create_file(&mmc->class_dev, &dev_attr_cover_switch); if (ret < 0) goto err_cover_switch; } + omap_hsmmc_debugfs(mmc); + return 0; err_cover_switch: @@ -1161,11 +1839,11 @@ err_irq_cd: err_irq_cd_init: free_irq(host->irq, host); err_irq: - clk_disable(host->fclk); + mmc_host_disable(host->mmc); clk_disable(host->iclk); clk_put(host->fclk); clk_put(host->iclk); - if (host->dbclk_enabled) { + if (host->got_dbclk) { clk_disable(host->dbclk); clk_put(host->dbclk); } @@ -1180,12 +1858,13 @@ err: return ret; } -static int omap_mmc_remove(struct platform_device *pdev) +static int omap_hsmmc_remove(struct platform_device *pdev) { - struct mmc_omap_host *host = platform_get_drvdata(pdev); + struct omap_hsmmc_host *host = platform_get_drvdata(pdev); struct resource *res; if (host) { + mmc_host_enable(host->mmc); mmc_remove_host(host->mmc); if (host->pdata->cleanup) host->pdata->cleanup(&pdev->dev); @@ -1194,11 +1873,11 @@ static int omap_mmc_remove(struct platform_device *pdev) free_irq(mmc_slot(host).card_detect_irq, host); flush_scheduled_work(); - clk_disable(host->fclk); + mmc_host_disable(host->mmc); clk_disable(host->iclk); clk_put(host->fclk); clk_put(host->iclk); - if (host->dbclk_enabled) { + if (host->got_dbclk) { clk_disable(host->dbclk); clk_put(host->dbclk); } @@ -1216,36 +1895,51 @@ static int omap_mmc_remove(struct platform_device *pdev) } #ifdef CONFIG_PM -static int omap_mmc_suspend(struct platform_device *pdev, pm_message_t state) +static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state) { int ret = 0; - struct mmc_omap_host *host = platform_get_drvdata(pdev); + struct omap_hsmmc_host *host = platform_get_drvdata(pdev); if (host && host->suspended) return 0; if (host) { + host->suspended = 1; + if (host->pdata->suspend) { + ret = host->pdata->suspend(&pdev->dev, + host->slot_id); + if (ret) { + dev_dbg(mmc_dev(host->mmc), + "Unable to handle MMC board" + " level suspend\n"); + host->suspended = 0; + return ret; + } + } + cancel_work_sync(&host->mmc_carddetect_work); + mmc_host_enable(host->mmc); ret = mmc_suspend_host(host->mmc, state); if (ret == 0) { - host->suspended = 1; - OMAP_HSMMC_WRITE(host->base, ISE, 0); OMAP_HSMMC_WRITE(host->base, IE, 0); - if (host->pdata->suspend) { - ret = host->pdata->suspend(&pdev->dev, - host->slot_id); - if (ret) - dev_dbg(mmc_dev(host->mmc), - "Unable to handle MMC board" - " level suspend\n"); - } OMAP_HSMMC_WRITE(host->base, HCTL, - OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); - clk_disable(host->fclk); + OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); + mmc_host_disable(host->mmc); clk_disable(host->iclk); - clk_disable(host->dbclk); + if (host->got_dbclk) + clk_disable(host->dbclk); + } else { + host->suspended = 0; + if (host->pdata->resume) { + ret = host->pdata->resume(&pdev->dev, + host->slot_id); + if (ret) + dev_dbg(mmc_dev(host->mmc), + "Unmask interrupt failed\n"); + } + mmc_host_disable(host->mmc); } } @@ -1253,32 +1947,28 @@ static int omap_mmc_suspend(struct platform_device *pdev, pm_message_t state) } /* Routine to resume the MMC device */ -static int omap_mmc_resume(struct platform_device *pdev) +static int omap_hsmmc_resume(struct platform_device *pdev) { int ret = 0; - struct mmc_omap_host *host = platform_get_drvdata(pdev); + struct omap_hsmmc_host *host = platform_get_drvdata(pdev); if (host && !host->suspended) return 0; if (host) { - - ret = clk_enable(host->fclk); + ret = clk_enable(host->iclk); if (ret) goto clk_en_err; - ret = clk_enable(host->iclk); - if (ret) { - clk_disable(host->fclk); - 
clk_put(host->fclk); + if (mmc_host_enable(host->mmc) != 0) { + clk_disable(host->iclk); goto clk_en_err; } - if (clk_enable(host->dbclk) != 0) - dev_dbg(mmc_dev(host->mmc), - "Enabling debounce clk failed\n"); + if (host->got_dbclk) + clk_enable(host->dbclk); - omap_hsmmc_init(host); + omap_hsmmc_conf_bus_power(host); if (host->pdata->resume) { ret = host->pdata->resume(&pdev->dev, host->slot_id); @@ -1287,10 +1977,14 @@ static int omap_mmc_resume(struct platform_device *pdev) "Unmask interrupt failed\n"); } + omap_hsmmc_protect_card(host); + /* Notify the core to resume the host */ ret = mmc_resume_host(host->mmc); if (ret == 0) host->suspended = 0; + + mmc_host_lazy_disable(host->mmc); } return ret; @@ -1302,35 +1996,34 @@ clk_en_err: } #else -#define omap_mmc_suspend NULL -#define omap_mmc_resume NULL +#define omap_hsmmc_suspend NULL +#define omap_hsmmc_resume NULL #endif -static struct platform_driver omap_mmc_driver = { - .probe = omap_mmc_probe, - .remove = omap_mmc_remove, - .suspend = omap_mmc_suspend, - .resume = omap_mmc_resume, +static struct platform_driver omap_hsmmc_driver = { + .remove = omap_hsmmc_remove, + .suspend = omap_hsmmc_suspend, + .resume = omap_hsmmc_resume, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, }; -static int __init omap_mmc_init(void) +static int __init omap_hsmmc_init(void) { /* Register the MMC driver */ - return platform_driver_register(&omap_mmc_driver); + return platform_driver_register(&omap_hsmmc_driver); } -static void __exit omap_mmc_cleanup(void) +static void __exit omap_hsmmc_cleanup(void) { /* Unregister MMC driver */ - platform_driver_unregister(&omap_mmc_driver); + platform_driver_unregister(&omap_hsmmc_driver); } -module_init(omap_mmc_init); -module_exit(omap_mmc_cleanup); +module_init(omap_hsmmc_init); +module_exit(omap_hsmmc_cleanup); MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of.c index 1e8aa590bb3..01ab916c280 100644 --- a/drivers/mmc/host/sdhci-of.c +++ b/drivers/mmc/host/sdhci-of.c @@ -21,6 +21,7 @@ #include <linux/of.h> #include <linux/of_platform.h> #include <linux/mmc/host.h> +#include <asm/machdep.h> #include "sdhci.h" struct sdhci_of_data { @@ -48,6 +49,8 @@ struct sdhci_of_host { #define ESDHC_CLOCK_HCKEN 0x00000002 #define ESDHC_CLOCK_IPGEN 0x00000001 +#define ESDHC_HOST_CONTROL_RES 0x05 + static u32 esdhc_readl(struct sdhci_host *host, int reg) { return in_be32(host->ioaddr + reg); @@ -109,13 +112,17 @@ static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) int base = reg & ~0x3; int shift = (reg & 0x3) * 8; + /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). 
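esdhc_writeb() above has to emulate byte-wide SDHCI register writes on a controller that only takes aligned 32-bit big-endian accesses, and it additionally keeps the core from setting bits eSDHC treats as reserved in HOST_CONTROL. A simplified, endianness-agnostic sketch of that read-modify-write (hypothetical helper, plain pointers instead of clrsetbits_be32()):

#include <stdint.h>

static void write8_via_32(volatile uint32_t *regs, unsigned int reg,
                          uint8_t val, uint8_t reserved_mask)
{
        unsigned int base = reg & ~0x3u;        /* containing 32-bit word */
        unsigned int shift = (reg & 0x3u) * 8;  /* byte lane inside that word */
        uint32_t word;

        val &= ~reserved_mask;                  /* never set reserved bits */
        word = regs[base / 4];
        word &= ~(0xffu << shift);              /* clear the target byte lane */
        regs[base / 4] = word | ((uint32_t)val << shift);
}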
*/ + if (reg == SDHCI_HOST_CONTROL) + val &= ~ESDHC_HOST_CONTROL_RES; + clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift); } static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock) { - int div; int pre_div = 2; + int div = 1; clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK); @@ -123,19 +130,17 @@ static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock) if (clock == 0) goto out; - if (host->max_clk / 16 > clock) { - for (; pre_div < 256; pre_div *= 2) { - if (host->max_clk / pre_div < clock * 16) - break; - } - } + while (host->max_clk / pre_div / 16 > clock && pre_div < 256) + pre_div *= 2; - for (div = 1; div <= 16; div++) { - if (host->max_clk / (div * pre_div) <= clock) - break; - } + while (host->max_clk / pre_div / div > clock && div < 16) + div++; + + dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", + clock, host->max_clk / pre_div / div); pre_div >>= 1; + div--; setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | @@ -165,19 +170,12 @@ static unsigned int esdhc_get_min_clock(struct sdhci_host *host) return of_host->clock / 256 / 16; } -static unsigned int esdhc_get_timeout_clock(struct sdhci_host *host) -{ - struct sdhci_of_host *of_host = sdhci_priv(host); - - return of_host->clock / 1000; -} - static struct sdhci_of_data sdhci_esdhc = { .quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 | SDHCI_QUIRK_BROKEN_CARD_DETECTION | - SDHCI_QUIRK_INVERTED_WRITE_PROTECT | SDHCI_QUIRK_NO_BUSY_IRQ | SDHCI_QUIRK_NONSTANDARD_CLOCK | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | SDHCI_QUIRK_PIO_NEEDS_DELAY | SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | SDHCI_QUIRK_NO_CARD_NO_RESET, @@ -192,7 +190,6 @@ static struct sdhci_of_data sdhci_esdhc = { .enable_dma = esdhc_enable_dma, .get_max_clock = esdhc_get_max_clock, .get_min_clock = esdhc_get_min_clock, - .get_timeout_clock = esdhc_get_timeout_clock, }, }; @@ -219,6 +216,15 @@ static int sdhci_of_resume(struct of_device *ofdev) #endif +static bool __devinit sdhci_of_wp_inverted(struct device_node *np) +{ + if (of_get_property(np, "sdhci,wp-inverted", NULL)) + return true; + + /* Old device trees don't have the wp-inverted property. 
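The reworked esdhc_set_clock() above picks a power-of-two prescaler (2..256) and a linear divisor (1..16), then stores pre_div >> 1 and div - 1 in the clock register. A runnable model of that search for a non-zero target clock, including the rate it ends up producing (the 96 MHz base clock in the example is arbitrary):

#include <stdio.h>

static unsigned int esdhc_pick_divisors(unsigned int max_clk, unsigned int clock,
                                        unsigned int *pre_div, unsigned int *div)
{
        *pre_div = 2;
        *div = 1;

        while (max_clk / *pre_div / 16 > clock && *pre_div < 256)
                *pre_div *= 2;
        while (max_clk / *pre_div / *div > clock && *div < 16)
                (*div)++;

        return max_clk / *pre_div / *div;       /* actual SD clock, as in the dev_dbg() */
}

int main(void)
{
        unsigned int pre, div;
        unsigned int hz = esdhc_pick_divisors(96000000, 25000000, &pre, &div);

        printf("pre_div=%u div=%u -> %u Hz\n", pre, div, hz);   /* 2, 2 -> 24 MHz */
        return 0;
}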
*/ + return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds); +} + static int __devinit sdhci_of_probe(struct of_device *ofdev, const struct of_device_id *match) { @@ -261,6 +267,9 @@ static int __devinit sdhci_of_probe(struct of_device *ofdev, if (of_get_property(np, "sdhci,1-bit-only", NULL)) host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; + if (sdhci_of_wp_inverted(np)) + host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; + clk = of_get_property(np, "clock-frequency", &size); if (clk && size == sizeof(*clk) && *clk) of_host->clock = *clk; diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 2f15cc17d88..e0356644d1a 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -83,7 +83,8 @@ static int ricoh_probe(struct sdhci_pci_chip *chip) if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) chip->quirks |= SDHCI_QUIRK_CLOCK_BEFORE_RESET; - if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG) + if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG || + chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY) chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET; return 0; @@ -395,7 +396,7 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host) if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) && ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) && - (host->flags & SDHCI_USE_DMA)) { + (host->flags & SDHCI_USE_SDMA)) { dev_warn(&pdev->dev, "Will use DMA mode even though HW " "doesn't fully claim to support it.\n"); } diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index fc96f8cb9c0..c279fbc4c2e 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -591,6 +591,9 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) target_timeout = data->timeout_ns / 1000 + data->timeout_clks / host->clock; + if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) + host->timeout_clk = host->clock / 1000; + /* * Figure out needed cycles. * We do this in steps in order to fit inside a 32 bit int. 
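With SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, the hunk above makes the timeout clock track the current SD clock (host->clock / 1000, i.e. kHz) instead of a fixed TMCLK. The surrounding, unchanged core code then looks for the smallest 2^(13+count) window that covers the target timeout; the sketch below only models that search and is not copied from sdhci.c:

#include <stdint.h>

static uint8_t calc_timeout_count(unsigned int timeout_clk_khz,
                                  unsigned int target_timeout_us)
{
        uint8_t count = 0;
        uint64_t window_us = (1u << 13) * 1000ULL / timeout_clk_khz;

        while (window_us < target_timeout_us && count < 0xE) {
                count++;                        /* each step doubles the window */
                window_us <<= 1;
        }
        return count;                           /* goes into SDHCI_TIMEOUT_CONTROL */
}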
@@ -652,7 +655,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) count = sdhci_calc_timeout(host, data); sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); - if (host->flags & SDHCI_USE_DMA) + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) host->flags |= SDHCI_REQ_USE_DMA; /* @@ -991,8 +994,8 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) clk |= SDHCI_CLOCK_INT_EN; sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); - /* Wait max 10 ms */ - timeout = 10; + /* Wait max 20 ms */ + timeout = 20; while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) & SDHCI_CLOCK_INT_STABLE)) { if (timeout == 0) { @@ -1597,7 +1600,7 @@ int sdhci_resume_host(struct sdhci_host *host) { int ret; - if (host->flags & SDHCI_USE_DMA) { + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { if (host->ops->enable_dma) host->ops->enable_dma(host); } @@ -1678,23 +1681,20 @@ int sdhci_add_host(struct sdhci_host *host) caps = sdhci_readl(host, SDHCI_CAPABILITIES); if (host->quirks & SDHCI_QUIRK_FORCE_DMA) - host->flags |= SDHCI_USE_DMA; - else if (!(caps & SDHCI_CAN_DO_DMA)) - DBG("Controller doesn't have DMA capability\n"); + host->flags |= SDHCI_USE_SDMA; + else if (!(caps & SDHCI_CAN_DO_SDMA)) + DBG("Controller doesn't have SDMA capability\n"); else - host->flags |= SDHCI_USE_DMA; + host->flags |= SDHCI_USE_SDMA; if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && - (host->flags & SDHCI_USE_DMA)) { + (host->flags & SDHCI_USE_SDMA)) { DBG("Disabling DMA as it is marked broken\n"); - host->flags &= ~SDHCI_USE_DMA; + host->flags &= ~SDHCI_USE_SDMA; } - if (host->flags & SDHCI_USE_DMA) { - if ((host->version >= SDHCI_SPEC_200) && - (caps & SDHCI_CAN_DO_ADMA2)) - host->flags |= SDHCI_USE_ADMA; - } + if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2)) + host->flags |= SDHCI_USE_ADMA; if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && (host->flags & SDHCI_USE_ADMA)) { @@ -1702,13 +1702,14 @@ int sdhci_add_host(struct sdhci_host *host) host->flags &= ~SDHCI_USE_ADMA; } - if (host->flags & SDHCI_USE_DMA) { + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { if (host->ops->enable_dma) { if (host->ops->enable_dma(host)) { printk(KERN_WARNING "%s: No suitable DMA " "available. Falling back to PIO.\n", mmc_hostname(mmc)); - host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA); + host->flags &= + ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); } } } @@ -1736,7 +1737,7 @@ int sdhci_add_host(struct sdhci_host *host) * mask, but PIO does not need the hw shim so we set a new * mask here in that case. */ - if (!(host->flags & SDHCI_USE_DMA)) { + if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { host->dma_mask = DMA_BIT_MASK(64); mmc_dev(host->mmc)->dma_mask = &host->dma_mask; } @@ -1757,13 +1758,15 @@ int sdhci_add_host(struct sdhci_host *host) host->timeout_clk = (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; if (host->timeout_clk == 0) { - if (!host->ops->get_timeout_clock) { + if (host->ops->get_timeout_clock) { + host->timeout_clk = host->ops->get_timeout_clock(host); + } else if (!(host->quirks & + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { printk(KERN_ERR "%s: Hardware doesn't specify timeout clock " "frequency.\n", mmc_hostname(mmc)); return -ENODEV; } - host->timeout_clk = host->ops->get_timeout_clock(host); } if (caps & SDHCI_TIMEOUT_CLK_UNIT) host->timeout_clk *= 1000; @@ -1772,7 +1775,8 @@ int sdhci_add_host(struct sdhci_host *host) * Set host parameters. 
*/ mmc->ops = &sdhci_ops; - if (host->ops->get_min_clock) + if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK && + host->ops->set_clock && host->ops->get_min_clock) mmc->f_min = host->ops->get_min_clock(host); else mmc->f_min = host->max_clk / 256; @@ -1810,7 +1814,7 @@ int sdhci_add_host(struct sdhci_host *host) */ if (host->flags & SDHCI_USE_ADMA) mmc->max_hw_segs = 128; - else if (host->flags & SDHCI_USE_DMA) + else if (host->flags & SDHCI_USE_SDMA) mmc->max_hw_segs = 1; else /* PIO */ mmc->max_hw_segs = 128; @@ -1893,10 +1897,10 @@ int sdhci_add_host(struct sdhci_host *host) mmc_add_host(mmc); - printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n", + printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n", mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), - (host->flags & SDHCI_USE_ADMA)?"A":"", - (host->flags & SDHCI_USE_DMA)?"DMA":"PIO"); + (host->flags & SDHCI_USE_ADMA) ? "ADMA" : + (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"); sdhci_enable_card_detection(host); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index c77e9ff3022..ce5f1d73dc0 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -143,7 +143,7 @@ #define SDHCI_CAN_DO_ADMA2 0x00080000 #define SDHCI_CAN_DO_ADMA1 0x00100000 #define SDHCI_CAN_DO_HISPD 0x00200000 -#define SDHCI_CAN_DO_DMA 0x00400000 +#define SDHCI_CAN_DO_SDMA 0x00400000 #define SDHCI_CAN_VDD_330 0x01000000 #define SDHCI_CAN_VDD_300 0x02000000 #define SDHCI_CAN_VDD_180 0x04000000 @@ -232,6 +232,8 @@ struct sdhci_host { #define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22) /* Controller needs 10ms delay between applying power and clock */ #define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23) +/* Controller uses SDCLK instead of TMCLK for data timeouts */ +#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24) int irq; /* Device IRQ */ void __iomem * ioaddr; /* Mapped address */ @@ -250,7 +252,7 @@ struct sdhci_host { spinlock_t lock; /* Mutex */ int flags; /* Host attributes */ -#define SDHCI_USE_DMA (1<<0) /* Host is DMA capable */ +#define SDHCI_USE_SDMA (1<<0) /* Host is SDMA capable */ #define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */ #define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */ #define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */ |
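After the SDHCI_USE_DMA split into SDHCI_USE_SDMA and SDHCI_USE_ADMA above, the mode reported by the probe message follows directly from capabilities and quirks. A condensed, self-contained model of that selection (the FORCE_DMA quirk and the DMA-mask handling are left out for brevity; the flag names are local stand-ins):

#include <stdbool.h>

enum { USE_SDMA = 1 << 0, USE_ADMA = 1 << 1 };

static const char *sdhci_transfer_mode(bool can_sdma, bool can_adma2, bool spec_200,
                                       bool broken_dma, bool broken_adma,
                                       bool dma_usable)
{
        unsigned int flags = 0;

        if (can_sdma)
                flags |= USE_SDMA;
        if (broken_dma)
                flags &= ~USE_SDMA;             /* SDHCI_QUIRK_BROKEN_DMA */
        if (spec_200 && can_adma2)
                flags |= USE_ADMA;
        if (broken_adma)
                flags &= ~USE_ADMA;             /* SDHCI_QUIRK_BROKEN_ADMA */
        if (!dma_usable)                        /* enable_dma() failed */
                flags &= ~(USE_SDMA | USE_ADMA);

        return (flags & USE_ADMA) ? "ADMA" :
               (flags & USE_SDMA) ? "DMA" : "PIO";
}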