author	Mauro Carvalho Chehab <mchehab@infradead.org>	2008-07-24 14:05:50 -0300
committer	Mauro Carvalho Chehab <mchehab@infradead.org>	2008-07-24 14:05:50 -0300
commit	698158c799c7961824ccdb773250e16c0dd5d9e4 (patch)
tree	96f034be9e9a95e6f68b5d20d1a3df7a3cbf724a /drivers
parent	4c7827eec78abe6fe68fd29305806467f2a51e63 (diff)
parent	338b9bb3adac0d2c5a1e180491d9b001d624c402 (diff)
Merge ../linux-2.6
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/processor_throttling.c |   17
-rw-r--r--  drivers/base/cpu.c |    4
-rw-r--r--  drivers/char/nvram.c |    1
-rw-r--r--  drivers/char/tty_io.c |    2
-rw-r--r--  drivers/cpufreq/cpufreq.c |   14
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c |    2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c |    4
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c |   79
-rw-r--r--  drivers/dca/dca-core.c |  131
-rw-r--r--  drivers/dca/dca-sysfs.c |    3
-rw-r--r--  drivers/dma/Kconfig |   37
-rw-r--r--  drivers/dma/Makefile |    3
-rw-r--r--  drivers/dma/dmaengine.c |   35
-rw-r--r--  drivers/dma/dmatest.c |  444
-rw-r--r--  drivers/dma/dw_dmac.c | 1122
-rw-r--r--  drivers/dma/dw_dmac_regs.h |  225
-rw-r--r--  drivers/dma/fsldma.c |   38
-rw-r--r--  drivers/dma/ioat.c |   15
-rw-r--r--  drivers/dma/ioat_dca.c |  244
-rw-r--r--  drivers/dma/ioat_dma.c |  402
-rw-r--r--  drivers/dma/ioatdma.h |   28
-rw-r--r--  drivers/dma/ioatdma_hw.h |    1
-rw-r--r--  drivers/dma/ioatdma_registers.h |   20
-rw-r--r--  drivers/dma/iop-adma.c |   53
-rw-r--r--  drivers/dma/mv_xor.c | 1375
-rw-r--r--  drivers/dma/mv_xor.h |  183
-rw-r--r--  drivers/firmware/dcdbas.c |    3
-rw-r--r--  drivers/hid/hid-core.c |   10
-rw-r--r--  drivers/hid/hid-input-quirks.c |   40
-rw-r--r--  drivers/hid/hid-input.c |    3
-rw-r--r--  drivers/hid/hidraw.c |   48
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c |   22
-rw-r--r--  drivers/hid/usbhid/hiddev.c |   14
-rw-r--r--  drivers/hid/usbhid/usbkbd.c |   10
-rw-r--r--  drivers/hid/usbhid/usbmouse.c |    8
-rw-r--r--  drivers/ide/Kconfig |    2
-rw-r--r--  drivers/ide/arm/icside.c |   71
-rw-r--r--  drivers/ide/arm/ide_arm.c |   14
-rw-r--r--  drivers/ide/arm/palm_bk3710.c |   30
-rw-r--r--  drivers/ide/arm/rapide.c |   24
-rw-r--r--  drivers/ide/h8300/ide-h8300.c |   48
-rw-r--r--  drivers/ide/ide-atapi.c |   58
-rw-r--r--  drivers/ide/ide-cd.c |  157
-rw-r--r--  drivers/ide/ide-cd.h |   38
-rw-r--r--  drivers/ide/ide-cd_ioctl.c |   35
-rw-r--r--  drivers/ide/ide-disk.c |    2
-rw-r--r--  drivers/ide/ide-dma.c |  103
-rw-r--r--  drivers/ide/ide-floppy.c |   90
-rw-r--r--  drivers/ide/ide-generic.c |   73
-rw-r--r--  drivers/ide/ide-io.c |   42
-rw-r--r--  drivers/ide/ide-iops.c |  230
-rw-r--r--  drivers/ide/ide-lib.c |   17
-rw-r--r--  drivers/ide/ide-pnp.c |   29
-rw-r--r--  drivers/ide/ide-probe.c |  366
-rw-r--r--  drivers/ide/ide-proc.c |    4
-rw-r--r--  drivers/ide/ide-tape.c |  127
-rw-r--r--  drivers/ide/ide-taskfile.c |   38
-rw-r--r--  drivers/ide/ide.c |   49
-rw-r--r--  drivers/ide/legacy/buddha.c |   24
-rw-r--r--  drivers/ide/legacy/falconide.c |   56
-rw-r--r--  drivers/ide/legacy/gayle.c |   39
-rw-r--r--  drivers/ide/legacy/ide-4drives.c |   20
-rw-r--r--  drivers/ide/legacy/ide-cs.c |   54
-rw-r--r--  drivers/ide/legacy/ide_platform.c |   32
-rw-r--r--  drivers/ide/legacy/macide.c |   15
-rw-r--r--  drivers/ide/legacy/q40ide.c |   47
-rw-r--r--  drivers/ide/mips/au1xxx-ide.c |   56
-rw-r--r--  drivers/ide/mips/swarm.c |   24
-rw-r--r--  drivers/ide/pci/aec62xx.c |    5
-rw-r--r--  drivers/ide/pci/alim15x3.c |   12
-rw-r--r--  drivers/ide/pci/amd74xx.c |    1
-rw-r--r--  drivers/ide/pci/cmd640.c |   29
-rw-r--r--  drivers/ide/pci/cmd64x.c |   12
-rw-r--r--  drivers/ide/pci/cs5520.c |   41
-rw-r--r--  drivers/ide/pci/cs5535.c |    3
-rw-r--r--  drivers/ide/pci/delkin_cb.c |   25
-rw-r--r--  drivers/ide/pci/hpt34x.c |    1
-rw-r--r--  drivers/ide/pci/hpt366.c |   23
-rw-r--r--  drivers/ide/pci/ns87415.c |  115
-rw-r--r--  drivers/ide/pci/pdc202xx_old.c |    3
-rw-r--r--  drivers/ide/pci/piix.c |    4
-rw-r--r--  drivers/ide/pci/scc_pata.c |  139
-rw-r--r--  drivers/ide/pci/serverworks.c |    4
-rw-r--r--  drivers/ide/pci/sgiioc4.c |   65
-rw-r--r--  drivers/ide/pci/siimage.c |    6
-rw-r--r--  drivers/ide/pci/sl82c105.c |    4
-rw-r--r--  drivers/ide/pci/tc86c001.c |   16
-rw-r--r--  drivers/ide/pci/via82cxxx.c |    1
-rw-r--r--  drivers/ide/ppc/pmac.c |  222
-rw-r--r--  drivers/ide/setup-pci.c |  109
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c |    4
-rw-r--r--  drivers/input/keyboard/tosakbd.c |    2
-rw-r--r--  drivers/mfd/Kconfig |   11
-rw-r--r--  drivers/mfd/Makefile |    4
-rw-r--r--  drivers/mfd/mfd-core.c |  114
-rw-r--r--  drivers/mfd/tc6393xb.c |  600
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c |    3
-rw-r--r--  drivers/mmc/card/mmc_test.c |  225
-rw-r--r--  drivers/mmc/card/queue.c |   97
-rw-r--r--  drivers/mmc/host/au1xmmc.c |   54
-rw-r--r--  drivers/mmc/host/pxamci.c |    2
-rw-r--r--  drivers/mmc/host/s3cmci.c |   50
-rw-r--r--  drivers/mmc/host/sdhci.c |  167
-rw-r--r--  drivers/mmc/host/sdhci.h |    7
-rw-r--r--  drivers/mtd/nand/cmx270_nand.c |   79
-rw-r--r--  drivers/net/smc91x.c |   94
-rw-r--r--  drivers/net/smc91x.h |   76
-rw-r--r--  drivers/pcmcia/Kconfig |    3
-rw-r--r--  drivers/pcmcia/Makefile |    1
-rw-r--r--  drivers/pcmcia/pxa2xx_cm_x270.c |   93
-rw-r--r--  drivers/pcmcia/pxa2xx_palmtx.c |  118
-rw-r--r--  drivers/power/Kconfig |    6
-rw-r--r--  drivers/power/Makefile |    1
-rw-r--r--  drivers/power/palmtx_battery.c |  198
-rw-r--r--  drivers/scsi/ide-scsi.c |   32
-rw-r--r--  drivers/serial/cpm_uart/cpm_uart_core.c |   95
-rw-r--r--  drivers/serial/mpsc.c |  148
-rw-r--r--  drivers/usb/gadget/pxa25x_udc.c |    6
-rw-r--r--  drivers/video/pxafb.c |   64
119 files changed, 7834 insertions, 2014 deletions
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 0622ace0522..a2c3f9cfa54 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -827,6 +827,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
cpumask_t saved_mask;
+ cpumask_of_cpu_ptr_declare(new_mask);
int ret;
if (!pr)
@@ -838,7 +839,8 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
* Migrate task to the cpu pointed by pr.
*/
saved_mask = current->cpus_allowed;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+ cpumask_of_cpu_ptr_next(new_mask, pr->id);
+ set_cpus_allowed_ptr(current, new_mask);
ret = pr->throttling.acpi_processor_get_throttling(pr);
/* restore the previous state */
set_cpus_allowed_ptr(current, &saved_mask);
@@ -987,6 +989,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
{
cpumask_t saved_mask;
+ cpumask_of_cpu_ptr_declare(new_mask);
int ret = 0;
unsigned int i;
struct acpi_processor *match_pr;
@@ -1013,7 +1016,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* affected cpu in order to get one proper T-state.
* The notifier event is THROTTLING_PRECHANGE.
*/
- for_each_cpu_mask(i, online_throttling_cpus) {
+ for_each_cpu_mask_nr(i, online_throttling_cpus) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
&t_state);
@@ -1025,7 +1028,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* it can be called only for the cpu pointed by pr.
*/
if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+ cpumask_of_cpu_ptr_next(new_mask, pr->id);
+ set_cpus_allowed_ptr(current, new_mask);
ret = p_throttling->acpi_processor_set_throttling(pr,
t_state.target_state);
} else {
@@ -1034,7 +1038,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* it is necessary to set T-state for every affected
* cpus.
*/
- for_each_cpu_mask(i, online_throttling_cpus) {
+ for_each_cpu_mask_nr(i, online_throttling_cpus) {
match_pr = per_cpu(processors, i);
/*
* If the pointer is invalid, we will report the
@@ -1056,7 +1060,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
continue;
}
t_state.cpu = i;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+ cpumask_of_cpu_ptr_next(new_mask, i);
+ set_cpus_allowed_ptr(current, new_mask);
ret = match_pr->throttling.
acpi_processor_set_throttling(
match_pr, t_state.target_state);
@@ -1068,7 +1073,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* affected cpu to update the T-states.
* The notifier event is THROTTLING_POSTCHANGE
*/
- for_each_cpu_mask(i, online_throttling_cpus) {
+ for_each_cpu_mask_nr(i, online_throttling_cpus) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
&t_state);
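The hunks above replace stack-allocated cpumasks (via &cpumask_of_cpu()) with the pointer-based cpumask_of_cpu_ptr helpers, which avoid placing a full cpumask_t on the stack when NR_CPUS is large. A minimal sketch of the calling pattern, assuming the 2.6.26-era <linux/cpumask.h> helpers; run_on_cpu() is a hypothetical illustration, not part of the patch:

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Sketch only: mirrors the migrate-run-restore pattern used above. */
static int run_on_cpu(int cpu)
{
	cpumask_t saved_mask = current->cpus_allowed;
	cpumask_of_cpu_ptr_declare(new_mask);	/* declares a pointer, not an on-stack mask */

	cpumask_of_cpu_ptr_next(new_mask, cpu);	/* point it at cpumask_of_cpu(cpu) */
	set_cpus_allowed_ptr(current, new_mask);

	/* ... work that must execute on `cpu` ... */

	set_cpus_allowed_ptr(current, &saved_mask);
	return 0;
}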
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 20537d50790..64f5d54f7ed 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -121,14 +121,14 @@ static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf) \
{ \
return print_cpus_map(buf, &cpu_##type##_map); \
} \
-struct sysdev_class_attribute attr_##type##_map = \
+static struct sysdev_class_attribute attr_##type##_map = \
_SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL)
print_cpus_func(online);
print_cpus_func(possible);
print_cpus_func(present);
-struct sysdev_class_attribute *cpu_state_attr[] = {
+static struct sysdev_class_attribute *cpu_state_attr[] = {
&attr_online_map,
&attr_possible_map,
&attr_present_map,
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index a22662b6a1a..39f6357e3b5 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -107,7 +107,6 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
-#include <linux/smp_lock.h>
#include <asm/io.h>
#include <asm/uaccess.h>
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 15e597d0300..fa48dba5ba5 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -915,7 +915,7 @@ static void tty_reset_termios(struct tty_struct *tty)
* do_tty_hangup - actual handler for hangup events
* @work: tty device
*
-k * This can be called by the "eventd" kernel thread. That is process
+ * This can be called by the "eventd" kernel thread. That is process
* synchronous but doesn't hold any locks, so we need to make sure we
* have the appropriate locks for what we're doing.
*
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ee1df0d45e8..8d6a3ff0267 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -589,7 +589,7 @@ static ssize_t show_cpus(cpumask_t mask, char *buf)
ssize_t i = 0;
unsigned int cpu;
- for_each_cpu_mask(cpu, mask) {
+ for_each_cpu_mask_nr(cpu, mask) {
if (i)
i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
@@ -835,7 +835,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
}
#endif
- for_each_cpu_mask(j, policy->cpus) {
+ for_each_cpu_mask_nr(j, policy->cpus) {
if (cpu == j)
continue;
@@ -898,14 +898,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
}
spin_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_cpu_mask(j, policy->cpus) {
+ for_each_cpu_mask_nr(j, policy->cpus) {
per_cpu(cpufreq_cpu_data, j) = policy;
per_cpu(policy_cpu, j) = policy->cpu;
}
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* symlink affected CPUs */
- for_each_cpu_mask(j, policy->cpus) {
+ for_each_cpu_mask_nr(j, policy->cpus) {
if (j == cpu)
continue;
if (!cpu_online(j))
@@ -945,7 +945,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
err_out_unregister:
spin_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_cpu_mask(j, policy->cpus)
+ for_each_cpu_mask_nr(j, policy->cpus)
per_cpu(cpufreq_cpu_data, j) = NULL;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -1028,7 +1028,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
* the sysfs links afterwards.
*/
if (unlikely(cpus_weight(data->cpus) > 1)) {
- for_each_cpu_mask(j, data->cpus) {
+ for_each_cpu_mask_nr(j, data->cpus) {
if (j == cpu)
continue;
per_cpu(cpufreq_cpu_data, j) = NULL;
@@ -1038,7 +1038,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
if (unlikely(cpus_weight(data->cpus) > 1)) {
- for_each_cpu_mask(j, data->cpus) {
+ for_each_cpu_mask_nr(j, data->cpus) {
if (j == cpu)
continue;
dprintk("removing link for cpu %u\n", j);
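Every for_each_cpu_mask() iteration in this file becomes for_each_cpu_mask_nr(), which walks set bits only up to nr_cpu_ids instead of the full NR_CPUS range. A hedged sketch of the idiom; count_policy_cpus() is a hypothetical helper, not part of the patch:

#include <linux/cpumask.h>

/* Sketch: count the CPUs covered by a policy mask with the _nr iterator. */
static unsigned int count_policy_cpus(cpumask_t *mask)
{
	unsigned int cpu, n = 0;

	for_each_cpu_mask_nr(cpu, *mask)
		n++;
	return n;
}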
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 5d3a04ba6ad..fe565ee4375 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -497,7 +497,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
return rc;
}
- for_each_cpu_mask(j, policy->cpus) {
+ for_each_cpu_mask_nr(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d2af20dda38..33855cb3cf1 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -367,7 +367,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
/* Get Idle Time */
idle_ticks = UINT_MAX;
- for_each_cpu_mask(j, policy->cpus) {
+ for_each_cpu_mask_nr(j, policy->cpus) {
cputime64_t total_idle_ticks;
unsigned int tmp_idle_ticks;
struct cpu_dbs_info_s *j_dbs_info;
@@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
return rc;
}
- for_each_cpu_mask(j, policy->cpus) {
+ for_each_cpu_mask_nr(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index cb2ac01a41a..32244aa7cc0 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -30,16 +30,18 @@
/**
* A few values needed by the userspace governor
*/
-static unsigned int cpu_max_freq[NR_CPUS];
-static unsigned int cpu_min_freq[NR_CPUS];
-static unsigned int cpu_cur_freq[NR_CPUS]; /* current CPU freq */
-static unsigned int cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */
-static unsigned int cpu_is_managed[NR_CPUS];
+static DEFINE_PER_CPU(unsigned int, cpu_max_freq);
+static DEFINE_PER_CPU(unsigned int, cpu_min_freq);
+static DEFINE_PER_CPU(unsigned int, cpu_cur_freq); /* current CPU freq */
+static DEFINE_PER_CPU(unsigned int, cpu_set_freq); /* CPU freq desired by
+ userspace */
+static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
static DEFINE_MUTEX (userspace_mutex);
static int cpus_using_userspace_governor;
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
+#define dprintk(msg...) \
+ cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
/* keep track of frequency transitions */
static int
@@ -48,12 +50,12 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
{
struct cpufreq_freqs *freq = data;
- if (!cpu_is_managed[freq->cpu])
+ if (!per_cpu(cpu_is_managed, freq->cpu))
return 0;
dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n",
freq->cpu, freq->new);
- cpu_cur_freq[freq->cpu] = freq->new;
+ per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
return 0;
}
@@ -77,15 +79,15 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
mutex_lock(&userspace_mutex);
- if (!cpu_is_managed[policy->cpu])
+ if (!per_cpu(cpu_is_managed, policy->cpu))
goto err;
- cpu_set_freq[policy->cpu] = freq;
+ per_cpu(cpu_set_freq, policy->cpu) = freq;
- if (freq < cpu_min_freq[policy->cpu])
- freq = cpu_min_freq[policy->cpu];
- if (freq > cpu_max_freq[policy->cpu])
- freq = cpu_max_freq[policy->cpu];
+ if (freq < per_cpu(cpu_min_freq, policy->cpu))
+ freq = per_cpu(cpu_min_freq, policy->cpu);
+ if (freq > per_cpu(cpu_max_freq, policy->cpu))
+ freq = per_cpu(cpu_max_freq, policy->cpu);
/*
* We're safe from concurrent calls to ->target() here
@@ -104,7 +106,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
{
- return sprintf(buf, "%u\n", cpu_cur_freq[policy->cpu]);
+ return sprintf(buf, "%u\n", per_cpu(cpu_cur_freq, policy->cpu));
}
static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
@@ -127,12 +129,17 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
}
cpus_using_userspace_governor++;
- cpu_is_managed[cpu] = 1;
- cpu_min_freq[cpu] = policy->min;
- cpu_max_freq[cpu] = policy->max;
- cpu_cur_freq[cpu] = policy->cur;
- cpu_set_freq[cpu] = policy->cur;
- dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]);
+ per_cpu(cpu_is_managed, cpu) = 1;
+ per_cpu(cpu_min_freq, cpu) = policy->min;
+ per_cpu(cpu_max_freq, cpu) = policy->max;
+ per_cpu(cpu_cur_freq, cpu) = policy->cur;
+ per_cpu(cpu_set_freq, cpu) = policy->cur;
+ dprintk("managing cpu %u started "
+ "(%u - %u kHz, currently %u kHz)\n",
+ cpu,
+ per_cpu(cpu_min_freq, cpu),
+ per_cpu(cpu_max_freq, cpu),
+ per_cpu(cpu_cur_freq, cpu));
mutex_unlock(&userspace_mutex);
break;
@@ -145,34 +152,34 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
CPUFREQ_TRANSITION_NOTIFIER);
}
- cpu_is_managed[cpu] = 0;
- cpu_min_freq[cpu] = 0;
- cpu_max_freq[cpu] = 0;
- cpu_set_freq[cpu] = 0;
+ per_cpu(cpu_is_managed, cpu) = 0;
+ per_cpu(cpu_min_freq, cpu) = 0;
+ per_cpu(cpu_max_freq, cpu) = 0;
+ per_cpu(cpu_set_freq, cpu) = 0;
dprintk("managing cpu %u stopped\n", cpu);
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_LIMITS:
mutex_lock(&userspace_mutex);
- dprintk("limit event for cpu %u: %u - %u kHz,"
+ dprintk("limit event for cpu %u: %u - %u kHz, "
"currently %u kHz, last set to %u kHz\n",
cpu, policy->min, policy->max,
- cpu_cur_freq[cpu], cpu_set_freq[cpu]);
- if (policy->max < cpu_set_freq[cpu]) {
+ per_cpu(cpu_cur_freq, cpu),
+ per_cpu(cpu_set_freq, cpu));
+ if (policy->max < per_cpu(cpu_set_freq, cpu)) {
__cpufreq_driver_target(policy, policy->max,
CPUFREQ_RELATION_H);
- }
- else if (policy->min > cpu_set_freq[cpu]) {
+ } else if (policy->min > per_cpu(cpu_set_freq, cpu)) {
__cpufreq_driver_target(policy, policy->min,
CPUFREQ_RELATION_L);
- }
- else {
- __cpufreq_driver_target(policy, cpu_set_freq[cpu],
+ } else {
+ __cpufreq_driver_target(policy,
+ per_cpu(cpu_set_freq, cpu),
CPUFREQ_RELATION_L);
}
- cpu_min_freq[cpu] = policy->min;
- cpu_max_freq[cpu] = policy->max;
- cpu_cur_freq[cpu] = policy->cur;
+ per_cpu(cpu_min_freq, cpu) = policy->min;
+ per_cpu(cpu_max_freq, cpu) = policy->max;
+ per_cpu(cpu_cur_freq, cpu) = policy->cur;
mutex_unlock(&userspace_mutex);
break;
}
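The userspace governor's NR_CPUS-sized arrays become per-CPU variables above, so storage is only instantiated for CPUs that can actually exist. A minimal sketch of the DEFINE_PER_CPU()/per_cpu() idiom adopted here; example_freq and its accessors are hypothetical names, not part of the patch:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, example_freq);

/* per_cpu() gives indexed access to any CPU's copy of the variable. */
static void example_store(unsigned int cpu, unsigned int khz)
{
	per_cpu(example_freq, cpu) = khz;
}

static unsigned int example_load(unsigned int cpu)
{
	return per_cpu(example_freq, cpu);
}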
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index bf5b92f86df..ec249d2db63 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -28,13 +28,29 @@
#include <linux/device.h>
#include <linux/dca.h>
-MODULE_LICENSE("GPL");
+#define DCA_VERSION "1.4"
-/* For now we're assuming a single, global, DCA provider for the system. */
+MODULE_VERSION(DCA_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel Corporation");
static DEFINE_SPINLOCK(dca_lock);
-static struct dca_provider *global_dca = NULL;
+static LIST_HEAD(dca_providers);
+
+static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
+{
+ struct dca_provider *dca, *ret = NULL;
+
+ list_for_each_entry(dca, &dca_providers, node) {
+ if ((!dev) || (dca->ops->dev_managed(dca, dev))) {
+ ret = dca;
+ break;
+ }
+ }
+
+ return ret;
+}
/**
* dca_add_requester - add a dca client to the list
@@ -42,25 +58,39 @@ static struct dca_provider *global_dca = NULL;
*/
int dca_add_requester(struct device *dev)
{
- int err, slot;
+ struct dca_provider *dca;
+ int err, slot = -ENODEV;
- if (!global_dca)
- return -ENODEV;
+ if (!dev)
+ return -EFAULT;
spin_lock(&dca_lock);
- slot = global_dca->ops->add_requester(global_dca, dev);
- spin_unlock(&dca_lock);
- if (slot < 0)
+
+ /* check if the requester has not been added already */
+ dca = dca_find_provider_by_dev(dev);
+ if (dca) {
+ spin_unlock(&dca_lock);
+ return -EEXIST;
+ }
+
+ list_for_each_entry(dca, &dca_providers, node) {
+ slot = dca->ops->add_requester(dca, dev);
+ if (slot >= 0)
+ break;
+ }
+ if (slot < 0) {
+ spin_unlock(&dca_lock);
return slot;
+ }
- err = dca_sysfs_add_req(global_dca, dev, slot);
+ err = dca_sysfs_add_req(dca, dev, slot);
if (err) {
- spin_lock(&dca_lock);
- global_dca->ops->remove_requester(global_dca, dev);
+ dca->ops->remove_requester(dca, dev);
spin_unlock(&dca_lock);
return err;
}
+ spin_unlock(&dca_lock);
return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);
@@ -71,30 +101,78 @@ EXPORT_SYMBOL_GPL(dca_add_requester);
*/
int dca_remove_requester(struct device *dev)
{
+ struct dca_provider *dca;
int slot;
- if (!global_dca)
- return -ENODEV;
+
+ if (!dev)
+ return -EFAULT;
spin_lock(&dca_lock);
- slot = global_dca->ops->remove_requester(global_dca, dev);
- spin_unlock(&dca_lock);
- if (slot < 0)
+ dca = dca_find_provider_by_dev(dev);
+ if (!dca) {
+ spin_unlock(&dca_lock);
+ return -ENODEV;
+ }
+ slot = dca->ops->remove_requester(dca, dev);
+ if (slot < 0) {
+ spin_unlock(&dca_lock);
return slot;
+ }
- dca_sysfs_remove_req(global_dca, slot);
+ dca_sysfs_remove_req(dca, slot);
+
+ spin_unlock(&dca_lock);
return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);
/**
- * dca_get_tag - return the dca tag for the given cpu
+ * dca_common_get_tag - return the dca tag (serves both new and old api)
+ * @dev - the device that wants dca service
* @cpu - the cpuid as returned by get_cpu()
*/
-u8 dca_get_tag(int cpu)
+u8 dca_common_get_tag(struct device *dev, int cpu)
{
- if (!global_dca)
+ struct dca_provider *dca;
+ u8 tag;
+
+ spin_lock(&dca_lock);
+
+ dca = dca_find_provider_by_dev(dev);
+ if (!dca) {
+ spin_unlock(&dca_lock);
return -ENODEV;
- return global_dca->ops->get_tag(global_dca, cpu);
+ }
+ tag = dca->ops->get_tag(dca, dev, cpu);
+
+ spin_unlock(&dca_lock);
+ return tag;
+}
+
+/**
+ * dca3_get_tag - return the dca tag to the requester device
+ * for the given cpu (new api)
+ * @dev - the device that wants dca service
+ * @cpu - the cpuid as returned by get_cpu()
+ */
+u8 dca3_get_tag(struct device *dev, int cpu)
+{
+ if (!dev)
+ return -EFAULT;
+
+ return dca_common_get_tag(dev, cpu);
+}
+EXPORT_SYMBOL_GPL(dca3_get_tag);
+
+/**
+ * dca_get_tag - return the dca tag for the given cpu (old api)
+ * @cpu - the cpuid as returned by get_cpu()
+ */
+u8 dca_get_tag(int cpu)
+{
+ struct device *dev = NULL;
+
+ return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);
@@ -140,12 +218,10 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
int err;
- if (global_dca)
- return -EEXIST;
err = dca_sysfs_add_provider(dca, dev);
if (err)
return err;
- global_dca = dca;
+ list_add(&dca->node, &dca_providers);
blocking_notifier_call_chain(&dca_provider_chain,
DCA_PROVIDER_ADD, NULL);
return 0;
@@ -158,11 +234,9 @@ EXPORT_SYMBOL_GPL(register_dca_provider);
*/
void unregister_dca_provider(struct dca_provider *dca)
{
- if (!global_dca)
- return;
blocking_notifier_call_chain(&dca_provider_chain,
DCA_PROVIDER_REMOVE, NULL);
- global_dca = NULL;
+ list_del(&dca->node);
dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);
@@ -187,6 +261,7 @@ EXPORT_SYMBOL_GPL(dca_unregister_notify);
static int __init dca_init(void)
{
+ printk(KERN_ERR "dca service started, version %s\n", DCA_VERSION);
return dca_sysfs_init();
}
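With the single global_dca pointer replaced by a provider list, a requester is matched to whichever provider's ops->dev_managed() claims its device, and tags are then looked up per device through the new dca3_get_tag(). A hedged sketch of how a driver might consume the new API; request_dca_tag() is hypothetical, not part of the patch:

#include <linux/dca.h>

/* Sketch: register a device with whichever provider manages it,
 * then fetch its per-device DCA tag for the given CPU. */
static u8 request_dca_tag(struct device *dev, int cpu)
{
	int slot = dca_add_requester(dev);

	if (slot < 0 && slot != -EEXIST)
		return 0;		/* no provider manages this device */

	return dca3_get_tag(dev, cpu);	/* per-device tag (new API) */
}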
diff --git a/drivers/dca/dca-sysfs.c b/drivers/dca/dca-sysfs.c
index 9a70377bfb3..7af4b403bd2 100644
--- a/drivers/dca/dca-sysfs.c
+++ b/drivers/dca/dca-sysfs.c
@@ -13,10 +13,11 @@ static spinlock_t dca_idr_lock;
int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot)
{
struct device *cd;
+ static int req_count;
cd = device_create_drvdata(dca_class, dca->cd,
MKDEV(0, slot + 1), NULL,
- "requester%d", slot);
+ "requester%d", req_count++);
if (IS_ERR(cd))
return PTR_ERR(cd);
return 0;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6239c3df30a..cd303901eb5 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -4,13 +4,14 @@
menuconfig DMADEVICES
bool "DMA Engine support"
- depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC
- depends on !HIGHMEM64G
+ depends on !HIGHMEM64G && HAS_DMA
help
DMA engines can do asynchronous data transfers without
involving the host CPU. Currently, this framework can be
used to offload memory copies in the network stack and
- RAID operations in the MD driver.
+ RAID operations in the MD driver. This menu only presents
+ DMA Device drivers supported by the configured arch; it may
+ be empty in some cases.
if DMADEVICES
@@ -37,6 +38,15 @@ config INTEL_IOP_ADMA
help
Enable support for the Intel(R) IOP Series RAID engines.
+config DW_DMAC
+ tristate "Synopsys DesignWare AHB DMA support"
+ depends on AVR32
+ select DMA_ENGINE
+ default y if CPU_AT32AP7000
+ help
+ Support the Synopsys DesignWare AHB DMA controller. This
+ can be integrated in chips such as the Atmel AT32ap7000.
+
config FSL_DMA
bool "Freescale MPC85xx/MPC83xx DMA support"
depends on PPC
@@ -46,6 +56,14 @@ config FSL_DMA
MPC8560/40, MPC8555, MPC8548 and MPC8641 processors.
The MPC8349, MPC8360 is also supported.
+config MV_XOR
+ bool "Marvell XOR engine support"
+ depends on PLAT_ORION
+ select ASYNC_CORE
+ select DMA_ENGINE
+ ---help---
+ Enable support for the Marvell XOR engine.
+
config DMA_ENGINE
bool
@@ -55,10 +73,19 @@ comment "DMA Clients"
config NET_DMA
bool "Network: TCP receive copy offload"
depends on DMA_ENGINE && NET
+ default (INTEL_IOATDMA || FSL_DMA)
help
This enables the use of DMA engines in the network stack to
offload receive copy-to-user operations, freeing CPU cycles.
- Since this is the main user of the DMA engine, it should be enabled;
- say Y here.
+
+ Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
+ say N.
+
+config DMATEST
+ tristate "DMA Test client"
+ depends on DMA_ENGINE
+ help
+ Simple DMA test client. Say N unless you're debugging a
+ DMA Device driver.
endif
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index c8036d94590..14f59527d4f 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,6 +1,9 @@
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_NET_DMA) += iovlock.o
+obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
+obj-$(CONFIG_MV_XOR) += mv_xor.o
+obj-$(CONFIG_DW_DMAC) += dw_dmac.o
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 97b329e7679..dc003a3a787 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -169,12 +169,18 @@ static void dma_client_chan_alloc(struct dma_client *client)
enum dma_state_client ack;
/* Find a channel */
- list_for_each_entry(device, &dma_device_list, global_node)
+ list_for_each_entry(device, &dma_device_list, global_node) {
+ /* Does the client require a specific DMA controller? */
+ if (client->slave && client->slave->dma_dev
+ && client->slave->dma_dev != device->dev)
+ continue;
+
list_for_each_entry(chan, &device->channels, device_node) {
if (!dma_chan_satisfies_mask(chan, client->cap_mask))
continue;
- desc = chan->device->device_alloc_chan_resources(chan);
+ desc = chan->device->device_alloc_chan_resources(
+ chan, client);
if (desc >= 0) {
ack = client->event_callback(client,
chan,
@@ -183,12 +189,14 @@ static void dma_client_chan_alloc(struct dma_client *client)
/* we are done once this client rejects
* an available resource
*/
- if (ack == DMA_ACK)
+ if (ack == DMA_ACK) {
dma_chan_get(chan);
- else if (ack == DMA_NAK)
+ chan->client_count++;
+ } else if (ack == DMA_NAK)
return;
}
}
+ }
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -272,8 +280,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan)
/* client was holding resources for this channel so
* free it
*/
- if (ack == DMA_ACK)
+ if (ack == DMA_ACK) {
dma_chan_put(chan);
+ chan->client_count--;
+ }
}
mutex_unlock(&dma_list_mutex);
@@ -285,6 +295,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan)
*/
void dma_async_client_register(struct dma_client *client)
{
+ /* validate client data */
+ BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
+ !client->slave);
+
mutex_lock(&dma_list_mutex);
list_add_tail(&client->global_node, &dma_client_list);
mutex_unlock(&dma_list_mutex);
@@ -313,8 +327,10 @@ void dma_async_client_unregister(struct dma_client *client)
ack = client->event_callback(client, chan,
DMA_RESOURCE_REMOVED);
- if (ack == DMA_ACK)
+ if (ack == DMA_ACK) {
dma_chan_put(chan);
+ chan->client_count--;
+ }
}
list_del(&client->global_node);
@@ -359,6 +375,10 @@ int dma_async_device_register(struct dma_device *device)
!device->device_prep_dma_memset);
BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
!device->device_prep_dma_interrupt);
+ BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
+ !device->device_prep_slave_sg);
+ BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
+ !device->device_terminate_all);
BUG_ON(!device->device_alloc_chan_resources);
BUG_ON(!device->device_free_chan_resources);
@@ -378,7 +398,7 @@ int dma_async_device_register(struct dma_device *device)
chan->chan_id = chancnt++;
chan->dev.class = &dma_devclass;
- chan->dev.parent = NULL;
+ chan->dev.parent = device->dev;
snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d",
device->dev_id, chan->chan_id);
@@ -394,6 +414,7 @@ int dma_async_device_register(struct dma_device *device)
kref_get(&device->refcount);
kref_get(&device->refcount);
kref_init(&chan->refcount);
+ chan->client_count = 0;
chan->slow_ref = 0;
INIT_RCU_HEAD(&chan->rcu);
}
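dma_async_client_register() now insists that a client advertising DMA_SLAVE capability supplies its slave data (the BUG_ON above), and each channel tracks how many clients hold it via client_count. A hedged sketch of a conforming registration, assuming the 2.6.27-era struct dma_client layout introduced by this merge; my_event, my_slave, and my_client_init are hypothetical:

#include <linux/dmaengine.h>

static enum dma_state_client
my_event(struct dma_client *client, struct dma_chan *chan,
	 enum dma_state state)
{
	return DMA_DUP;			/* decline the channel: sketch only */
}

static struct dma_slave my_slave;	/* would be filled in by platform code */

static struct dma_client my_client = {
	.event_callback	= my_event,
	.slave		= &my_slave,	/* required once DMA_SLAVE is set */
};

static int __init my_client_init(void)
{
	dma_cap_set(DMA_SLAVE, my_client.cap_mask);
	dma_async_client_register(&my_client);	/* BUG_ON would fire if .slave were NULL */
	dma_async_client_chan_request(&my_client);
	return 0;
}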
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
new file mode 100644
index 00000000000..a08d1970474
--- /dev/null
+++ b/drivers/dma/dmatest.c
@@ -0,0 +1,444 @@
+/*
+ * DMA Engine test module
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/random.h>
+#include <linux/wait.h>
+
+static unsigned int test_buf_size = 16384;
+module_param(test_buf_size, uint, S_IRUGO);
+MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
+
+static char test_channel[BUS_ID_SIZE];
+module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
+MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
+
+static char test_device[BUS_ID_SIZE];
+module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
+MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
+
+static unsigned int threads_per_chan = 1;
+module_param(threads_per_chan, uint, S_IRUGO);
+MODULE_PARM_DESC(threads_per_chan,
+ "Number of threads to start per channel (default: 1)");
+
+static unsigned int max_channels;
+module_param(max_channels, uint, S_IRUGO);
+MODULE_PARM_DESC(max_channels,
+ "Maximum number of channels to use (default: all)");
+
+/*
+ * Initialization patterns. All bytes in the source buffer have bit 7
+ * set, all bytes in the destination buffer have bit 7 cleared.
+ *
+ * Bit 6 is set for all bytes which are to be copied by the DMA
+ * engine. Bit 5 is set for all bytes which are to be overwritten by
+ * the DMA engine.
+ *
+ * The remaining bits are the inverse of a counter which increments by
+ * one for each byte address.
+ */
+#define PATTERN_SRC 0x80
+#define PATTERN_DST 0x00
+#define PATTERN_COPY 0x40
+#define PATTERN_OVERWRITE 0x20
+#define PATTERN_COUNT_MASK 0x1f
+
+struct dmatest_thread {
+ struct list_head node;
+ struct task_struct *task;
+ struct dma_chan *chan;
+ u8 *srcbuf;
+ u8 *dstbuf;
+};
+
+struct dmatest_chan {
+ struct list_head node;
+ struct dma_chan *chan;
+ struct list_head threads;
+};
+
+/*
+ * These are protected by dma_list_mutex since they're only used by
+ * the DMA client event callback
+ */
+static LIST_HEAD(dmatest_channels);
+static unsigned int nr_channels;
+
+static bool dmatest_match_channel(struct dma_chan *chan)
+{
+ if (test_channel[0] == '\0')
+ return true;
+ return strcmp(chan->dev.bus_id, test_channel) == 0;
+}
+
+static bool dmatest_match_device(struct dma_device *device)
+{
+ if (test_device[0] == '\0')
+ return true;
+ return strcmp(device->dev->bus_id, test_device) == 0;
+}
+
+static unsigned long dmatest_random(void)
+{
+ unsigned long buf;
+
+ get_random_bytes(&buf, sizeof(buf));
+ return buf;
+}
+
+static void dmatest_init_srcbuf(u8 *buf, unsigned int start, unsigned int len)
+{
+ unsigned int i;
+
+ for (i = 0; i < start; i++)
+ buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < start + len; i++)
+ buf[i] = PATTERN_SRC | PATTERN_COPY
+ | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < test_buf_size; i++)
+ buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+}
+
+static void dmatest_init_dstbuf(u8 *buf, unsigned int start, unsigned int len)
+{
+ unsigned int i;
+
+ for (i = 0; i < start; i++)
+ buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < start + len; i++)
+ buf[i] = PATTERN_DST | PATTERN_OVERWRITE
+ | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < test_buf_size; i++)
+ buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+}
+
+static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
+ unsigned int counter, bool is_srcbuf)
+{
+ u8 diff = actual ^ pattern;
+ u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ const char *thread_name = current->comm;
+
+ if (is_srcbuf)
+ pr_warning("%s: srcbuf[0x%x] overwritten!"
+ " Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else if ((pattern & PATTERN_COPY)
+ && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
+ pr_warning("%s: dstbuf[0x%x] not copied!"
+ " Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else if (diff & PATTERN_SRC)
+ pr_warning("%s: dstbuf[0x%x] was copied!"
+ " Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+ else
+ pr_warning("%s: dstbuf[0x%x] mismatch!"
+ " Expected %02x, got %02x\n",
+ thread_name, index, expected, actual);
+}
+
+static unsigned int dmatest_verify(u8 *buf, unsigned int start,
+ unsigned int end, unsigned int counter, u8 pattern,
+ bool is_srcbuf)
+{
+ unsigned int i;
+ unsigned int error_count = 0;
+ u8 actual;
+
+ for (i = start; i < end; i++) {
+ actual = buf[i];
+ if (actual != (pattern | (~counter & PATTERN_COUNT_MASK))) {
+ if (error_count < 32)
+ dmatest_mismatch(actual, pattern, i, counter,
+ is_srcbuf);
+ error_count++;
+ }
+ counter++;
+ }
+
+ if (error_count > 32)
+ pr_warning("%s: %u errors suppressed\n",
+ current->comm, error_count - 32);
+
+ return error_count;
+}
+
+/*
+ * This function repeatedly tests DMA transfers of various lengths and
+ * offsets until it is told to exit by kthread_stop(). There may be
+ * multiple threads running this function in parallel for a single
+ * channel, and there may be multiple channels being tested in
+ * parallel.
+ *
+ * Before each test, the source and destination buffer is initialized
+ * with a known pattern. This pattern is different depending on
+ * whether it's in an area which is supposed to be copied or
+ * overwritten, and different in the source and destination buffers.
+ * So if the DMA engine doesn't copy exactly what we tell it to copy,
+ * we'll notice.
+ */
+static int dmatest_func(void *data)
+{
+ struct dmatest_thread *thread = data;
+ struct dma_chan *chan;
+ const char *thread_name;
+ unsigned int src_off, dst_off, len;
+ unsigned int error_count;
+ unsigned int failed_tests = 0;
+ unsigned int total_tests = 0;
+ dma_cookie_t cookie;
+ enum dma_status status;
+ int ret;
+
+ thread_name = current->comm;
+
+ ret = -ENOMEM;
+ thread->srcbuf = kmalloc(test_buf_size, GFP_KERNEL);
+ if (!thread->srcbuf)
+ goto err_srcbuf;
+ thread->dstbuf = kmalloc(test_buf_size, GFP_KERNEL);
+ if (!thread->dstbuf)
+ goto err_dstbuf;
+
+ smp_rmb();
+ chan = thread->chan;
+ dma_chan_get(chan);
+
+ while (!kthread_should_stop()) {
+ total_tests++;
+
+ len = dmatest_random() % test_buf_size + 1;
+ src_off = dmatest_random() % (test_buf_size - len + 1);
+ dst_off = dmatest_random() % (test_buf_size - len + 1);
+
+ dmatest_init_srcbuf(thread->srcbuf, src_off, len);
+ dmatest_init_dstbuf(thread->dstbuf, dst_off, len);
+
+ cookie = dma_async_memcpy_buf_to_buf(chan,
+ thread->dstbuf + dst_off,
+ thread->srcbuf + src_off,
+ len);
+ if (dma_submit_error(cookie)) {
+ pr_warning("%s: #%u: submit error %d with src_off=0x%x "
+ "dst_off=0x%x len=0x%x\n",
+ thread_name, total_tests - 1, cookie,
+ src_off, dst_off, len);
+ msleep(100);
+ failed_tests++;
+ continue;
+ }
+ dma_async_memcpy_issue_pending(chan);
+
+ do {
+ msleep(1);
+ status = dma_async_memcpy_complete(
+ chan, cookie, NULL, NULL);
+ } while (status == DMA_IN_PROGRESS);
+
+ if (status == DMA_ERROR) {
+ pr_warning("%s: #%u: error during copy\n",
+ thread_name, total_tests - 1);
+ failed_tests++;
+ continue;
+ }
+
+ error_count = 0;
+
+ pr_debug("%s: verifying source buffer...\n", thread_name);
+ error_count += dmatest_verify(thread->srcbuf, 0, src_off,
+ 0, PATTERN_SRC, true);
+ error_count += dmatest_verify(thread->srcbuf, src_off,
+ src_off + len, src_off,
+ PATTERN_SRC | PATTERN_COPY, true);
+ error_count += dmatest_verify(thread->srcbuf, src_off + len,
+ test_buf_size, src_off + len,
+ PATTERN_SRC, true);
+
+ pr_debug("%s: verifying dest buffer...\n",
+ thread->task->comm);
+ error_count += dmatest_verify(thread->dstbuf, 0, dst_off,
+ 0, PATTERN_DST, false);
+ error_count += dmatest_verify(thread->dstbuf, dst_off,
+ dst_off + len, src_off,
+ PATTERN_SRC | PATTERN_COPY, false);
+ error_count += dmatest_verify(thread->dstbuf, dst_off + len,
+ test_buf_size, dst_off + len,
+ PATTERN_DST, false);
+
+ if (error_count) {
+ pr_warning("%s: #%u: %u errors with "
+ "src_off=0x%x dst_off=0x%x len=0x%x\n",
+ thread_name, total_tests - 1, error_count,
+ src_off, dst_off, len);
+ failed_tests++;
+ } else {
+ pr_debug("%s: #%u: No errors with "
+ "src_off=0x%x dst_off=0x%x len=0x%x\n",
+ thread_name, total_tests - 1,
+ src_off, dst_off, len);
+ }
+ }
+
+ ret = 0;
+ dma_chan_put(chan);
+ kfree(thread->dstbuf);
+err_dstbuf:
+ kfree(thread->srcbuf);
+err_srcbuf:
+ pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
+ thread_name, total_tests, failed_tests, ret);
+ return ret;
+}
+
+static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
+{
+ struct dmatest_thread *thread;
+ struct dmatest_thread *_thread;
+ int ret;
+
+ list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
+ ret = kthread_stop(thread->task);
+ pr_debug("dmatest: thread %s exited with status %d\n",
+ thread->task->comm, ret);
+ list_del(&thread->node);
+ kfree(thread);
+ }
+ kfree(dtc);
+}
+
+static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
+{
+ struct dmatest_chan *dtc;
+ struct dmatest_thread *thread;
+ unsigned int i;
+
+ dtc = kmalloc(sizeof(struct dmatest_chan), GFP_ATOMIC);
+ if (!dtc) {
+ pr_warning("dmatest: No memory for %s\n", chan->dev.bus_id);
+ return DMA_NAK;
+ }
+
+ dtc->chan = chan;
+ INIT_LIST_HEAD(&dtc->threads);
+
+ for (i = 0; i < threads_per_chan; i++) {
+ thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
+ if (!thread) {
+ pr_warning("dmatest: No memory for %s-test%u\n",
+ chan->dev.bus_id, i);
+ break;
+ }
+ thread->chan = dtc->chan;
+ smp_wmb();
+ thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
+ chan->dev.bus_id, i);
+ if (IS_ERR(thread->task)) {
+ pr_warning("dmatest: Failed to run thread %s-test%u\n",
+ chan->dev.bus_id, i);
+ kfree(thread);
+ break;
+ }
+
+ /* srcbuf and dstbuf are allocated by the thread itself */
+
+ list_add_tail(&thread->node, &dtc->threads);
+ }
+
+ pr_info("dmatest: Started %u threads using %s\n", i, chan->dev.bus_id);
+
+ list_add_tail(&dtc->node, &dmatest_channels);
+ nr_channels++;
+
+ return DMA_ACK;
+}
+
+static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan)
+{
+ struct dmatest_chan *dtc, *_dtc;
+
+ list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
+ if (dtc->chan == chan) {
+ list_del(&dtc->node);
+ dmatest_cleanup_channel(dtc);
+ pr_debug("dmatest: lost channel %s\n",
+ chan->dev.bus_id);
+ return DMA_ACK;
+ }
+ }
+
+ return DMA_DUP;
+}
+
+/*
+ * Start testing threads as new channels are assigned to us, and kill
+ * them when the channels go away.
+ *
+ * When we unregister the client, all channels are removed so this
+ * will also take care of cleaning things up when the module is
+ * unloaded.
+ */
+static enum dma_state_client
+dmatest_event(struct dma_client *client, struct dma_chan *chan,
+ enum dma_state state)
+{
+ enum dma_state_client ack = DMA_NAK;
+
+ switch (state) {
+ case DMA_RESOURCE_AVAILABLE:
+ if (!dmatest_match_channel(chan)
+ || !dmatest_match_device(chan->device))
+ ack = DMA_DUP;
+ else if (max_channels && nr_channels >= max_channels)
+ ack = DMA_NAK;
+ else
+ ack = dmatest_add_channel(chan);
+ break;
+
+ case DMA_RESOURCE_REMOVED:
+ ack = dmatest_remove_channel(chan);
+ break;
+
+ default:
+ pr_info("dmatest: Unhandled event %u (%s)\n",
+ state, chan->dev.bus_id);
+ break;
+ }
+
+ return ack;
+}
+
+static struct dma_client dmatest_client = {
+ .event_callback = dmatest_event,
+};
+
+static int __init dmatest_init(void)
+{
+ dma_cap_set(DMA_MEMCPY, dmatest_client.cap_mask);
+ dma_async_client_register(&dmatest_client);
+ dma_async_client_chan_request(&dmatest_client);
+
+ return 0;
+}
+module_init(dmatest_init);
+
+static void __exit dmatest_exit(void)
+{
+ dma_async_client_unregister(&dmatest_client);
+}
+module_exit(dmatest_exit);
+
+MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
+MODULE_LICENSE("GPL v2");
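For reference, dmatest's verify step reconstructs each expected byte from the pattern flags plus the inverted low bits of its address. A worked example of the scheme as a stand-alone C program (a sketch; the values follow the PATTERN_* definitions in dmatest.c above):

#include <stdio.h>

#define PATTERN_SRC        0x80
#define PATTERN_COPY       0x40
#define PATTERN_COUNT_MASK 0x1f

int main(void)
{
	unsigned int i = 5;	/* byte address inside the copied region */
	unsigned char expected =
		PATTERN_SRC | PATTERN_COPY | (~i & PATTERN_COUNT_MASK);

	/* prints "expected src byte at 0x5: 0xda" */
	printf("expected src byte at 0x%x: 0x%02x\n", i, expected);
	return 0;
}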
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
new file mode 100644
index 00000000000..94df9177124
--- /dev/null
+++ b/drivers/dma/dw_dmac.c
@@ -0,0 +1,1122 @@
+/*
+ * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
+ * AVR32 systems.)
+ *
+ * Copyright (C) 2007-2008 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dw_dmac_regs.h"
+
+/*
+ * This supports the Synopsys "DesignWare AHB Central DMA Controller"
+ * (DW_ahb_dmac), which is used with various AMBA 2.0 systems (not all
+ * of which use ARM any more). See the "Databook" from Synopsys for
+ * information beyond what licensees probably provide.
+ *
+ * The driver has currently been tested only with the Atmel AT32AP7000,
+ * which does not support descriptor writeback.
+ */
+
+/* NOTE: DMS+SMS is system-specific. We should get this information
+ * from the platform code somehow.
+ */
+#define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \
+ | DWC_CTLL_SRC_MSIZE(0) \
+ | DWC_CTLL_DMS(0) \
+ | DWC_CTLL_SMS(1) \
+ | DWC_CTLL_LLP_D_EN \
+ | DWC_CTLL_LLP_S_EN)
+
+/*
+ * This is configuration-dependent and usually a funny size like 4095.
+ * Let's round it down to the nearest power of two.
+ *
+ * Note that this is a transfer count, i.e. if we transfer 32-bit
+ * words, we can do 8192 bytes per descriptor.
+ *
+ * This parameter is also system-specific.
+ */
+#define DWC_MAX_COUNT 2048U
+
+/*
+ * Number of descriptors to allocate for each channel. This should be
+ * made configurable somehow; preferably, the clients (at least the
+ * ones using slave transfers) should be able to give us a hint.
+ */
+#define NR_DESCS_PER_CHANNEL 64
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Because we're not relying on writeback from the controller (it may not
+ * even be configured into the core!) we don't need to use dma_pool. These
+ * descriptors -- and associated data -- are cacheable. We do need to make
+ * sure their dcache entries are written back before handing them off to
+ * the controller, though.
+ */
+
+static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
+{
+ return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
+}
+
+static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
+{
+ return list_entry(dwc->queue.next, struct dw_desc, desc_node);
+}
+
+static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
+{
+ struct dw_desc *desc, *_desc;
+ struct dw_desc *ret = NULL;
+ unsigned int i = 0;
+
+ spin_lock_bh(&dwc->lock);
+ list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
+ if (async_tx_test_ack(&desc->txd)) {
+ list_del(&desc->desc_node);
+ ret = desc;
+ break;
+ }
+ dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc);
+ i++;
+ }
+ spin_unlock_bh(&dwc->lock);
+
+ dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i);
+
+ return ret;
+}
+
+static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
+{
+ struct dw_desc *child;
+
+ list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+ dma_sync_single_for_cpu(dwc->chan.dev.parent,
+ child->txd.phys, sizeof(child->lli),
+ DMA_TO_DEVICE);
+ dma_sync_single_for_cpu(dwc->chan.dev.parent,
+ desc->txd.phys, sizeof(desc->lli),
+ DMA_TO_DEVICE);
+}
+
+/*
+ * Move a descriptor, including any children, to the free list.
+ * `desc' must not be on any lists.
+ */
+static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
+{
+ if (desc) {
+ struct dw_desc *child;
+
+ dwc_sync_desc_for_cpu(dwc, desc);
+
+ spin_lock_bh(&dwc->lock);
+ list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+ dev_vdbg(&dwc->chan.dev,
+ "moving child desc %p to freelist\n",
+ child);
+ list_splice_init(&desc->txd.tx_list, &dwc->free_list);
+ dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc);
+ list_add(&desc->desc_node, &dwc->free_list);
+ spin_unlock_bh(&dwc->lock);
+ }
+}
+
+/* Called with dwc->lock held and bh disabled */
+static dma_cookie_t
+dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
+{
+ dma_cookie_t cookie = dwc->chan.cookie;
+
+ if (++cookie < 0)
+ cookie = 1;
+
+ dwc->chan.cookie = cookie;
+ desc->txd.cookie = cookie;
+
+ return cookie;
+}
+
+/*----------------------------------------------------------------------*/
+
+/* Called with dwc->lock held and bh disabled */
+static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
+{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+ /* ASSERT: channel is idle */
+ if (dma_readl(dw, CH_EN) & dwc->mask) {
+ dev_err(&dwc->chan.dev,
+ "BUG: Attempted to start non-idle channel\n");
+ dev_err(&dwc->chan.dev,
+ " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+ channel_readl(dwc, SAR),
+ channel_readl(dwc, DAR),
+ channel_readl(dwc, LLP),
+ channel_readl(dwc, CTL_HI),
+ channel_readl(dwc, CTL_LO));
+
+ /* The tasklet will hopefully advance the queue... */
+ return;
+ }
+
+ channel_writel(dwc, LLP, first->txd.phys);
+ channel_writel(dwc, CTL_LO,
+ DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+ channel_writel(dwc, CTL_HI, 0);
+ channel_set_bit(dw, CH_EN, dwc->mask);
+}
+
+/*----------------------------------------------------------------------*/
+
+static void
+dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
+{
+ dma_async_tx_callback callback;
+ void *param;
+ struct dma_async_tx_descriptor *txd = &desc->txd;
+
+ dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie);
+
+ dwc->completed = txd->cookie;
+ callback = txd->callback;
+ param = txd->callback_param;
+
+ dwc_sync_desc_for_cpu(dwc, desc);
+ list_splice_init(&txd->tx_list, &dwc->free_list);
+ list_move(&desc->desc_node, &dwc->free_list);
+
+ /*
+ * We use dma_unmap_page() regardless of how the buffers were
+ * mapped before they were submitted...
+ */
+ if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
+ dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len,
+ DMA_FROM_DEVICE);
+ if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
+ dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len,
+ DMA_TO_DEVICE);
+
+ /*
+ * The API requires that no submissions are done from a
+ * callback, so we don't need to drop the lock here
+ */
+ if (callback)
+ callback(param);
+}
+
+static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
+{
+ struct dw_desc *desc, *_desc;
+ LIST_HEAD(list);
+
+ if (dma_readl(dw, CH_EN) & dwc->mask) {
+ dev_err(&dwc->chan.dev,
+ "BUG: XFER bit set, but channel not idle!\n");
+
+ /* Try to continue after resetting the channel... */
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+ }
+
+ /*
+ * Submit queued descriptors ASAP, i.e. before we go through
+ * the completed ones.
+ */
+ if (!list_empty(&dwc->queue))
+ dwc_dostart(dwc, dwc_first_queued(dwc));
+ list_splice_init(&dwc->active_list, &list);
+ list_splice_init(&dwc->queue, &dwc->active_list);
+
+ list_for_each_entry_safe(desc, _desc, &list, desc_node)
+ dwc_descriptor_complete(dwc, desc);
+}
+
+static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
+{
+ dma_addr_t llp;
+ struct dw_desc *desc, *_desc;
+ struct dw_desc *child;
+ u32 status_xfer;
+
+ /*
+ * Clear block interrupt flag before scanning so that we don't
+ * miss any, and read LLP before RAW_XFER to ensure it is
+ * valid if we decide to scan the list.
+ */
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ llp = channel_readl(dwc, LLP);
+ status_xfer = dma_readl(dw, RAW.XFER);
+
+ if (status_xfer & dwc->mask) {
+ /* Everything we've submitted is done */
+ dma_writel(dw, CLEAR.XFER, dwc->mask);
+ dwc_complete_all(dw, dwc);
+ return;
+ }
+
+ dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp);
+
+ list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
+ if (desc->lli.llp == llp)
+ /* This one is currently in progress */
+ return;
+
+ list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+ if (child->lli.llp == llp)
+ /* Currently in progress */
+ return;
+
+ /*
+ * No descriptors so far seem to be in progress, i.e.
+ * this one must be done.
+ */
+ dwc_descriptor_complete(dwc, desc);
+ }
+
+ dev_err(&dwc->chan.dev,
+ "BUG: All descriptors done, but channel not idle!\n");
+
+ /* Try to continue after resetting the channel... */
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+
+ if (!list_empty(&dwc->queue)) {
+ dwc_dostart(dwc, dwc_first_queued(dwc));
+ list_splice_init(&dwc->queue, &dwc->active_list);
+ }
+}
+
+static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
+{
+ dev_printk(KERN_CRIT, &dwc->chan.dev,
+ " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
+ lli->sar, lli->dar, lli->llp,
+ lli->ctlhi, lli->ctllo);
+}
+
+static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
+{
+ struct dw_desc *bad_desc;
+ struct dw_desc *child;
+
+ dwc_scan_descriptors(dw, dwc);
+
+ /*
+ * The descriptor currently at the head of the active list is
+ * borked. Since we don't have any way to report errors, we'll
+ * just have to scream loudly and try to carry on.
+ */
+ bad_desc = dwc_first_active(dwc);
+ list_del_init(&bad_desc->desc_node);
+ list_splice_init(&dwc->queue, dwc->active_list.prev);
+
+ /* Clear the error flag and try to restart the controller */
+ dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ if (!list_empty(&dwc->active_list))
+ dwc_dostart(dwc, dwc_first_active(dwc));
+
+ /*
+ * KERN_CRIT may seem harsh, but since this only happens
+ * when someone submits a bad physical address in a
+ * descriptor, we should consider ourselves lucky that the
+ * controller flagged an error instead of scribbling over
+ * random memory locations.
+ */
+ dev_printk(KERN_CRIT, &dwc->chan.dev,
+ "Bad descriptor submitted for DMA!\n");
+ dev_printk(KERN_CRIT, &dwc->chan.dev,
+ " cookie: %d\n", bad_desc->txd.cookie);
+ dwc_dump_lli(dwc, &bad_desc->lli);
+ list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
+ dwc_dump_lli(dwc, &child->lli);
+
+ /* Pretend the descriptor completed successfully */
+ dwc_descriptor_complete(dwc, bad_desc);
+}
+
+static void dw_dma_tasklet(unsigned long data)
+{
+ struct dw_dma *dw = (struct dw_dma *)data;
+ struct dw_dma_chan *dwc;
+ u32 status_block;
+ u32 status_xfer;
+ u32 status_err;
+ int i;
+
+ status_block = dma_readl(dw, RAW.BLOCK);
+ status_xfer = dma_readl(dw, RAW.XFER);
+ status_err = dma_readl(dw, RAW.ERROR);
+
+ dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
+ status_block, status_err);
+
+ for (i = 0; i < dw->dma.chancnt; i++) {
+ dwc = &dw->chan[i];
+ spin_lock(&dwc->lock);
+ if (status_err & (1 << i))
+ dwc_handle_error(dw, dwc);
+ else if ((status_block | status_xfer) & (1 << i))
+ dwc_scan_descriptors(dw, dwc);
+ spin_unlock(&dwc->lock);
+ }
+
+ /*
+ * Re-enable interrupts. Block Complete interrupts are only
+ * enabled if the INT_EN bit in the descriptor is set. This
+ * will trigger a scan before the whole list is done.
+ */
+ channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
+ channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+ channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
+}
+
+static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
+{
+ struct dw_dma *dw = dev_id;
+ u32 status;
+
+ dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
+ dma_readl(dw, STATUS_INT));
+
+ /*
+ * Just disable the interrupts. We'll turn them back on in the
+ * softirq handler.
+ */
+ channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+
+ status = dma_readl(dw, STATUS_INT);
+ if (status) {
+ dev_err(dw->dma.dev,
+ "BUG: Unexpected interrupts pending: 0x%x\n",
+ status);
+
+ /* Try to recover */
+ channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
+ channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
+ channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
+ channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
+ channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
+ }
+
+ tasklet_schedule(&dw->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/*----------------------------------------------------------------------*/
+
+static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct dw_desc *desc = txd_to_dw_desc(tx);
+ struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
+ dma_cookie_t cookie;
+
+ spin_lock_bh(&dwc->lock);
+ cookie = dwc_assign_cookie(dwc, desc);
+
+ /*
+ * REVISIT: We should attempt to chain as many descriptors as
+ * possible, perhaps even appending to those already submitted
+ * for DMA. But this is hard to do in a race-free manner.
+ */
+ if (list_empty(&dwc->active_list)) {
+ dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n",
+ desc->txd.cookie);
+ dwc_dostart(dwc, desc);
+ list_add_tail(&desc->desc_node, &dwc->active_list);
+ } else {
+ dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n",
+ desc->txd.cookie);
+
+ list_add_tail(&desc->desc_node, &dwc->queue);
+ }
+
+ spin_unlock_bh(&dwc->lock);
+
+ return cookie;
+}
+
+static struct dma_async_tx_descriptor *
+dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_desc *desc;
+ struct dw_desc *first;
+ struct dw_desc *prev;
+ size_t xfer_count;
+ size_t offset;
+ unsigned int src_width;
+ unsigned int dst_width;
+ u32 ctllo;
+
+ dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
+ dest, src, len, flags);
+
+ if (unlikely(!len)) {
+ dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n");
+ return NULL;
+ }
+
+ /*
+ * We can be a lot more clever here, but this should take care
+ * of the most common optimization.
+ */
+ if (!((src | dest | len) & 3))
+ src_width = dst_width = 2;
+ else if (!((src | dest | len) & 1))
+ src_width = dst_width = 1;
+ else
+ src_width = dst_width = 0;
+
+ ctllo = DWC_DEFAULT_CTLLO
+ | DWC_CTLL_DST_WIDTH(dst_width)
+ | DWC_CTLL_SRC_WIDTH(src_width)
+ | DWC_CTLL_DST_INC
+ | DWC_CTLL_SRC_INC
+ | DWC_CTLL_FC_M2M;
+ prev = first = NULL;
+
+ for (offset = 0; offset < len; offset += xfer_count << src_width) {
+ xfer_count = min_t(size_t, (len - offset) >> src_width,
+ DWC_MAX_COUNT);
+
+ desc = dwc_desc_get(dwc);
+ if (!desc)
+ goto err_desc_get;
+
+ desc->lli.sar = src + offset;
+ desc->lli.dar = dest + offset;
+ desc->lli.ctllo = ctllo;
+ desc->lli.ctlhi = xfer_count;
+
+ if (!first) {
+ first = desc;
+ } else {
+ prev->lli.llp = desc->txd.phys;
+ dma_sync_single_for_device(chan->dev.parent,
+ prev->txd.phys, sizeof(prev->lli),
+ DMA_TO_DEVICE);
+ list_add_tail(&desc->desc_node,
+ &first->txd.tx_list);
+ }
+ prev = desc;
+ }
+
+
+ if (flags & DMA_PREP_INTERRUPT)
+ /* Trigger interrupt after last block */
+ prev->lli.ctllo |= DWC_CTLL_INT_EN;
+
+ prev->lli.llp = 0;
+ dma_sync_single_for_device(chan->dev.parent,
+ prev->txd.phys, sizeof(prev->lli),
+ DMA_TO_DEVICE);
+
+ first->txd.flags = flags;
+ first->len = len;
+
+ return &first->txd;
+
+err_desc_get:
+ dwc_desc_put(dwc, first);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long flags)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma_slave *dws = dwc->dws;
+ struct dw_desc *prev;
+ struct dw_desc *first;
+ u32 ctllo;
+ dma_addr_t reg;
+ unsigned int reg_width;
+ unsigned int mem_width;
+ unsigned int i;
+ struct scatterlist *sg;
+ size_t total_len = 0;
+
+ dev_vdbg(&chan->dev, "prep_slave_sg\n");
+
+ if (unlikely(!dws || !sg_len))
+ return NULL;
+
+ reg_width = dws->slave.reg_width;
+ prev = first = NULL;
+
+ sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction);
+
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ ctllo = (DWC_DEFAULT_CTLLO
+ | DWC_CTLL_DST_WIDTH(reg_width)
+ | DWC_CTLL_DST_FIX
+ | DWC_CTLL_SRC_INC
+ | DWC_CTLL_FC_M2P);
+ reg = dws->slave.tx_reg;
+ for_each_sg(sgl, sg, sg_len, i) {
+ struct dw_desc *desc;
+ u32 len;
+ u32 mem;
+
+ desc = dwc_desc_get(dwc);
+ if (!desc) {
+ dev_err(&chan->dev,
+ "not enough descriptors available\n");
+ goto err_desc_get;
+ }
+
+ mem = sg_phys(sg);
+ len = sg_dma_len(sg);
+ mem_width = 2;
+ if (unlikely(mem & 3 || len & 3))
+ mem_width = 0;
+
+ desc->lli.sar = mem;
+ desc->lli.dar = reg;
+ desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
+ desc->lli.ctlhi = len >> mem_width;
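+ /* ctlhi holds the block size counted in source-width transfers */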
+
+ if (!first) {
+ first = desc;
+ } else {
+ prev->lli.llp = desc->txd.phys;
+ dma_sync_single_for_device(chan->dev.parent,
+ prev->txd.phys,
+ sizeof(prev->lli),
+ DMA_TO_DEVICE);
+ list_add_tail(&desc->desc_node,
+ &first->txd.tx_list);
+ }
+ prev = desc;
+ total_len += len;
+ }
+ break;
+ case DMA_FROM_DEVICE:
+ ctllo = (DWC_DEFAULT_CTLLO
+ | DWC_CTLL_SRC_WIDTH(reg_width)
+ | DWC_CTLL_DST_INC
+ | DWC_CTLL_SRC_FIX
+ | DWC_CTLL_FC_P2M);
+
+ reg = dws->slave.rx_reg;
+ for_each_sg(sgl, sg, sg_len, i) {
+ struct dw_desc *desc;
+ u32 len;
+ u32 mem;
+
+ desc = dwc_desc_get(dwc);
+ if (!desc) {
+ dev_err(&chan->dev,
+ "not enough descriptors available\n");
+ goto err_desc_get;
+ }
+
+ mem = sg_phys(sg);
+ len = sg_dma_len(sg);
+ mem_width = 2;
+ if (unlikely(mem & 3 || len & 3))
+ mem_width = 0;
+
+ desc->lli.sar = reg;
+ desc->lli.dar = mem;
+ desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
+ desc->lli.ctlhi = len >> reg_width;
+
+ if (!first) {
+ first = desc;
+ } else {
+ prev->lli.llp = desc->txd.phys;
+ dma_sync_single_for_device(chan->dev.parent,
+ prev->txd.phys,
+ sizeof(prev->lli),
+ DMA_TO_DEVICE);
+ list_add_tail(&desc->desc_node,
+ &first->txd.tx_list);
+ }
+ prev = desc;
+ total_len += len;
+ }
+ break;
+ default:
+ return NULL;
+ }
+
+ if (flags & DMA_PREP_INTERRUPT)
+ /* Trigger interrupt after last block */
+ prev->lli.ctllo |= DWC_CTLL_INT_EN;
+
+ prev->lli.llp = 0;
+ dma_sync_single_for_device(chan->dev.parent,
+ prev->txd.phys, sizeof(prev->lli),
+ DMA_TO_DEVICE);
+
+ first->len = total_len;
+
+ return &first->txd;
+
+err_desc_get:
+ dwc_desc_put(dwc, first);
+ return NULL;
+}
+
+static void dwc_terminate_all(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(chan->device);
+ struct dw_desc *desc, *_desc;
+ LIST_HEAD(list);
+
+ /*
+ * This is only called when something went wrong elsewhere, so
+ * we don't really care about the data. Just disable the
+ * channel. We still have to poll the channel enable bit due
+ * to AHB/HSB limitations.
+ */
+ spin_lock_bh(&dwc->lock);
+
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+
+ /* active_list entries will end up before queued entries */
+ list_splice_init(&dwc->queue, &list);
+ list_splice_init(&dwc->active_list, &list);
+
+ spin_unlock_bh(&dwc->lock);
+
+ /* Flush all pending and queued descriptors */
+ list_for_each_entry_safe(desc, _desc, &list, desc_node)
+ dwc_descriptor_complete(dwc, desc);
+}
+
+static enum dma_status
+dwc_is_tx_complete(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ dma_cookie_t *done, dma_cookie_t *used)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ int ret;
+
+ last_complete = dwc->completed;
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+ if (ret != DMA_SUCCESS) {
+ dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+
+ last_complete = dwc->completed;
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+ }
+
+ if (done)
+ *done = last_complete;
+ if (used)
+ *used = last_used;
+
+ return ret;
+}
+
+static void dwc_issue_pending(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+
+ spin_lock_bh(&dwc->lock);
+ if (!list_empty(&dwc->queue))
+ dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+ spin_unlock_bh(&dwc->lock);
+}
+
+static int dwc_alloc_chan_resources(struct dma_chan *chan,
+ struct dma_client *client)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(chan->device);
+ struct dw_desc *desc;
+ struct dma_slave *slave;
+ struct dw_dma_slave *dws;
+ int i;
+ u32 cfghi;
+ u32 cfglo;
+
+ dev_vdbg(&chan->dev, "alloc_chan_resources\n");
+
+ /* Channels doing slave DMA can only handle one client. */
+ if (dwc->dws || client->slave) {
+ if (chan->client_count)
+ return -EBUSY;
+ }
+
+ /* ASSERT: channel is idle */
+ if (dma_readl(dw, CH_EN) & dwc->mask) {
+ dev_dbg(&chan->dev, "DMA channel not idle?\n");
+ return -EIO;
+ }
+
+ dwc->completed = chan->cookie = 1;
+
+ cfghi = DWC_CFGH_FIFO_MODE;
+ cfglo = 0;
+
+ slave = client->slave;
+ if (slave) {
+ /*
+ * We need controller-specific data to set up slave
+ * transfers.
+ */
+ BUG_ON(!slave->dma_dev || slave->dma_dev != dw->dma.dev);
+
+ dws = container_of(slave, struct dw_dma_slave, slave);
+
+ dwc->dws = dws;
+ cfghi = dws->cfg_hi;
+ cfglo = dws->cfg_lo;
+ } else {
+ dwc->dws = NULL;
+ }
+
+ channel_writel(dwc, CFG_LO, cfglo);
+ channel_writel(dwc, CFG_HI, cfghi);
+
+ /*
+ * NOTE: some controllers may have additional features that we
+ * need to initialize here, like "scatter-gather" (which
+ * doesn't mean what you think it means), and status writeback.
+ */
+
+ spin_lock_bh(&dwc->lock);
+ i = dwc->descs_allocated;
+ while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
+ spin_unlock_bh(&dwc->lock);
+
+ desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
+ if (!desc) {
+ dev_info(&chan->dev,
+ "only allocated %d descriptors\n", i);
+ spin_lock_bh(&dwc->lock);
+ break;
+ }
+
+ dma_async_tx_descriptor_init(&desc->txd, chan);
+ desc->txd.tx_submit = dwc_tx_submit;
+ desc->txd.flags = DMA_CTRL_ACK;
+ INIT_LIST_HEAD(&desc->txd.tx_list);
+ desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli,
+ sizeof(desc->lli), DMA_TO_DEVICE);
+ dwc_desc_put(dwc, desc);
+
+ spin_lock_bh(&dwc->lock);
+ i = ++dwc->descs_allocated;
+ }
+
+ /* Enable interrupts */
+ channel_set_bit(dw, MASK.XFER, dwc->mask);
+ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+ channel_set_bit(dw, MASK.ERROR, dwc->mask);
+
+ spin_unlock_bh(&dwc->lock);
+
+ dev_dbg(&chan->dev,
+ "alloc_chan_resources allocated %d descriptors\n", i);
+
+ return i;
+}
+
+static void dwc_free_chan_resources(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(chan->device);
+ struct dw_desc *desc, *_desc;
+ LIST_HEAD(list);
+
+ dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n",
+ dwc->descs_allocated);
+
+ /* ASSERT: channel is idle */
+ BUG_ON(!list_empty(&dwc->active_list));
+ BUG_ON(!list_empty(&dwc->queue));
+ BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
+
+ spin_lock_bh(&dwc->lock);
+ list_splice_init(&dwc->free_list, &list);
+ dwc->descs_allocated = 0;
+ dwc->dws = NULL;
+
+ /* Disable interrupts */
+ channel_clear_bit(dw, MASK.XFER, dwc->mask);
+ channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
+ channel_clear_bit(dw, MASK.ERROR, dwc->mask);
+
+ spin_unlock_bh(&dwc->lock);
+
+ list_for_each_entry_safe(desc, _desc, &list, desc_node) {
+ dev_vdbg(&chan->dev, " freeing descriptor %p\n", desc);
+ dma_unmap_single(chan->dev.parent, desc->txd.phys,
+ sizeof(desc->lli), DMA_TO_DEVICE);
+ kfree(desc);
+ }
+
+ dev_vdbg(&chan->dev, "free_chan_resources done\n");
+}
+
+/*----------------------------------------------------------------------*/
+
+static void dw_dma_off(struct dw_dma *dw)
+{
+ dma_writel(dw, CFG, 0);
+
+ channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+
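+ /* wait for the controller to acknowledge being disabled */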
+ while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
+ cpu_relax();
+}
+
+static int __init dw_probe(struct platform_device *pdev)
+{
+ struct dw_dma_platform_data *pdata;
+ struct resource *io;
+ struct dw_dma *dw;
+ size_t size;
+ int irq;
+ int err;
+ int i;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
+ return -EINVAL;
+
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!io)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ size = sizeof(struct dw_dma);
+ size += pdata->nr_channels * sizeof(struct dw_dma_chan);
+ dw = kzalloc(size, GFP_KERNEL);
+ if (!dw)
+ return -ENOMEM;
+
+ if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
+ err = -EBUSY;
+ goto err_kfree;
+ }
+
+ dw->regs = ioremap(io->start, DW_REGLEN);
+ if (!dw->regs) {
+ err = -ENOMEM;
+ goto err_release_r;
+ }
+
+ dw->clk = clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(dw->clk)) {
+ err = PTR_ERR(dw->clk);
+ goto err_clk;
+ }
+ clk_enable(dw->clk);
+
+ /* force dma off, just in case */
+ dw_dma_off(dw);
+
+ err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
+ if (err)
+ goto err_irq;
+
+ platform_set_drvdata(pdev, dw);
+
+ tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
+
+ dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
+
+ INIT_LIST_HEAD(&dw->dma.channels);
+ for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
+ struct dw_dma_chan *dwc = &dw->chan[i];
+
+ dwc->chan.device = &dw->dma;
+ dwc->chan.cookie = dwc->completed = 1;
+ dwc->chan.chan_id = i;
+ list_add_tail(&dwc->chan.device_node, &dw->dma.channels);
+
+ dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
+ spin_lock_init(&dwc->lock);
+ dwc->mask = 1 << i;
+
+ INIT_LIST_HEAD(&dwc->active_list);
+ INIT_LIST_HEAD(&dwc->queue);
+ INIT_LIST_HEAD(&dwc->free_list);
+
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ }
+
+ /* Clear/disable all interrupts on all channels. */
+ dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
+ dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
+ dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
+ dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
+ dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
+
+ channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+
+ dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
+ dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
+ dw->dma.dev = &pdev->dev;
+ dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
+ dw->dma.device_free_chan_resources = dwc_free_chan_resources;
+
+ dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
+
+ dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
+ dw->dma.device_terminate_all = dwc_terminate_all;
+
+ dw->dma.device_is_tx_complete = dwc_is_tx_complete;
+ dw->dma.device_issue_pending = dwc_issue_pending;
+
+ dma_writel(dw, CFG, DW_CFG_DMA_EN);
+
+ printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
+ pdev->dev.bus_id, dw->dma.chancnt);
+
+ dma_async_device_register(&dw->dma);
+
+ return 0;
+
+err_irq:
+ clk_disable(dw->clk);
+ clk_put(dw->clk);
+err_clk:
+ iounmap(dw->regs);
+ dw->regs = NULL;
+err_release_r:
+ release_mem_region(io->start, DW_REGLEN);
+err_kfree:
+ kfree(dw);
+ return err;
+}
+
+static int __exit dw_remove(struct platform_device *pdev)
+{
+ struct dw_dma *dw = platform_get_drvdata(pdev);
+ struct dw_dma_chan *dwc, *_dwc;
+ struct resource *io;
+
+ dw_dma_off(dw);
+ dma_async_device_unregister(&dw->dma);
+
+ free_irq(platform_get_irq(pdev, 0), dw);
+ tasklet_kill(&dw->tasklet);
+
+ list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
+ chan.device_node) {
+ list_del(&dwc->chan.device_node);
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ }
+
+ clk_disable(dw->clk);
+ clk_put(dw->clk);
+
+ iounmap(dw->regs);
+ dw->regs = NULL;
+
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(io->start, DW_REGLEN);
+
+ kfree(dw);
+
+ return 0;
+}
+
+static void dw_shutdown(struct platform_device *pdev)
+{
+ struct dw_dma *dw = platform_get_drvdata(pdev);
+
+ dw_dma_off(dw);
+ clk_disable(dw->clk);
+}
+
+static int dw_suspend_late(struct platform_device *pdev, pm_message_t mesg)
+{
+ struct dw_dma *dw = platform_get_drvdata(pdev);
+
+ dw_dma_off(dw);
+ clk_disable(dw->clk);
+ return 0;
+}
+
+static int dw_resume_early(struct platform_device *pdev)
+{
+ struct dw_dma *dw = platform_get_drvdata(pdev);
+
+ clk_enable(dw->clk);
+ dma_writel(dw, CFG, DW_CFG_DMA_EN);
+ return 0;
+}
+
+static struct platform_driver dw_driver = {
+ .remove = __exit_p(dw_remove),
+ .shutdown = dw_shutdown,
+ .suspend_late = dw_suspend_late,
+ .resume_early = dw_resume_early,
+ .driver = {
+ .name = "dw_dmac",
+ },
+};
+
+static int __init dw_init(void)
+{
+ return platform_driver_probe(&dw_driver, dw_probe);
+}
+module_init(dw_init);
+
+static void __exit dw_exit(void)
+{
+ platform_driver_unregister(&dw_driver);
+}
+module_exit(dw_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
+MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
new file mode 100644
index 00000000000..00fdd187bb0
--- /dev/null
+++ b/drivers/dma/dw_dmac_regs.h
@@ -0,0 +1,225 @@
+/*
+ * Driver for the Synopsys DesignWare AHB DMA Controller
+ *
+ * Copyright (C) 2005-2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dw_dmac.h>
+
+#define DW_DMA_MAX_NR_CHANNELS 8
+
+/*
+ * Redefine this macro to handle differences between 32- and 64-bit
+ * addressing, big vs. little endian, etc.
+ */
+#define DW_REG(name) u32 name; u32 __pad_##name
+
+/* Hardware register definitions. */
+struct dw_dma_chan_regs {
+ DW_REG(SAR); /* Source Address Register */
+ DW_REG(DAR); /* Destination Address Register */
+ DW_REG(LLP); /* Linked List Pointer */
+ u32 CTL_LO; /* Control Register Low */
+ u32 CTL_HI; /* Control Register High */
+ DW_REG(SSTAT);
+ DW_REG(DSTAT);
+ DW_REG(SSTATAR);
+ DW_REG(DSTATAR);
+ u32 CFG_LO; /* Configuration Register Low */
+ u32 CFG_HI; /* Configuration Register High */
+ DW_REG(SGR);
+ DW_REG(DSR);
+};
+
+struct dw_dma_irq_regs {
+ DW_REG(XFER);
+ DW_REG(BLOCK);
+ DW_REG(SRC_TRAN);
+ DW_REG(DST_TRAN);
+ DW_REG(ERROR);
+};
+
+struct dw_dma_regs {
+ /* per-channel registers */
+ struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS];
+
+ /* irq handling */
+ struct dw_dma_irq_regs RAW; /* r */
+ struct dw_dma_irq_regs STATUS; /* r (raw & mask) */
+ struct dw_dma_irq_regs MASK; /* rw (set = irq enabled) */
+ struct dw_dma_irq_regs CLEAR; /* w (ack, affects "raw") */
+
+ DW_REG(STATUS_INT); /* r */
+
+ /* software handshaking */
+ DW_REG(REQ_SRC);
+ DW_REG(REQ_DST);
+ DW_REG(SGL_REQ_SRC);
+ DW_REG(SGL_REQ_DST);
+ DW_REG(LAST_SRC);
+ DW_REG(LAST_DST);
+
+ /* miscellaneous */
+ DW_REG(CFG);
+ DW_REG(CH_EN);
+ DW_REG(ID);
+ DW_REG(TEST);
+
+ /* optional encoded params, 0x3c8..0x3f7 */
+};
+
+/* Bitfields in CTL_LO */
+#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */
+#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */
+#define DWC_CTLL_SRC_WIDTH(n) ((n)<<4)
+#define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */
+#define DWC_CTLL_DST_DEC (1<<7)
+#define DWC_CTLL_DST_FIX (2<<7)
+#define DWC_CTLL_SRC_INC (0<<9) /* SAR update/not */
+#define DWC_CTLL_SRC_DEC (1<<9)
+#define DWC_CTLL_SRC_FIX (2<<9)
+#define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */
+#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14)
+#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */
+#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */
+#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */
+#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
+#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
+#define DWC_CTLL_FC_P2P (3 << 20) /* periph-to-periph */
+/* plus 4 transfer types for peripheral-as-flow-controller */
+#define DWC_CTLL_DMS(n) ((n)<<23) /* dst master select */
+#define DWC_CTLL_SMS(n) ((n)<<25) /* src master select */
+#define DWC_CTLL_LLP_D_EN (1 << 27) /* dest block chain */
+#define DWC_CTLL_LLP_S_EN (1 << 28) /* src block chain */
+
+/* Bitfields in CTL_HI */
+#define DWC_CTLH_DONE 0x00001000
+#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
+
+/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
+#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
+#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* fifo drained (RO) */
+#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
+#define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */
+#define DWC_CFGL_MAX_BURST(x) ((x) << 20)
+#define DWC_CFGL_RELOAD_SAR (1 << 30)
+#define DWC_CFGL_RELOAD_DAR (1 << 31)
+
+/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
+#define DWC_CFGH_DS_UPD_EN (1 << 5)
+#define DWC_CFGH_SS_UPD_EN (1 << 6)
+
+/* Bitfields in SGR */
+#define DWC_SGR_SGI(x) ((x) << 0)
+#define DWC_SGR_SGC(x) ((x) << 20)
+
+/* Bitfields in DSR */
+#define DWC_DSR_DSI(x) ((x) << 0)
+#define DWC_DSR_DSC(x) ((x) << 20)
+
+/* Bitfields in CFG */
+#define DW_CFG_DMA_EN (1 << 0)
+
+#define DW_REGLEN 0x400
+
+struct dw_dma_chan {
+ struct dma_chan chan;
+ void __iomem *ch_regs;
+ u8 mask;
+
+ spinlock_t lock;
+
+ /* these other elements are all protected by lock */
+ dma_cookie_t completed;
+ struct list_head active_list;
+ struct list_head queue;
+ struct list_head free_list;
+
+ struct dw_dma_slave *dws;
+
+ unsigned int descs_allocated;
+};
+
+static inline struct dw_dma_chan_regs __iomem *
+__dwc_regs(struct dw_dma_chan *dwc)
+{
+ return dwc->ch_regs;
+}
+
+#define channel_readl(dwc, name) \
+ __raw_readl(&(__dwc_regs(dwc)->name))
+#define channel_writel(dwc, name, val) \
+ __raw_writel((val), &(__dwc_regs(dwc)->name))
+
+static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct dw_dma_chan, chan);
+}
+
+struct dw_dma {
+ struct dma_device dma;
+ void __iomem *regs;
+ struct tasklet_struct tasklet;
+ struct clk *clk;
+
+ u8 all_chan_mask;
+
+ struct dw_dma_chan chan[0];
+};
+
+static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
+{
+ return dw->regs;
+}
+
+#define dma_readl(dw, name) \
+ __raw_readl(&(__dw_regs(dw)->name))
+#define dma_writel(dw, name, val) \
+ __raw_writel((val), &(__dw_regs(dw)->name))
+
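+/*
+ * Channel-enable style registers take a write-enable mask in bits 8..15:
+ * a channel bit in the low byte is only updated when the corresponding
+ * high-byte bit is set, hence the mask written shifted left by 8.
+ */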
+#define channel_set_bit(dw, reg, mask) \
+ dma_writel(dw, reg, ((mask) << 8) | (mask))
+#define channel_clear_bit(dw, reg, mask) \
+ dma_writel(dw, reg, ((mask) << 8) | 0)
+
+static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
+{
+ return container_of(ddev, struct dw_dma, dma);
+}
+
+/* LLI == Linked List Item; a.k.a. DMA block descriptor */
+struct dw_lli {
+ /* values that are not changed by hardware */
+ dma_addr_t sar;
+ dma_addr_t dar;
+ dma_addr_t llp; /* chain to next lli */
+ u32 ctllo;
+ /* values that may get written back: */
+ u32 ctlhi;
+ /* sstat and dstat can snapshot peripheral register state.
+ * silicon config may discard either or both...
+ */
+ u32 sstat;
+ u32 dstat;
+};
+
+struct dw_desc {
+ /* FIRST values the hardware uses */
+ struct dw_lli lli;
+
+ /* THEN values for driver housekeeping */
+ struct list_head desc_node;
+ struct dma_async_tx_descriptor txd;
+ size_t len;
+};
+
+static inline struct dw_desc *
+txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
+{
+ return container_of(txd, struct dw_desc, txd);
+}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 054eabffc18..c0059ca5834 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -366,7 +366,8 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
*
* Return - The number of descriptors allocated.
*/
-static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
+static int fsl_dma_alloc_chan_resources(struct dma_chan *chan,
+ struct dma_client *client)
{
struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
LIST_HEAD(tmp_list);
@@ -809,8 +810,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
if (!src) {
dev_err(fsl_chan->dev,
"selftest: Cannot alloc memory for test!\n");
- err = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
dest = src + test_size;
@@ -820,7 +820,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
chan = &fsl_chan->common;
- if (fsl_dma_alloc_chan_resources(chan) < 1) {
+ if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
dev_err(fsl_chan->dev,
"selftest: Cannot alloc resources for DMA\n");
err = -ENODEV;
@@ -842,13 +842,13 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
dev_err(fsl_chan->dev, "selftest: Time out!\n");
err = -ENODEV;
- goto out;
+ goto free_resources;
}
/* Test free and re-alloc channel resources */
fsl_dma_free_chan_resources(chan);
- if (fsl_dma_alloc_chan_resources(chan) < 1) {
+ if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
dev_err(fsl_chan->dev,
"selftest: Cannot alloc resources for DMA\n");
err = -ENODEV;
@@ -927,8 +927,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
if (!new_fsl_chan) {
dev_err(&dev->dev, "No free memory for allocating "
"dma channels!\n");
- err = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
/* get dma channel register base */
@@ -936,7 +935,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
if (err) {
dev_err(&dev->dev, "Can't get %s property 'reg'\n",
dev->node->full_name);
- goto err;
+ goto err_no_reg;
}
new_fsl_chan->feature = *(u32 *)match->data;
@@ -958,7 +957,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
dev_err(&dev->dev, "There is no %d channel!\n",
new_fsl_chan->id);
err = -EINVAL;
- goto err;
+ goto err_no_chan;
}
fdev->chan[new_fsl_chan->id] = new_fsl_chan;
tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
@@ -997,23 +996,26 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
if (err) {
dev_err(&dev->dev, "DMA channel %s request_irq error "
"with return %d\n", dev->node->full_name, err);
- goto err;
+ goto err_no_irq;
}
}
err = fsl_dma_self_test(new_fsl_chan);
if (err)
- goto err;
+ goto err_self_test;
dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
match->compatible, new_fsl_chan->irq);
return 0;
-err:
- dma_halt(new_fsl_chan);
- iounmap(new_fsl_chan->reg_base);
+
+err_self_test:
free_irq(new_fsl_chan->irq, new_fsl_chan);
+err_no_irq:
list_del(&new_fsl_chan->common.device_node);
+err_no_chan:
+ iounmap(new_fsl_chan->reg_base);
+err_no_reg:
kfree(new_fsl_chan);
return err;
}
@@ -1054,8 +1056,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
if (!fdev) {
dev_err(&dev->dev, "No enough memory for 'priv'\n");
- err = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
fdev->dev = &dev->dev;
INIT_LIST_HEAD(&fdev->common.channels);
@@ -1065,7 +1066,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
if (err) {
dev_err(&dev->dev, "Can't get %s property 'reg'\n",
dev->node->full_name);
- goto err;
+ goto err_no_reg;
}
dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
@@ -1103,6 +1104,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
err:
iounmap(fdev->reg_base);
+err_no_reg:
kfree(fdev);
return err;
}
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
index 16e0fd8facf..9b16a3af9a0 100644
--- a/drivers/dma/ioat.c
+++ b/drivers/dma/ioat.c
@@ -47,6 +47,16 @@ static struct pci_device_id ioat_pci_tbl[] = {
/* I/OAT v2 platforms */
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
+
+ /* I/OAT v3 platforms */
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
{ 0, }
};
@@ -83,6 +93,11 @@ static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase)
if (device->dma && ioat_dca_enabled)
device->dca = ioat2_dca_init(pdev, iobase);
break;
+ case IOAT_VER_3_0:
+ device->dma = ioat_dma_probe(pdev, iobase);
+ if (device->dma && ioat_dca_enabled)
+ device->dca = ioat3_dca_init(pdev, iobase);
+ break;
default:
err = -ENODEV;
break;
diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat_dca.c
index 9e922760b7f..6cf622da028 100644
--- a/drivers/dma/ioat_dca.c
+++ b/drivers/dma/ioat_dca.c
@@ -37,12 +37,18 @@
#include "ioatdma_registers.h"
/*
- * Bit 16 of a tag map entry is the "valid" bit, if it is set then bits 0:15
+ * Bit 7 of a tag map entry is the "valid" bit; if it is set, bits 0:6
* contain the bit number of the APIC ID to map into the DCA tag. If the valid
* bit is not set, then the value must be 0 or 1 and defines the bit in the tag.
*/
#define DCA_TAG_MAP_VALID 0x80
+#define DCA3_TAG_MAP_BIT_TO_INV 0x80
+#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
+#define DCA3_TAG_MAP_LITERAL_VAL 0x1
+
+#define DCA_TAG_MAP_MASK 0xDF
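+
+/*
+ * I/OAT v3 tag map entries are decoded differently: bit 6 set selects an
+ * APIC ID bit directly, bit 7 set (alone) selects an inverted APIC ID bit,
+ * and otherwise bit 0 supplies a literal tag value (see ioat3_dca_get_tag).
+ */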
+
/*
* "Legacy" DCA systems do not implement the DCA register set in the
* I/OAT device. Software needs direct support for their tag mappings.
@@ -95,6 +101,7 @@ struct ioat_dca_slot {
};
#define IOAT_DCA_MAX_REQ 6
+#define IOAT3_DCA_MAX_REQ 2
struct ioat_dca_priv {
void __iomem *iobase;
@@ -171,7 +178,9 @@ static int ioat_dca_remove_requester(struct dca_provider *dca,
return -ENODEV;
}
-static u8 ioat_dca_get_tag(struct dca_provider *dca, int cpu)
+static u8 ioat_dca_get_tag(struct dca_provider *dca,
+ struct device *dev,
+ int cpu)
{
struct ioat_dca_priv *ioatdca = dca_priv(dca);
int i, apic_id, bit, value;
@@ -193,10 +202,26 @@ static u8 ioat_dca_get_tag(struct dca_provider *dca, int cpu)
return tag;
}
+static int ioat_dca_dev_managed(struct dca_provider *dca,
+ struct device *dev)
+{
+ struct ioat_dca_priv *ioatdca = dca_priv(dca);
+ struct pci_dev *pdev;
+ int i;
+
+ pdev = to_pci_dev(dev);
+ for (i = 0; i < ioatdca->max_requesters; i++) {
+ if (ioatdca->req_slots[i].pdev == pdev)
+ return 1;
+ }
+ return 0;
+}
+
static struct dca_ops ioat_dca_ops = {
.add_requester = ioat_dca_add_requester,
.remove_requester = ioat_dca_remove_requester,
.get_tag = ioat_dca_get_tag,
+ .dev_managed = ioat_dca_dev_managed,
};
@@ -207,6 +232,8 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
u8 *tag_map = NULL;
int i;
int err;
+ u8 version;
+ u8 max_requesters;
if (!system_has_dca_enabled(pdev))
return NULL;
@@ -237,15 +264,20 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
if (tag_map == NULL)
return NULL;
+ version = readb(iobase + IOAT_VER_OFFSET);
+ if (version == IOAT_VER_3_0)
+ max_requesters = IOAT3_DCA_MAX_REQ;
+ else
+ max_requesters = IOAT_DCA_MAX_REQ;
+
dca = alloc_dca_provider(&ioat_dca_ops,
sizeof(*ioatdca) +
- (sizeof(struct ioat_dca_slot) * IOAT_DCA_MAX_REQ));
+ (sizeof(struct ioat_dca_slot) * max_requesters));
if (!dca)
return NULL;
ioatdca = dca_priv(dca);
- ioatdca->max_requesters = IOAT_DCA_MAX_REQ;
-
+ ioatdca->max_requesters = max_requesters;
ioatdca->dca_base = iobase + 0x54;
/* copy over the APIC ID to DCA tag mapping */
@@ -323,11 +355,13 @@ static int ioat2_dca_remove_requester(struct dca_provider *dca,
return -ENODEV;
}
-static u8 ioat2_dca_get_tag(struct dca_provider *dca, int cpu)
+static u8 ioat2_dca_get_tag(struct dca_provider *dca,
+ struct device *dev,
+ int cpu)
{
u8 tag;
- tag = ioat_dca_get_tag(dca, cpu);
+ tag = ioat_dca_get_tag(dca, dev, cpu);
tag = (~tag) & 0x1F;
return tag;
}
@@ -336,6 +370,7 @@ static struct dca_ops ioat2_dca_ops = {
.add_requester = ioat2_dca_add_requester,
.remove_requester = ioat2_dca_remove_requester,
.get_tag = ioat2_dca_get_tag,
+ .dev_managed = ioat_dca_dev_managed,
};
static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
@@ -425,3 +460,198 @@ struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
return dca;
}
+
+static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
+{
+ struct ioat_dca_priv *ioatdca = dca_priv(dca);
+ struct pci_dev *pdev;
+ int i;
+ u16 id;
+ u16 global_req_table;
+
+ /* This implementation only supports PCI-Express */
+ if (dev->bus != &pci_bus_type)
+ return -ENODEV;
+ pdev = to_pci_dev(dev);
+ id = dcaid_from_pcidev(pdev);
+
+ if (ioatdca->requester_count == ioatdca->max_requesters)
+ return -ENODEV;
+
+ for (i = 0; i < ioatdca->max_requesters; i++) {
+ if (ioatdca->req_slots[i].pdev == NULL) {
+ /* found an empty slot */
+ ioatdca->requester_count++;
+ ioatdca->req_slots[i].pdev = pdev;
+ ioatdca->req_slots[i].rid = id;
+ global_req_table =
+ readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
+ writel(id | IOAT_DCA_GREQID_VALID,
+ ioatdca->iobase + global_req_table + (i * 4));
+ return i;
+ }
+ }
+ /* Error: ioatdca->requester_count is out of whack */
+ return -EFAULT;
+}
+
+static int ioat3_dca_remove_requester(struct dca_provider *dca,
+ struct device *dev)
+{
+ struct ioat_dca_priv *ioatdca = dca_priv(dca);
+ struct pci_dev *pdev;
+ int i;
+ u16 global_req_table;
+
+ /* This implementation only supports PCI-Express */
+ if (dev->bus != &pci_bus_type)
+ return -ENODEV;
+ pdev = to_pci_dev(dev);
+
+ for (i = 0; i < ioatdca->max_requesters; i++) {
+ if (ioatdca->req_slots[i].pdev == pdev) {
+ global_req_table =
+ readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
+ writel(0, ioatdca->iobase + global_req_table + (i * 4));
+ ioatdca->req_slots[i].pdev = NULL;
+ ioatdca->req_slots[i].rid = 0;
+ ioatdca->requester_count--;
+ return i;
+ }
+ }
+ return -ENODEV;
+}
+
+static u8 ioat3_dca_get_tag(struct dca_provider *dca,
+ struct device *dev,
+ int cpu)
+{
+ u8 tag;
+
+ struct ioat_dca_priv *ioatdca = dca_priv(dca);
+ int i, apic_id, bit, value;
+ u8 entry;
+
+ tag = 0;
+ apic_id = cpu_physical_id(cpu);
+
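+ /*
+ * Build the tag one bit per tag-map entry: each entry either selects
+ * a bit of the CPU's APIC ID (optionally inverted) or supplies a
+ * literal 0/1 value.
+ */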
+ for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
+ entry = ioatdca->tag_map[i];
+ if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
+ bit = entry &
+ ~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
+ value = (apic_id & (1 << bit)) ? 1 : 0;
+ } else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
+ bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
+ value = (apic_id & (1 << bit)) ? 0 : 1;
+ } else {
+ value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
+ }
+ tag |= (value << i);
+ }
+
+ return tag;
+}
+
+static struct dca_ops ioat3_dca_ops = {
+ .add_requester = ioat3_dca_add_requester,
+ .remove_requester = ioat3_dca_remove_requester,
+ .get_tag = ioat3_dca_get_tag,
+ .dev_managed = ioat_dca_dev_managed,
+};
+
+static int ioat3_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
+{
+ int slots = 0;
+ u32 req;
+ u16 global_req_table;
+
+ global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
+ if (global_req_table == 0)
+ return 0;
+
+ do {
+ req = readl(iobase + global_req_table + (slots * sizeof(u32)));
+ slots++;
+ } while ((req & IOAT_DCA_GREQID_LASTID) == 0);
+
+ return slots;
+}
+
+struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+{
+ struct dca_provider *dca;
+ struct ioat_dca_priv *ioatdca;
+ int slots;
+ int i;
+ int err;
+ u16 dca_offset;
+ u16 csi_fsb_control;
+ u16 pcie_control;
+ u8 bit;
+
+ union {
+ u64 full;
+ struct {
+ u32 low;
+ u32 high;
+ };
+ } tag_map;
+
+ if (!system_has_dca_enabled(pdev))
+ return NULL;
+
+ dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
+ if (dca_offset == 0)
+ return NULL;
+
+ slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
+ if (slots == 0)
+ return NULL;
+
+ dca = alloc_dca_provider(&ioat3_dca_ops,
+ sizeof(*ioatdca)
+ + (sizeof(struct ioat_dca_slot) * slots));
+ if (!dca)
+ return NULL;
+
+ ioatdca = dca_priv(dca);
+ ioatdca->iobase = iobase;
+ ioatdca->dca_base = iobase + dca_offset;
+ ioatdca->max_requesters = slots;
+
+ /* some BIOSes might not know to turn these on */
+ csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
+ if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
+ csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
+ writew(csi_fsb_control,
+ ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
+ }
+ pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
+ if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
+ pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
+ writew(pcie_control,
+ ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
+ }
+
+ /* TODO version, compatibility and configuration checks */
+
+ /* copy out the APIC to DCA tag map */
+ tag_map.low =
+ readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
+ tag_map.high =
+ readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
+ for (i = 0; i < 8; i++) {
+ bit = tag_map.full >> (8 * i);
+ ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
+ }
+
+ err = register_dca_provider(dca, &pdev->dev);
+ if (err) {
+ free_dca_provider(dca);
+ return NULL;
+ }
+
+ return dca;
+}
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 318e8a22d81..a52156e5688 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -32,6 +32,7 @@
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"
@@ -41,11 +42,23 @@
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
+#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
static int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
"high-water mark for pushing ioat descriptors (default: 4)");
+#define RESET_DELAY msecs_to_jiffies(100)
+#define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000))
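+/*
+ * RESET_DELAY gives the channel time to settle after IOAT_CHANCMD_RESET
+ * before reset_part2 rearms it; WATCHDOG_DELAY paces the stuck-channel scan.
+ */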
+static void ioat_dma_chan_reset_part2(struct work_struct *work);
+static void ioat_dma_chan_watchdog(struct work_struct *work);
+
+/*
+ * workaround for IOAT ver.3.0 null descriptor issue
+ * (channel returns error when size is 0)
+ */
+#define NULL_DESC_BUFFER_SIZE 1
+
/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
@@ -122,6 +135,38 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
int i;
struct ioat_dma_chan *ioat_chan;
+ /*
+ * IOAT ver.3 workarounds
+ */
+ if (device->version == IOAT_VER_3_0) {
+ u32 chan_err_mask;
+ u16 dev_id;
+ u32 dmauncerrsts;
+
+ /*
+ * Write CHANERRMSK_INT with 3E07h to mask out the errors
+ * that can cause stability issues for IOAT ver.3
+ */
+ chan_err_mask = 0x3E07;
+ pci_write_config_dword(device->pdev,
+ IOAT_PCI_CHANERRMASK_INT_OFFSET,
+ chan_err_mask);
+
+ /*
+ * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+ * (workaround for spurious config parity error after restart)
+ */
+ pci_read_config_word(device->pdev,
+ IOAT_PCI_DEVICE_ID_OFFSET,
+ &dev_id);
+ if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
+ dmauncerrsts = 0x10;
+ pci_write_config_dword(device->pdev,
+ IOAT_PCI_DMAUNCERRSTS_OFFSET,
+ dmauncerrsts);
+ }
+ }
+
device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
@@ -137,6 +182,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
ioat_chan->xfercap = xfercap;
ioat_chan->desccount = 0;
+ INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
if (ioat_chan->device->version != IOAT_VER_1_2) {
writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
| IOAT_DMA_DCA_ANY_CPU,
@@ -175,7 +221,7 @@ static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
- if (ioat_chan->pending != 0) {
+ if (ioat_chan->pending > 0) {
spin_lock_bh(&ioat_chan->desc_lock);
__ioat1_dma_memcpy_issue_pending(ioat_chan);
spin_unlock_bh(&ioat_chan->desc_lock);
@@ -194,13 +240,228 @@ static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
- if (ioat_chan->pending != 0) {
+ if (ioat_chan->pending > 0) {
spin_lock_bh(&ioat_chan->desc_lock);
__ioat2_dma_memcpy_issue_pending(ioat_chan);
spin_unlock_bh(&ioat_chan->desc_lock);
}
}
+
+/**
+ * ioat_dma_chan_reset_part2 - reinit the channel after a reset
+ */
+static void ioat_dma_chan_reset_part2(struct work_struct *work)
+{
+ struct ioat_dma_chan *ioat_chan =
+ container_of(work, struct ioat_dma_chan, work.work);
+ struct ioat_desc_sw *desc;
+
+ spin_lock_bh(&ioat_chan->cleanup_lock);
+ spin_lock_bh(&ioat_chan->desc_lock);
+
+ ioat_chan->completion_virt->low = 0;
+ ioat_chan->completion_virt->high = 0;
+ ioat_chan->pending = 0;
+
+ /*
+ * count the descriptors waiting, and be sure to do it
+ * right for both the CB1 line and the CB2 ring
+ */
+ ioat_chan->dmacount = 0;
+ if (ioat_chan->used_desc.prev) {
+ desc = to_ioat_desc(ioat_chan->used_desc.prev);
+ do {
+ ioat_chan->dmacount++;
+ desc = to_ioat_desc(desc->node.next);
+ } while (&desc->node != ioat_chan->used_desc.next);
+ }
+
+ /*
+ * write the new starting descriptor address
+ * this puts channel engine into ARMED state
+ */
+ desc = to_ioat_desc(ioat_chan->used_desc.prev);
+ switch (ioat_chan->device->version) {
+ case IOAT_VER_1_2:
+ writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+ ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
+ writel(((u64) desc->async_tx.phys) >> 32,
+ ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
+
+ writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
+ + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+ break;
+ case IOAT_VER_2_0:
+ writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+ ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+ writel(((u64) desc->async_tx.phys) >> 32,
+ ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+
+ /* tell the engine to go with what's left to be done */
+ writew(ioat_chan->dmacount,
+ ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+
+ break;
+ }
+ dev_err(&ioat_chan->device->pdev->dev,
+ "chan%d reset - %d descs waiting, %d total desc\n",
+ chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
+
+ spin_unlock_bh(&ioat_chan->desc_lock);
+ spin_unlock_bh(&ioat_chan->cleanup_lock);
+}
+
+/**
+ * ioat_dma_reset_channel - restart a channel
+ * @ioat_chan: IOAT DMA channel handle
+ */
+static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
+{
+ u32 chansts, chanerr;
+
+ if (!ioat_chan->used_desc.prev)
+ return;
+
+ chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+ chansts = (ioat_chan->completion_virt->low
+ & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
+ if (chanerr) {
+ dev_err(&ioat_chan->device->pdev->dev,
+ "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
+ chan_num(ioat_chan), chansts, chanerr);
+ writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+ }
+
+ /*
+ * whack it upside the head with a reset
+ * and wait for things to settle out.
+ * force the pending count to a really big negative
+ * to make sure no one forces an issue_pending
+ * while we're waiting.
+ */
+
+ spin_lock_bh(&ioat_chan->desc_lock);
+ ioat_chan->pending = INT_MIN;
+ writeb(IOAT_CHANCMD_RESET,
+ ioat_chan->reg_base
+ + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+ spin_unlock_bh(&ioat_chan->desc_lock);
+
+ /* schedule the 2nd half instead of sleeping a long time */
+ schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
+}
+
+/**
+ * ioat_dma_chan_watchdog - watch for stuck channels
+ */
+static void ioat_dma_chan_watchdog(struct work_struct *work)
+{
+ struct ioatdma_device *device =
+ container_of(work, struct ioatdma_device, work.work);
+ struct ioat_dma_chan *ioat_chan;
+ int i;
+
+ union {
+ u64 full;
+ struct {
+ u32 low;
+ u32 high;
+ };
+ } completion_hw;
+ unsigned long compl_desc_addr_hw;
+
+ for (i = 0; i < device->common.chancnt; i++) {
+ ioat_chan = ioat_lookup_chan_by_index(device, i);
+
+ if (ioat_chan->device->version == IOAT_VER_1_2
+ /* have we started processing anything yet? */
+ && ioat_chan->last_completion
+ /* have we completed any since last watchdog cycle? */
+ && (ioat_chan->last_completion ==
+ ioat_chan->watchdog_completion)
+ /* has TCP been stuck on one cookie since the last watchdog? */
+ && (ioat_chan->watchdog_tcp_cookie ==
+ ioat_chan->watchdog_last_tcp_cookie)
+ && (ioat_chan->watchdog_tcp_cookie !=
+ ioat_chan->completed_cookie)
+ /* is there something in the chain to be processed? */
+ /* CB1 chain always has at least the last one processed */
+ && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
+ && ioat_chan->pending == 0) {
+
+ /*
+ * check CHANSTS register for completed
+ * descriptor address.
+ * if it is different than completion writeback,
+ * it is not zero
+ * and it has changed since the last watchdog
+ * we can assume that channel
+ * is still working correctly
+ * and the problem is in completion writeback.
+ * update completion writeback
+ * with actual CHANSTS value
+ * else
+ * try resetting the channel
+ */
+
+ completion_hw.low = readl(ioat_chan->reg_base +
+ IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
+ completion_hw.high = readl(ioat_chan->reg_base +
+ IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
+#if (BITS_PER_LONG == 64)
+ compl_desc_addr_hw =
+ completion_hw.full
+ & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+#else
+ compl_desc_addr_hw =
+ completion_hw.low & IOAT_LOW_COMPLETION_MASK;
+#endif
+
+ if ((compl_desc_addr_hw != 0)
+ && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
+ && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
+ ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
+ ioat_chan->completion_virt->low = completion_hw.low;
+ ioat_chan->completion_virt->high = completion_hw.high;
+ } else {
+ ioat_dma_reset_channel(ioat_chan);
+ ioat_chan->watchdog_completion = 0;
+ ioat_chan->last_compl_desc_addr_hw = 0;
+ }
+
+ /*
+ * For version 2.0: if there are descriptors yet to be
+ * processed and the last completed descriptor hasn't changed
+ * since the last watchdog pass, either issue the pending
+ * descriptors to push them through (when below the pending
+ * level) or try resetting the channel.
+ */
+ } else if (ioat_chan->device->version == IOAT_VER_2_0
+ && ioat_chan->used_desc.prev
+ && ioat_chan->last_completion
+ && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
+
+ if (ioat_chan->pending < ioat_pending_level)
+ ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
+ else {
+ ioat_dma_reset_channel(ioat_chan);
+ ioat_chan->watchdog_completion = 0;
+ }
+ } else {
+ ioat_chan->last_compl_desc_addr_hw = 0;
+ ioat_chan->watchdog_completion
+ = ioat_chan->last_completion;
+ }
+
+ ioat_chan->watchdog_last_tcp_cookie =
+ ioat_chan->watchdog_tcp_cookie;
+ }
+
+ schedule_delayed_work(&device->work, WATCHDOG_DELAY);
+}
+
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
@@ -250,6 +511,13 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
prev = new;
} while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
+ if (!new) {
+ dev_err(&ioat_chan->device->pdev->dev,
+ "tx submit failed\n");
+ spin_unlock_bh(&ioat_chan->desc_lock);
+ return -ENOMEM;
+ }
+
hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
if (new->async_tx.callback) {
hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
@@ -335,7 +603,14 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
desc_count++;
} while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
- hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
+ if (!new) {
+ dev_err(&ioat_chan->device->pdev->dev,
+ "tx submit failed\n");
+ spin_unlock_bh(&ioat_chan->desc_lock);
+ return -ENOMEM;
+ }
+
+ hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
if (new->async_tx.callback) {
hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
if (first != new) {
@@ -406,6 +681,7 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
desc_sw->async_tx.tx_submit = ioat1_tx_submit;
break;
case IOAT_VER_2_0:
+ case IOAT_VER_3_0:
desc_sw->async_tx.tx_submit = ioat2_tx_submit;
break;
}
@@ -452,7 +728,8 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
* ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
* @chan: the channel to be filled out
*/
-static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
+static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
+ struct dma_client *client)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
struct ioat_desc_sw *desc;
@@ -555,6 +832,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
}
break;
case IOAT_VER_2_0:
+ case IOAT_VER_3_0:
list_for_each_entry_safe(desc, _desc,
ioat_chan->free_desc.next, node) {
list_del(&desc->node);
@@ -585,6 +863,10 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
ioat_chan->last_completion = ioat_chan->completion_addr = 0;
ioat_chan->pending = 0;
ioat_chan->dmacount = 0;
+ ioat_chan->watchdog_completion = 0;
+ ioat_chan->last_compl_desc_addr_hw = 0;
+ ioat_chan->watchdog_tcp_cookie =
+ ioat_chan->watchdog_last_tcp_cookie = 0;
}
/**
@@ -640,7 +922,8 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
/* set up the noop descriptor */
noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
- noop_desc->hw->size = 0;
+ /* set size to non-zero value (channel returns error when size is 0) */
+ noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
noop_desc->hw->src_addr = 0;
noop_desc->hw->dst_addr = 0;
@@ -690,6 +973,7 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
return ioat1_dma_get_next_descriptor(ioat_chan);
break;
case IOAT_VER_2_0:
+ case IOAT_VER_3_0:
return ioat2_dma_get_next_descriptor(ioat_chan);
break;
}
@@ -716,8 +1000,12 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
new->src = dma_src;
new->async_tx.flags = flags;
return &new->async_tx;
- } else
+ } else {
+ dev_err(&ioat_chan->device->pdev->dev,
+ "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
+ chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
return NULL;
+ }
}
static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
@@ -744,8 +1032,13 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
new->src = dma_src;
new->async_tx.flags = flags;
return &new->async_tx;
- } else
+ } else {
+ spin_unlock_bh(&ioat_chan->desc_lock);
+ dev_err(&ioat_chan->device->pdev->dev,
+ "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
+ chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
return NULL;
+ }
}
static void ioat_dma_cleanup_tasklet(unsigned long data)
@@ -756,6 +1049,27 @@ static void ioat_dma_cleanup_tasklet(unsigned long data)
chan->reg_base + IOAT_CHANCTRL_OFFSET);
}
+static void
+ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
+{
+ /*
+ * yes we are unmapping both _page and _single
+ * alloc'd regions with unmap_page. Is this
+ * *really* that bad?
+ */
+ if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
+ pci_unmap_page(ioat_chan->device->pdev,
+ pci_unmap_addr(desc, dst),
+ pci_unmap_len(desc, len),
+ PCI_DMA_FROMDEVICE);
+
+ if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
+ pci_unmap_page(ioat_chan->device->pdev,
+ pci_unmap_addr(desc, src),
+ pci_unmap_len(desc, len),
+ PCI_DMA_TODEVICE);
+}
+
/**
* ioat_dma_memcpy_cleanup - cleanup up finished descriptors
* @chan: ioat channel to be cleaned up
@@ -799,11 +1113,27 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
if (phys_complete == ioat_chan->last_completion) {
spin_unlock_bh(&ioat_chan->cleanup_lock);
+ /*
+ * perhaps we're stuck so hard that the watchdog can't go off?
+ * try to catch it after 2 seconds
+ */
+ if (ioat_chan->device->version != IOAT_VER_3_0) {
+ if (time_after(jiffies,
+ ioat_chan->last_completion_time + WATCHDOG_DELAY)) {
+ ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
+ ioat_chan->last_completion_time = jiffies;
+ }
+ }
return;
}
+ ioat_chan->last_completion_time = jiffies;
cookie = 0;
- spin_lock_bh(&ioat_chan->desc_lock);
+ if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
+ spin_unlock_bh(&ioat_chan->cleanup_lock);
+ return;
+ }
+
switch (ioat_chan->device->version) {
case IOAT_VER_1_2:
list_for_each_entry_safe(desc, _desc,
@@ -816,21 +1146,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
*/
if (desc->async_tx.cookie) {
cookie = desc->async_tx.cookie;
-
- /*
- * yes we are unmapping both _page and _single
- * alloc'd regions with unmap_page. Is this
- * *really* that bad?
- */
- pci_unmap_page(ioat_chan->device->pdev,
- pci_unmap_addr(desc, dst),
- pci_unmap_len(desc, len),
- PCI_DMA_FROMDEVICE);
- pci_unmap_page(ioat_chan->device->pdev,
- pci_unmap_addr(desc, src),
- pci_unmap_len(desc, len),
- PCI_DMA_TODEVICE);
-
+ ioat_dma_unmap(ioat_chan, desc);
if (desc->async_tx.callback) {
desc->async_tx.callback(desc->async_tx.callback_param);
desc->async_tx.callback = NULL;
@@ -862,6 +1178,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
}
break;
case IOAT_VER_2_0:
+ case IOAT_VER_3_0:
/* has some other thread already cleaned up? */
if (ioat_chan->used_desc.prev == NULL)
break;
@@ -889,16 +1206,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
if (desc->async_tx.cookie) {
cookie = desc->async_tx.cookie;
desc->async_tx.cookie = 0;
-
- pci_unmap_page(ioat_chan->device->pdev,
- pci_unmap_addr(desc, dst),
- pci_unmap_len(desc, len),
- PCI_DMA_FROMDEVICE);
- pci_unmap_page(ioat_chan->device->pdev,
- pci_unmap_addr(desc, src),
- pci_unmap_len(desc, len),
- PCI_DMA_TODEVICE);
-
+ ioat_dma_unmap(ioat_chan, desc);
if (desc->async_tx.callback) {
desc->async_tx.callback(desc->async_tx.callback_param);
desc->async_tx.callback = NULL;
@@ -943,6 +1251,7 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
last_used = chan->cookie;
last_complete = ioat_chan->completed_cookie;
+ ioat_chan->watchdog_tcp_cookie = cookie;
if (done)
*done = last_complete;
@@ -973,10 +1282,19 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
spin_lock_bh(&ioat_chan->desc_lock);
desc = ioat_dma_get_next_descriptor(ioat_chan);
+
+ if (!desc) {
+ dev_err(&ioat_chan->device->pdev->dev,
+ "Unable to start null desc - get next desc failed\n");
+ spin_unlock_bh(&ioat_chan->desc_lock);
+ return;
+ }
+
desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
| IOAT_DMA_DESCRIPTOR_CTL_INT_GN
| IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
- desc->hw->size = 0;
+ /* set size to non-zero value (channel returns error when size is 0) */
+ desc->hw->size = NULL_DESC_BUFFER_SIZE;
desc->hw->src_addr = 0;
desc->hw->dst_addr = 0;
async_tx_ack(&desc->async_tx);
@@ -994,6 +1312,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
break;
case IOAT_VER_2_0:
+ case IOAT_VER_3_0:
writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
writel(((u64) desc->async_tx.phys) >> 32,
@@ -1049,7 +1368,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
dma_chan = container_of(device->common.channels.next,
struct dma_chan,
device_node);
- if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
+ if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
dev_err(&device->pdev->dev,
"selftest cannot allocate chan resource\n");
err = -ENODEV;
@@ -1312,6 +1631,7 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
ioat1_dma_memcpy_issue_pending;
break;
case IOAT_VER_2_0:
+ case IOAT_VER_3_0:
device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
device->common.device_issue_pending =
ioat2_dma_memcpy_issue_pending;
@@ -1331,8 +1651,16 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
if (err)
goto err_self_test;
+ ioat_set_tcp_copy_break(device);
+
dma_async_device_register(&device->common);
+ if (device->version != IOAT_VER_3_0) {
+ INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
+ schedule_delayed_work(&device->work,
+ WATCHDOG_DELAY);
+ }
+
return device;
err_self_test:
@@ -1365,6 +1693,10 @@ void ioat_dma_remove(struct ioatdma_device *device)
pci_release_regions(device->pdev);
pci_disable_device(device->pdev);
+ if (device->version != IOAT_VER_3_0) {
+ cancel_delayed_work(&device->work);
+ }
+
list_for_each_entry_safe(chan, _chan,
&device->common.channels, device_node) {
ioat_chan = to_ioat_chan(chan);
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h
index f2c7fedbf00..a3306d0e137 100644
--- a/drivers/dma/ioatdma.h
+++ b/drivers/dma/ioatdma.h
@@ -27,8 +27,9 @@
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
+#include <net/tcp.h>
-#define IOAT_DMA_VERSION "2.04"
+#define IOAT_DMA_VERSION "3.30"
enum ioat_interrupt {
none = 0,
@@ -40,6 +41,7 @@ enum ioat_interrupt {
#define IOAT_LOW_COMPLETION_MASK 0xffffffc0
#define IOAT_DMA_DCA_ANY_CPU ~0
+#define IOAT_WATCHDOG_PERIOD (2 * HZ)
/**
@@ -62,6 +64,7 @@ struct ioatdma_device {
struct dma_device common;
u8 version;
enum ioat_interrupt irq_mode;
+ struct delayed_work work;
struct msix_entry msix_entries[4];
struct ioat_dma_chan *idx[4];
};
@@ -75,6 +78,7 @@ struct ioat_dma_chan {
dma_cookie_t completed_cookie;
unsigned long last_completion;
+ unsigned long last_completion_time;
size_t xfercap; /* XFERCAP register value expanded out */
@@ -82,6 +86,10 @@ struct ioat_dma_chan {
spinlock_t desc_lock;
struct list_head free_desc;
struct list_head used_desc;
+ unsigned long watchdog_completion;
+ int watchdog_tcp_cookie;
+ u32 watchdog_last_tcp_cookie;
+ struct delayed_work work;
int pending;
int dmacount;
@@ -98,6 +106,7 @@ struct ioat_dma_chan {
u32 high;
};
} *completion_virt;
+ unsigned long last_compl_desc_addr_hw;
struct tasklet_struct cleanup_task;
};
@@ -121,17 +130,34 @@ struct ioat_desc_sw {
struct dma_async_tx_descriptor async_tx;
};
+static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev)
+{
+ #ifdef CONFIG_NET_DMA
+ switch (dev->version) {
+ case IOAT_VER_1_2:
+ case IOAT_VER_3_0:
+ sysctl_tcp_dma_copybreak = 4096;
+ break;
+ case IOAT_VER_2_0:
+ sysctl_tcp_dma_copybreak = 2048;
+ break;
+ }
+ #endif
+}
+
#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE)
struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
void __iomem *iobase);
void ioat_dma_remove(struct ioatdma_device *device);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
#else
#define ioat_dma_probe(pdev, iobase) NULL
#define ioat_dma_remove(device) do { } while (0)
#define ioat_dca_init(pdev, iobase) NULL
#define ioat2_dca_init(pdev, iobase) NULL
+#define ioat3_dca_init(pdev, iobase) NULL
#endif
#endif /* IOATDMA_H */
diff --git a/drivers/dma/ioatdma_hw.h b/drivers/dma/ioatdma_hw.h
index dd470fa91d8..f1ae2c776f7 100644
--- a/drivers/dma/ioatdma_hw.h
+++ b/drivers/dma/ioatdma_hw.h
@@ -35,6 +35,7 @@
#define IOAT_PCI_SID 0x8086
#define IOAT_VER_1_2 0x12 /* Version 1.2 */
#define IOAT_VER_2_0 0x20 /* Version 2.0 */
+#define IOAT_VER_3_0 0x30 /* Version 3.0 */
struct ioat_dma_descriptor {
uint32_t size;
diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioatdma_registers.h
index 9832d7ebd93..827cb503cac 100644
--- a/drivers/dma/ioatdma_registers.h
+++ b/drivers/dma/ioatdma_registers.h
@@ -25,6 +25,10 @@
#define IOAT_PCI_DMACTRL_DMA_EN 0x00000001
#define IOAT_PCI_DMACTRL_MSI_EN 0x00000002
+#define IOAT_PCI_DEVICE_ID_OFFSET 0x02
+#define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148
+#define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184
+
/* MMIO Device Registers */
#define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */
@@ -149,7 +153,23 @@
#define IOAT_DCA_GREQID_VALID 0x20000000
#define IOAT_DCA_GREQID_LASTID 0x80000000
+#define IOAT3_CSI_CAPABILITY_OFFSET 0x08
+#define IOAT3_CSI_CAPABILITY_PREFETCH 0x1
+
+#define IOAT3_PCI_CAPABILITY_OFFSET 0x0A
+#define IOAT3_PCI_CAPABILITY_MEMWR 0x1
+
+#define IOAT3_CSI_CONTROL_OFFSET 0x0C
+#define IOAT3_CSI_CONTROL_PREFETCH 0x1
+
+#define IOAT3_PCI_CONTROL_OFFSET 0x0E
+#define IOAT3_PCI_CONTROL_MEMWR 0x1
+
+#define IOAT3_APICID_TAG_MAP_OFFSET 0x10
+#define IOAT3_APICID_TAG_MAP_OFFSET_LOW 0x10
+#define IOAT3_APICID_TAG_MAP_OFFSET_HIGH 0x14
+#define IOAT3_DCA_GREQID_OFFSET 0x02
#define IOAT1_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */
#define IOAT2_CHAINADDR_OFFSET 0x10 /* 64-bit Descriptor Chain Address Register */
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 0ec0f431e6a..85bfeba4d85 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -82,17 +82,24 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
struct device *dev =
&iop_chan->device->pdev->dev;
u32 len = unmap->unmap_len;
- u32 src_cnt = unmap->unmap_src_cnt;
- dma_addr_t addr = iop_desc_get_dest_addr(unmap,
- iop_chan);
-
- dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
- while (src_cnt--) {
- addr = iop_desc_get_src_addr(unmap,
- iop_chan,
- src_cnt);
- dma_unmap_page(dev, addr, len,
- DMA_TO_DEVICE);
+ enum dma_ctrl_flags flags = desc->async_tx.flags;
+ u32 src_cnt;
+ dma_addr_t addr;
+
+ if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ addr = iop_desc_get_dest_addr(unmap, iop_chan);
+ dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+ }
+
+ if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ src_cnt = unmap->unmap_src_cnt;
+ while (src_cnt--) {
+ addr = iop_desc_get_src_addr(unmap,
+ iop_chan,
+ src_cnt);
+ dma_unmap_page(dev, addr, len,
+ DMA_TO_DEVICE);
+ }
}
desc->group_head = NULL;
}
@@ -366,8 +373,8 @@ retry:
if (!retry++)
goto retry;
- /* try to free some slots if the allocation fails */
- tasklet_schedule(&iop_chan->irq_tasklet);
+ /* perform direct reclaim if the allocation fails */
+ __iop_adma_slot_cleanup(iop_chan);
return NULL;
}
@@ -443,8 +450,18 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
-/* returns the number of allocated descriptors */
-static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
+/**
+ * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
+ * @chan - allocate descriptor resources for this channel
+ * @client - current client requesting the channel be ready for requests
+ *
+ * Note: We keep the slots for 1 operation on iop_chan->chain at all times. To
+ * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
+ * greater than 2x the number of slots needed to satisfy a device->max_xor
+ * request.
+ */
+static int iop_adma_alloc_chan_resources(struct dma_chan *chan,
+ struct dma_client *client)
{
char *hw_desc;
int idx;
@@ -838,7 +855,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
dma_chan = container_of(device->common.channels.next,
struct dma_chan,
device_node);
- if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
+ if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
err = -ENODEV;
goto out;
}
@@ -936,7 +953,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
dma_chan = container_of(device->common.channels.next,
struct dma_chan,
device_node);
- if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
+ if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
err = -ENODEV;
goto out;
}
@@ -1387,6 +1404,8 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
spin_unlock_bh(&iop_chan->lock);
}
+MODULE_ALIAS("platform:iop-adma");
+
static struct platform_driver iop_adma_driver = {
.probe = iop_adma_probe,
.remove = iop_adma_remove,
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
new file mode 100644
index 00000000000..a4e4494663b
--- /dev/null
+++ b/drivers/dma/mv_xor.c
@@ -0,0 +1,1375 @@
+/*
+ * offload engine driver for the Marvell XOR engine
+ * Copyright (C) 2007, 2008, Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/async_tx.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/memory.h>
+#include <asm/plat-orion/mv_xor.h>
+#include "mv_xor.h"
+
+static void mv_xor_issue_pending(struct dma_chan *chan);
+
+#define to_mv_xor_chan(chan) \
+ container_of(chan, struct mv_xor_chan, common)
+
+#define to_mv_xor_device(dev) \
+ container_of(dev, struct mv_xor_device, common)
+
+#define to_mv_xor_slot(tx) \
+ container_of(tx, struct mv_xor_desc_slot, async_tx)
+
+static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
+{
+ struct mv_xor_desc *hw_desc = desc->hw_desc;
+
+ hw_desc->status = (1 << 31);
+ hw_desc->phy_next_desc = 0;
+ hw_desc->desc_command = (1 << 31);
+}
+
+static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
+{
+ struct mv_xor_desc *hw_desc = desc->hw_desc;
+ return hw_desc->phy_dest_addr;
+}
+
+static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
+ int src_idx)
+{
+ struct mv_xor_desc *hw_desc = desc->hw_desc;
+ return hw_desc->phy_src_addr[src_idx];
+}
+
+static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
+ u32 byte_count)
+{
+ struct mv_xor_desc *hw_desc = desc->hw_desc;
+ hw_desc->byte_count = byte_count;
+}
+
+static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
+ u32 next_desc_addr)
+{
+ struct mv_xor_desc *hw_desc = desc->hw_desc;
+ BUG_ON(hw_desc->phy_next_desc);
+ hw_desc->phy_next_desc = next_desc_addr;
+}
+
+static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
+{
+ struct mv_xor_desc *hw_desc = desc->hw_desc;
+ hw_desc->phy_next_desc = 0;
+}
+
+static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
+{
+ desc->value = val;
+}
+
+static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
+ dma_addr_t addr)
+{
+ struct mv_xor_desc *hw_desc = desc->hw_desc;
+ hw_desc->phy_dest_addr = addr;
+}
+
+static int mv_chan_memset_slot_count(size_t len)
+{
+ return 1;
+}
+
+#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
+
+static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
+ int index, dma_addr_t addr)
+{
+ struct mv_xor_desc *hw_desc = desc->hw_desc;
+ hw_desc->phy_src_addr[index] = addr;
+ if (desc->type == DMA_XOR)
+ hw_desc->desc_command |= (1 << index);
+}
+
+static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
+{
+ return __raw_readl(XOR_CURR_DESC(chan));
+}
+
+static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
+ u32 next_desc_addr)
+{
+ __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
+}
+
+static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
+{
+ __raw_writel(desc_addr, XOR_DEST_POINTER(chan));
+}
+
+static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
+{
+ __raw_writel(block_size, XOR_BLOCK_SIZE(chan));
+}
+
+static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
+{
+ __raw_writel(value, XOR_INIT_VALUE_LOW(chan));
+ __raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
+}
+
+static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
+{
+ u32 val = __raw_readl(XOR_INTR_MASK(chan));
+ val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
+ __raw_writel(val, XOR_INTR_MASK(chan));
+}
+
+static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
+{
+ u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
+ intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
+ return intr_cause;
+}
+
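+/*
+ * Bits 0..3 of the per-channel cause field appear to report normal
+ * completion events, while bits 4..9 report error conditions;
+ * mv_xor_err_interrupt_handler() ignores the bit-4 case and treats
+ * the remaining error bits as fatal.
+ */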
+static int mv_is_err_intr(u32 intr_cause)
+{
+ if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
+ return 1;
+
+ return 0;
+}
+
+static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
+{
+ u32 val = (1 << (1 + (chan->idx * 16)));
+ dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
+ __raw_writel(val, XOR_INTR_CAUSE(chan));
+}
+
+static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
+{
+ u32 val = 0xFFFF0000 >> (chan->idx * 16);
+ __raw_writel(val, XOR_INTR_CAUSE(chan));
+}
+
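+/*
+ * Descriptors of different operation types cannot share one hardware
+ * chain, and memset requests never chain at all: they are programmed
+ * straight into the engine registers (see mv_xor_start_new_chain())
+ * rather than through a descriptor.
+ */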
+static int mv_can_chain(struct mv_xor_desc_slot *desc)
+{
+ struct mv_xor_desc_slot *chain_old_tail = list_entry(
+ desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);
+
+ if (chain_old_tail->type != desc->type)
+ return 0;
+ if (desc->type == DMA_MEMSET)
+ return 0;
+
+ return 1;
+}
+
+static void mv_set_mode(struct mv_xor_chan *chan,
+ enum dma_transaction_type type)
+{
+ u32 op_mode;
+ u32 config = __raw_readl(XOR_CONFIG(chan));
+
+ switch (type) {
+ case DMA_XOR:
+ op_mode = XOR_OPERATION_MODE_XOR;
+ break;
+ case DMA_MEMCPY:
+ op_mode = XOR_OPERATION_MODE_MEMCPY;
+ break;
+ case DMA_MEMSET:
+ op_mode = XOR_OPERATION_MODE_MEMSET;
+ break;
+ default:
+ dev_printk(KERN_ERR, chan->device->common.dev,
+ "error: unsupported operation %d.\n",
+ type);
+ BUG();
+ return;
+ }
+
+ config &= ~0x7;
+ config |= op_mode;
+ __raw_writel(config, XOR_CONFIG(chan));
+ chan->current_type = type;
+}
+
+static void mv_chan_activate(struct mv_xor_chan *chan)
+{
+ u32 activation;
+
+ dev_dbg(chan->device->common.dev, " activate chan.\n");
+ activation = __raw_readl(XOR_ACTIVATION(chan));
+ activation |= 0x1;
+ __raw_writel(activation, XOR_ACTIVATION(chan));
+}
+
+static char mv_chan_is_busy(struct mv_xor_chan *chan)
+{
+ u32 state = __raw_readl(XOR_ACTIVATION(chan));
+
+ state = (state >> 4) & 0x3;
+
+ return (state == 1) ? 1 : 0;
+}
+
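+/*
+ * Every operation fits in a single 64-byte slot: the hardware
+ * descriptor carries up to eight source addresses, so even a maximal
+ * XOR request needs only one descriptor, hence the constant below.
+ */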
+static int mv_chan_xor_slot_count(size_t len, int src_cnt)
+{
+ return 1;
+}
+
+/**
+ * mv_xor_free_slots - flags descriptor slots for reuse
+ * @mv_chan: channel that owns the slot
+ * @slot: slot to free
+ * Caller must hold &mv_chan->lock while calling this function
+ */
+static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
+ struct mv_xor_desc_slot *slot)
+{
+ dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
+ __func__, __LINE__, slot);
+
+ slot->slots_per_op = 0;
+
+}
+
+/*
+ * mv_xor_start_new_chain - program the engine to operate on new chain headed by
+ * sw_desc
+ * Caller must hold &mv_chan->lock while calling this function
+ */
+static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
+ struct mv_xor_desc_slot *sw_desc)
+{
+ dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
+ __func__, __LINE__, sw_desc);
+ if (sw_desc->type != mv_chan->current_type)
+ mv_set_mode(mv_chan, sw_desc->type);
+
+ if (sw_desc->type == DMA_MEMSET) {
+ /* memset requests are programmed directly into the engine
+ * registers; no descriptors are used.
+ */
+ struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
+ mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
+ mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
+ mv_chan_set_value(mv_chan, sw_desc->value);
+ } else {
+ /* set the hardware chain */
+ mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
+ }
+ mv_chan->pending += sw_desc->slot_cnt;
+ mv_xor_issue_pending(&mv_chan->common);
+}
+
+static dma_cookie_t
+mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
+ struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
+{
+ BUG_ON(desc->async_tx.cookie < 0);
+
+ if (desc->async_tx.cookie > 0) {
+ cookie = desc->async_tx.cookie;
+
+ /* call the callback (must not sleep or submit new
+ * operations to this channel)
+ */
+ if (desc->async_tx.callback)
+ desc->async_tx.callback(
+ desc->async_tx.callback_param);
+
+ /* unmap dma addresses
+ * (unmap_single vs unmap_page?)
+ */
+ if (desc->group_head && desc->unmap_len) {
+ struct mv_xor_desc_slot *unmap = desc->group_head;
+ struct device *dev =
+ &mv_chan->device->pdev->dev;
+ u32 len = unmap->unmap_len;
+ enum dma_ctrl_flags flags = desc->async_tx.flags;
+ u32 src_cnt;
+ dma_addr_t addr;
+
+ if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ addr = mv_desc_get_dest_addr(unmap);
+ dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+ }
+
+ if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ src_cnt = unmap->unmap_src_cnt;
+ while (src_cnt--) {
+ addr = mv_desc_get_src_addr(unmap,
+ src_cnt);
+ dma_unmap_page(dev, addr, len,
+ DMA_TO_DEVICE);
+ }
+ }
+ desc->group_head = NULL;
+ }
+ }
+
+ /* run dependent operations */
+ async_tx_run_dependencies(&desc->async_tx);
+
+ return cookie;
+}
+
+static int
+mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
+{
+ struct mv_xor_desc_slot *iter, *_iter;
+
+ dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
+ list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
+ completed_node) {
+
+ if (async_tx_test_ack(&iter->async_tx)) {
+ list_del(&iter->completed_node);
+ mv_xor_free_slots(mv_chan, iter);
+ }
+ }
+ return 0;
+}
+
+static int
+mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
+ struct mv_xor_chan *mv_chan)
+{
+ dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
+ __func__, __LINE__, desc, desc->async_tx.flags);
+ list_del(&desc->chain_node);
+ /* the client is allowed to attach dependent operations
+ * until 'ack' is set
+ */
+ if (!async_tx_test_ack(&desc->async_tx)) {
+ /* move this slot to the completed_slots */
+ list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
+ return 0;
+ }
+
+ mv_xor_free_slots(mv_chan, desc);
+ return 0;
+}
+
+static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
+{
+ struct mv_xor_desc_slot *iter, *_iter;
+ dma_cookie_t cookie = 0;
+ int busy = mv_chan_is_busy(mv_chan);
+ u32 current_desc = mv_chan_get_current_desc(mv_chan);
+ int seen_current = 0;
+
+ dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
+ dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
+ mv_xor_clean_completed_slots(mv_chan);
+
+ /* free completed slots from the chain starting with
+ * the oldest descriptor
+ */
+
+ list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
+ chain_node) {
+ prefetch(_iter);
+ prefetch(&_iter->async_tx);
+
+ /* do not advance past the current descriptor loaded into the
+ * hardware channel, subsequent descriptors are either in
+ * process or have not been submitted
+ */
+ if (seen_current)
+ break;
+
+ /* stop the search if we reach the current descriptor and the
+ * channel is busy
+ */
+ if (iter->async_tx.phys == current_desc) {
+ seen_current = 1;
+ if (busy)
+ break;
+ }
+
+ cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
+
+ if (mv_xor_clean_slot(iter, mv_chan))
+ break;
+ }
+
+ if ((busy == 0) && !list_empty(&mv_chan->chain)) {
+ struct mv_xor_desc_slot *chain_head;
+ chain_head = list_entry(mv_chan->chain.next,
+ struct mv_xor_desc_slot,
+ chain_node);
+
+ mv_xor_start_new_chain(mv_chan, chain_head);
+ }
+
+ if (cookie > 0)
+ mv_chan->completed_cookie = cookie;
+}
+
+static void
+mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
+{
+ spin_lock_bh(&mv_chan->lock);
+ __mv_xor_slot_cleanup(mv_chan);
+ spin_unlock_bh(&mv_chan->lock);
+}
+
+static void mv_xor_tasklet(unsigned long data)
+{
+ struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
+ __mv_xor_slot_cleanup(chan);
+}
+
+static struct mv_xor_desc_slot *
+mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
+ int slots_per_op)
+{
+ struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
+ LIST_HEAD(chain);
+ int slots_found, retry = 0;
+
+ /* start search from the last allocated descriptor
+ * if a contiguous allocation cannot be found, start searching
+ * from the beginning of the list
+ */
+retry:
+ slots_found = 0;
+ if (retry == 0)
+ iter = mv_chan->last_used;
+ else
+ iter = list_entry(&mv_chan->all_slots,
+ struct mv_xor_desc_slot,
+ slot_node);
+
+ list_for_each_entry_safe_continue(
+ iter, _iter, &mv_chan->all_slots, slot_node) {
+ prefetch(_iter);
+ prefetch(&_iter->async_tx);
+ if (iter->slots_per_op) {
+ /* give up after finding the first busy slot
+ * on the second pass through the list
+ */
+ if (retry)
+ break;
+
+ slots_found = 0;
+ continue;
+ }
+
+ /* start the allocation if the slot is correctly aligned */
+ if (!slots_found++)
+ alloc_start = iter;
+
+ if (slots_found == num_slots) {
+ struct mv_xor_desc_slot *alloc_tail = NULL;
+ struct mv_xor_desc_slot *last_used = NULL;
+ iter = alloc_start;
+ while (num_slots) {
+ int i;
+
+ /* pre-ack all but the last descriptor */
+ async_tx_ack(&iter->async_tx);
+
+ list_add_tail(&iter->chain_node, &chain);
+ alloc_tail = iter;
+ iter->async_tx.cookie = 0;
+ iter->slot_cnt = num_slots;
+ iter->xor_check_result = NULL;
+ for (i = 0; i < slots_per_op; i++) {
+ iter->slots_per_op = slots_per_op - i;
+ last_used = iter;
+ iter = list_entry(iter->slot_node.next,
+ struct mv_xor_desc_slot,
+ slot_node);
+ }
+ num_slots -= slots_per_op;
+ }
+ alloc_tail->group_head = alloc_start;
+ alloc_tail->async_tx.cookie = -EBUSY;
+ list_splice(&chain, &alloc_tail->async_tx.tx_list);
+ mv_chan->last_used = last_used;
+ mv_desc_clear_next_desc(alloc_start);
+ mv_desc_clear_next_desc(alloc_tail);
+ return alloc_tail;
+ }
+ }
+ if (!retry++)
+ goto retry;
+
+ /* try to free some slots if the allocation fails */
+ tasklet_schedule(&mv_chan->irq_tasklet);
+
+ return NULL;
+}
+
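+/*
+ * Cookies come from a monotonically increasing per-channel counter;
+ * on signed overflow the counter wraps back to 1, since in the
+ * dmaengine API a cookie <= 0 denotes an unassigned descriptor or an
+ * error rather than a valid transaction id.
+ */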
+static dma_cookie_t
+mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
+ struct mv_xor_desc_slot *desc)
+{
+ dma_cookie_t cookie = mv_chan->common.cookie;
+
+ if (++cookie < 0)
+ cookie = 1;
+ mv_chan->common.cookie = desc->async_tx.cookie = cookie;
+ return cookie;
+}
+
+/************************ DMA engine API functions ****************************/
+static dma_cookie_t
+mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
+ struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
+ struct mv_xor_desc_slot *grp_start, *old_chain_tail;
+ dma_cookie_t cookie;
+ int new_hw_chain = 1;
+
+ dev_dbg(mv_chan->device->common.dev,
+ "%s sw_desc %p: async_tx %p\n",
+ __func__, sw_desc, &sw_desc->async_tx);
+
+ grp_start = sw_desc->group_head;
+
+ spin_lock_bh(&mv_chan->lock);
+ cookie = mv_desc_assign_cookie(mv_chan, sw_desc);
+
+ if (list_empty(&mv_chan->chain))
+ list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain);
+ else {
+ new_hw_chain = 0;
+
+ old_chain_tail = list_entry(mv_chan->chain.prev,
+ struct mv_xor_desc_slot,
+ chain_node);
+ list_splice_init(&grp_start->async_tx.tx_list,
+ &old_chain_tail->chain_node);
+
+ if (!mv_can_chain(grp_start))
+ goto submit_done;
+
+ dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
+ old_chain_tail->async_tx.phys);
+
+ /* fix up the hardware chain */
+ mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
+
+ /* if the channel is not busy */
+ if (!mv_chan_is_busy(mv_chan)) {
+ u32 current_desc = mv_chan_get_current_desc(mv_chan);
+ /*
+ * and the current desc is the end of the chain before
+ * the append, then we need to start the channel
+ */
+ if (current_desc == old_chain_tail->async_tx.phys)
+ new_hw_chain = 1;
+ }
+ }
+
+ if (new_hw_chain)
+ mv_xor_start_new_chain(mv_chan, grp_start);
+
+submit_done:
+ spin_unlock_bh(&mv_chan->lock);
+
+ return cookie;
+}
+
+/* returns the number of allocated descriptors */
+static int mv_xor_alloc_chan_resources(struct dma_chan *chan,
+ struct dma_client *client)
+{
+ char *hw_desc;
+ int idx;
+ struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+ struct mv_xor_desc_slot *slot = NULL;
+ struct mv_xor_platform_data *plat_data =
+ mv_chan->device->pdev->dev.platform_data;
+ int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;
+
+ /* Allocate descriptor slots */
+ idx = mv_chan->slots_allocated;
+ while (idx < num_descs_in_pool) {
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot) {
+ printk(KERN_INFO "MV XOR Channel only initialized"
+ " %d descriptor slots", idx);
+ break;
+ }
+ hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
+ slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+
+ dma_async_tx_descriptor_init(&slot->async_tx, chan);
+ slot->async_tx.tx_submit = mv_xor_tx_submit;
+ INIT_LIST_HEAD(&slot->chain_node);
+ INIT_LIST_HEAD(&slot->slot_node);
+ INIT_LIST_HEAD(&slot->async_tx.tx_list);
+ hw_desc = (char *) mv_chan->device->dma_desc_pool;
+ slot->async_tx.phys =
+ (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+ slot->idx = idx++;
+
+ spin_lock_bh(&mv_chan->lock);
+ mv_chan->slots_allocated = idx;
+ list_add_tail(&slot->slot_node, &mv_chan->all_slots);
+ spin_unlock_bh(&mv_chan->lock);
+ }
+
+ if (mv_chan->slots_allocated && !mv_chan->last_used)
+ mv_chan->last_used = list_entry(mv_chan->all_slots.next,
+ struct mv_xor_desc_slot,
+ slot_node);
+
+ dev_dbg(mv_chan->device->common.dev,
+ "allocated %d descriptor slots last_used: %p\n",
+ mv_chan->slots_allocated, mv_chan->last_used);
+
+ return mv_chan->slots_allocated ? : -ENOMEM;
+}
+
+static struct dma_async_tx_descriptor *
+mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+ struct mv_xor_desc_slot *sw_desc, *grp_start;
+ int slot_cnt;
+
+ dev_dbg(mv_chan->device->common.dev,
+ "%s dest: %x src %x len: %u flags: %ld\n",
+ __func__, dest, src, len, flags);
+ if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
+ return NULL;
+
+ BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+
+ spin_lock_bh(&mv_chan->lock);
+ slot_cnt = mv_chan_memcpy_slot_count(len);
+ sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
+ if (sw_desc) {
+ sw_desc->type = DMA_MEMCPY;
+ sw_desc->async_tx.flags = flags;
+ grp_start = sw_desc->group_head;
+ mv_desc_init(grp_start, flags);
+ mv_desc_set_byte_count(grp_start, len);
+ mv_desc_set_dest_addr(sw_desc->group_head, dest);
+ mv_desc_set_src_addr(grp_start, 0, src);
+ sw_desc->unmap_src_cnt = 1;
+ sw_desc->unmap_len = len;
+ }
+ spin_unlock_bh(&mv_chan->lock);
+
+ dev_dbg(mv_chan->device->common.dev,
+ "%s sw_desc %p async_tx %p\n",
+ __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+static struct dma_async_tx_descriptor *
+mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
+ size_t len, unsigned long flags)
+{
+ struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+ struct mv_xor_desc_slot *sw_desc, *grp_start;
+ int slot_cnt;
+
+ dev_dbg(mv_chan->device->common.dev,
+ "%s dest: %x len: %u flags: %ld\n",
+ __func__, dest, len, flags);
+ if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
+ return NULL;
+
+ BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+
+ spin_lock_bh(&mv_chan->lock);
+ slot_cnt = mv_chan_memset_slot_count(len);
+ sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
+ if (sw_desc) {
+ sw_desc->type = DMA_MEMSET;
+ sw_desc->async_tx.flags = flags;
+ grp_start = sw_desc->group_head;
+ mv_desc_init(grp_start, flags);
+ mv_desc_set_byte_count(grp_start, len);
+ mv_desc_set_dest_addr(sw_desc->group_head, dest);
+ mv_desc_set_block_fill_val(grp_start, value);
+ sw_desc->unmap_src_cnt = 1;
+ sw_desc->unmap_len = len;
+ }
+ spin_unlock_bh(&mv_chan->lock);
+ dev_dbg(mv_chan->device->common.dev,
+ "%s sw_desc %p async_tx %p \n",
+ __func__, sw_desc, &sw_desc->async_tx);
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+static struct dma_async_tx_descriptor *
+mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+ unsigned int src_cnt, size_t len, unsigned long flags)
+{
+ struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+ struct mv_xor_desc_slot *sw_desc, *grp_start;
+ int slot_cnt;
+
+ if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
+ return NULL;
+
+ BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
+
+ dev_dbg(mv_chan->device->common.dev,
+ "%s src_cnt: %d len: dest %x %u flags: %ld\n",
+ __func__, src_cnt, len, dest, flags);
+
+ spin_lock_bh(&mv_chan->lock);
+ slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
+ sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
+ if (sw_desc) {
+ sw_desc->type = DMA_XOR;
+ sw_desc->async_tx.flags = flags;
+ grp_start = sw_desc->group_head;
+ mv_desc_init(grp_start, flags);
+ /* the byte count field is the same as in memcpy desc*/
+ mv_desc_set_byte_count(grp_start, len);
+ mv_desc_set_dest_addr(sw_desc->group_head, dest);
+ sw_desc->unmap_src_cnt = src_cnt;
+ sw_desc->unmap_len = len;
+ while (src_cnt--)
+ mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
+ }
+ spin_unlock_bh(&mv_chan->lock);
+ dev_dbg(mv_chan->device->common.dev,
+ "%s sw_desc %p async_tx %p \n",
+ __func__, sw_desc, &sw_desc->async_tx);
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+static void mv_xor_free_chan_resources(struct dma_chan *chan)
+{
+ struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+ struct mv_xor_desc_slot *iter, *_iter;
+ int in_use_descs = 0;
+
+ mv_xor_slot_cleanup(mv_chan);
+
+ spin_lock_bh(&mv_chan->lock);
+ list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
+ chain_node) {
+ in_use_descs++;
+ list_del(&iter->chain_node);
+ }
+ list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
+ completed_node) {
+ in_use_descs++;
+ list_del(&iter->completed_node);
+ }
+ list_for_each_entry_safe_reverse(
+ iter, _iter, &mv_chan->all_slots, slot_node) {
+ list_del(&iter->slot_node);
+ kfree(iter);
+ mv_chan->slots_allocated--;
+ }
+ mv_chan->last_used = NULL;
+
+ dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
+ __func__, mv_chan->slots_allocated);
+ spin_unlock_bh(&mv_chan->lock);
+
+ if (in_use_descs)
+ dev_err(mv_chan->device->common.dev,
+ "freeing %d in use descriptors!\n", in_use_descs);
+}
+
+/**
+ * mv_xor_is_complete - poll the status of an XOR transaction
+ * @chan: XOR channel handle
+ * @cookie: XOR transaction identifier
+ */
+static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ dma_cookie_t *done,
+ dma_cookie_t *used)
+{
+ struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ enum dma_status ret;
+
+ last_used = chan->cookie;
+ last_complete = mv_chan->completed_cookie;
+ mv_chan->is_complete_cookie = cookie;
+ if (done)
+ *done = last_complete;
+ if (used)
+ *used = last_used;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+ if (ret == DMA_SUCCESS) {
+ mv_xor_clean_completed_slots(mv_chan);
+ return ret;
+ }
+ mv_xor_slot_cleanup(mv_chan);
+
+ last_used = chan->cookie;
+ last_complete = mv_chan->completed_cookie;
+
+ if (done)
+ *done = last_complete;
+ if (used)
+ *used = last_used;
+
+ return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+static void mv_dump_xor_regs(struct mv_xor_chan *chan)
+{
+ u32 val;
+
+ val = __raw_readl(XOR_CONFIG(chan));
+ dev_printk(KERN_ERR, chan->device->common.dev,
+ "config 0x%08x.\n", val);
+
+ val = __raw_readl(XOR_ACTIVATION(chan));
+ dev_printk(KERN_ERR, chan->device->common.dev,
+ "activation 0x%08x.\n", val);
+
+ val = __raw_readl(XOR_INTR_CAUSE(chan));
+ dev_printk(KERN_ERR, chan->device->common.dev,
+ "intr cause 0x%08x.\n", val);
+
+ val = __raw_readl(XOR_INTR_MASK(chan));
+ dev_printk(KERN_ERR, chan->device->common.dev,
+ "intr mask 0x%08x.\n", val);
+
+ val = __raw_readl(XOR_ERROR_CAUSE(chan));
+ dev_printk(KERN_ERR, chan->device->common.dev,
+ "error cause 0x%08x.\n", val);
+
+ val = __raw_readl(XOR_ERROR_ADDR(chan));
+ dev_printk(KERN_ERR, chan->device->common.dev,
+ "error addr 0x%08x.\n", val);
+}
+
+static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
+ u32 intr_cause)
+{
+ if (intr_cause & (1 << 4)) {
+ dev_dbg(chan->device->common.dev,
+ "ignore this error\n");
+ return;
+ }
+
+ dev_printk(KERN_ERR, chan->device->common.dev,
+ "error on chan %d. intr cause 0x%08x.\n",
+ chan->idx, intr_cause);
+
+ mv_dump_xor_regs(chan);
+ BUG();
+}
+
+static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
+{
+ struct mv_xor_chan *chan = data;
+ u32 intr_cause = mv_chan_get_intr_cause(chan);
+
+ dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);
+
+ if (mv_is_err_intr(intr_cause))
+ mv_xor_err_interrupt_handler(chan, intr_cause);
+
+ tasklet_schedule(&chan->irq_tasklet);
+
+ mv_xor_device_clear_eoc_cause(chan);
+
+ return IRQ_HANDLED;
+}
+
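+/*
+ * With MV_XOR_THRESHOLD defined as 1 in mv_xor.h, this activates the
+ * channel on every issue_pending call; a larger threshold would batch
+ * several submitted descriptors into a single activation.
+ */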
+static void mv_xor_issue_pending(struct dma_chan *chan)
+{
+ struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+
+ if (mv_chan->pending >= MV_XOR_THRESHOLD) {
+ mv_chan->pending = 0;
+ mv_chan_activate(mv_chan);
+ }
+}
+
+/*
+ * Perform a transaction to verify the HW works.
+ */
+#define MV_XOR_TEST_SIZE 2000
+
+static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
+{
+ int i;
+ void *src, *dest;
+ dma_addr_t src_dma, dest_dma;
+ struct dma_chan *dma_chan;
+ dma_cookie_t cookie;
+ struct dma_async_tx_descriptor *tx;
+ int err = 0;
+ struct mv_xor_chan *mv_chan;
+
+ src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+ if (!src)
+ return -ENOMEM;
+
+ dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+ if (!dest) {
+ kfree(src);
+ return -ENOMEM;
+ }
+
+ /* Fill in src buffer */
+ for (i = 0; i < MV_XOR_TEST_SIZE; i++)
+ ((u8 *) src)[i] = (u8)i;
+
+ /* Start copy, using first DMA channel */
+ dma_chan = container_of(device->common.channels.next,
+ struct dma_chan,
+ device_node);
+ if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ dest_dma = dma_map_single(dma_chan->device->dev, dest,
+ MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
+
+ src_dma = dma_map_single(dma_chan->device->dev, src,
+ MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
+
+ tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
+ MV_XOR_TEST_SIZE, 0);
+ cookie = mv_xor_tx_submit(tx);
+ mv_xor_issue_pending(dma_chan);
+ async_tx_ack(tx);
+ msleep(1);
+
+ if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
+ DMA_SUCCESS) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test copy timed out, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ mv_chan = to_mv_xor_chan(dma_chan);
+ dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
+ MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
+ if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test copy failed compare, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+free_resources:
+ mv_xor_free_chan_resources(dma_chan);
+out:
+ kfree(src);
+ kfree(dest);
+ return err;
+}
+
+#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
+static int __devinit
+mv_xor_xor_self_test(struct mv_xor_device *device)
+{
+ int i, src_idx;
+ struct page *dest;
+ struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
+ dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
+ dma_addr_t dest_dma;
+ struct dma_async_tx_descriptor *tx;
+ struct dma_chan *dma_chan;
+ dma_cookie_t cookie;
+ u8 cmp_byte = 0;
+ u32 cmp_word;
+ int err = 0;
+ struct mv_xor_chan *mv_chan;
+
+ for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+ xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
+ if (!xor_srcs[src_idx]) {
+ while (src_idx--)
+ __free_page(xor_srcs[src_idx]);
+ return -ENOMEM;
+ }
+ }
+
+ dest = alloc_page(GFP_KERNEL);
+ if (!dest) {
+ while (src_idx--)
+ __free_page(xor_srcs[src_idx]);
+ return -ENOMEM;
+ }
+
+ /* Fill in src buffers */
+ for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+ u8 *ptr = page_address(xor_srcs[src_idx]);
+ for (i = 0; i < PAGE_SIZE; i++)
+ ptr[i] = (1 << src_idx);
+ }
+
+ for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
+ cmp_byte ^= (u8) (1 << src_idx);
+
+ cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
+ (cmp_byte << 8) | cmp_byte;
+
+ memset(page_address(dest), 0, PAGE_SIZE);
+
+ dma_chan = container_of(device->common.channels.next,
+ struct dma_chan,
+ device_node);
+ if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ /* test xor */
+ dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+
+ for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
+ dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+ 0, PAGE_SIZE, DMA_TO_DEVICE);
+
+ tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
+ MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
+
+ cookie = mv_xor_tx_submit(tx);
+ mv_xor_issue_pending(dma_chan);
+ async_tx_ack(tx);
+ msleep(8);
+
+ if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
+ DMA_SUCCESS) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test xor timed out, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ mv_chan = to_mv_xor_chan(dma_chan);
+ dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
+ u32 *ptr = page_address(dest);
+ if (ptr[i] != cmp_word) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test xor failed compare, disabling."
+ " index %d, data %x, expected %x\n", i,
+ ptr[i], cmp_word);
+ err = -ENODEV;
+ goto free_resources;
+ }
+ }
+
+free_resources:
+ mv_xor_free_chan_resources(dma_chan);
+out:
+ src_idx = MV_XOR_NUM_SRC_TEST;
+ while (src_idx--)
+ __free_page(xor_srcs[src_idx]);
+ __free_page(dest);
+ return err;
+}
+
+static int __devexit mv_xor_remove(struct platform_device *dev)
+{
+ struct mv_xor_device *device = platform_get_drvdata(dev);
+ struct dma_chan *chan, *_chan;
+ struct mv_xor_chan *mv_chan;
+ struct mv_xor_platform_data *plat_data = dev->dev.platform_data;
+
+ dma_async_device_unregister(&device->common);
+
+ dma_free_coherent(&dev->dev, plat_data->pool_size,
+ device->dma_desc_pool_virt, device->dma_desc_pool);
+
+ list_for_each_entry_safe(chan, _chan, &device->common.channels,
+ device_node) {
+ mv_chan = to_mv_xor_chan(chan);
+ list_del(&chan->device_node);
+ }
+
+ return 0;
+}
+
+static int __devinit mv_xor_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ int irq;
+ struct mv_xor_device *adev;
+ struct mv_xor_chan *mv_chan;
+ struct dma_device *dma_dev;
+ struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
+
+ adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ dma_dev = &adev->common;
+
+ /* allocate coherent memory for hardware descriptors
+ * note: writecombine gives slightly better performance, but
+ * requires that we explicitly flush the writes
+ */
+ adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
+ plat_data->pool_size,
+ &adev->dma_desc_pool,
+ GFP_KERNEL);
+ if (!adev->dma_desc_pool_virt)
+ return -ENOMEM;
+
+ adev->id = plat_data->hw_id;
+
+ /* discover transaction capabilities from the platform data */
+ dma_dev->cap_mask = plat_data->cap_mask;
+ adev->pdev = pdev;
+ platform_set_drvdata(pdev, adev);
+
+ adev->shared = platform_get_drvdata(plat_data->shared);
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ /* set base routines */
+ dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
+ dma_dev->device_is_tx_complete = mv_xor_is_complete;
+ dma_dev->device_issue_pending = mv_xor_issue_pending;
+ dma_dev->dev = &pdev->dev;
+
+ /* set prep routines based on capability */
+ if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
+ dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
+ if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
+ dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
+ if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
+ dma_dev->max_xor = 8;
+ dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
+ }
+
+ mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
+ if (!mv_chan) {
+ ret = -ENOMEM;
+ goto err_free_dma;
+ }
+ mv_chan->device = adev;
+ mv_chan->idx = plat_data->hw_id;
+ mv_chan->mmr_base = adev->shared->xor_base;
+
+ if (!mv_chan->mmr_base) {
+ ret = -ENOMEM;
+ goto err_free_dma;
+ }
+ tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
+ mv_chan);
+
+ /* clear errors before enabling interrupts */
+ mv_xor_device_clear_err_status(mv_chan);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_free_dma;
+ }
+ ret = devm_request_irq(&pdev->dev, irq,
+ mv_xor_interrupt_handler,
+ 0, dev_name(&pdev->dev), mv_chan);
+ if (ret)
+ goto err_free_dma;
+
+ mv_chan_unmask_interrupts(mv_chan);
+
+ mv_set_mode(mv_chan, DMA_MEMCPY);
+
+ spin_lock_init(&mv_chan->lock);
+ INIT_LIST_HEAD(&mv_chan->chain);
+ INIT_LIST_HEAD(&mv_chan->completed_slots);
+ INIT_LIST_HEAD(&mv_chan->all_slots);
+ INIT_RCU_HEAD(&mv_chan->common.rcu);
+ mv_chan->common.device = dma_dev;
+
+ list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
+
+ if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
+ ret = mv_xor_memcpy_self_test(adev);
+ dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
+ if (ret)
+ goto err_free_dma;
+ }
+
+ if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
+ ret = mv_xor_xor_self_test(adev);
+ dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
+ if (ret)
+ goto err_free_dma;
+ }
+
+ dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
+ "( %s%s%s%s)\n",
+ dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
+ dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
+ dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
+ dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
+
+ dma_async_device_register(dma_dev);
+ goto out;
+
+ err_free_dma:
+ dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
+ adev->dma_desc_pool_virt, adev->dma_desc_pool);
+ out:
+ return ret;
+}
+
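+/*
+ * Program the XOR unit's MBUS address decoding windows from the DRAM
+ * chip-select layout supplied by the platform, so the engine can
+ * master transactions to every populated DRAM bank.  The win_enable
+ * word appears to pack one enable bit per window in its low half and
+ * a 2-bit access-control field per window in its upper half.
+ */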
+static void
+mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
+ struct mbus_dram_target_info *dram)
+{
+ void __iomem *base = msp->xor_base;
+ u32 win_enable = 0;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ writel(0, base + WINDOW_BASE(i));
+ writel(0, base + WINDOW_SIZE(i));
+ if (i < 4)
+ writel(0, base + WINDOW_REMAP_HIGH(i));
+ }
+
+ for (i = 0; i < dram->num_cs; i++) {
+ struct mbus_dram_window *cs = dram->cs + i;
+
+ writel((cs->base & 0xffff0000) |
+ (cs->mbus_attr << 8) |
+ dram->mbus_dram_target_id, base + WINDOW_BASE(i));
+ writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
+
+ win_enable |= (1 << i);
+ win_enable |= 3 << (16 + (2 * i));
+ }
+
+ writel(win_enable, base + WINDOW_BAR_ENABLE(0));
+ writel(win_enable, base + WINDOW_BAR_ENABLE(1));
+}
+
+static struct platform_driver mv_xor_driver = {
+ .probe = mv_xor_probe,
+ .remove = mv_xor_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = MV_XOR_NAME,
+ },
+};
+
+static int mv_xor_shared_probe(struct platform_device *pdev)
+{
+ struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
+ struct mv_xor_shared_private *msp;
+ struct resource *res;
+
+ dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");
+
+ msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
+ if (!msp)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ msp->xor_base = devm_ioremap(&pdev->dev, res->start,
+ res->end - res->start + 1);
+ if (!msp->xor_base)
+ return -EBUSY;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res)
+ return -ENODEV;
+
+ msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
+ res->end - res->start + 1);
+ if (!msp->xor_high_base)
+ return -EBUSY;
+
+ platform_set_drvdata(pdev, msp);
+
+ /*
+ * (Re-)program MBUS remapping windows if we are asked to.
+ */
+ if (msd != NULL && msd->dram != NULL)
+ mv_xor_conf_mbus_windows(msp, msd->dram);
+
+ return 0;
+}
+
+static int mv_xor_shared_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver mv_xor_shared_driver = {
+ .probe = mv_xor_shared_probe,
+ .remove = mv_xor_shared_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = MV_XOR_SHARED_NAME,
+ },
+};
+
+static int __init mv_xor_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&mv_xor_shared_driver);
+ if (!rc) {
+ rc = platform_driver_register(&mv_xor_driver);
+ if (rc)
+ platform_driver_unregister(&mv_xor_shared_driver);
+ }
+ return rc;
+}
+module_init(mv_xor_init);
+
+/* it's currently unsafe to unload this module */
+#if 0
+static void __exit mv_xor_exit(void)
+{
+ platform_driver_unregister(&mv_xor_driver);
+ platform_driver_unregister(&mv_xor_shared_driver);
+ return;
+}
+
+module_exit(mv_xor_exit);
+#endif
+
+MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
+MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
new file mode 100644
index 00000000000..06cafe1ef52
--- /dev/null
+++ b/drivers/dma/mv_xor.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2007, 2008, Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef MV_XOR_H
+#define MV_XOR_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+#define USE_TIMER
+#define MV_XOR_SLOT_SIZE 64
+#define MV_XOR_THRESHOLD 1
+
+#define XOR_OPERATION_MODE_XOR 0
+#define XOR_OPERATION_MODE_MEMCPY 2
+#define XOR_OPERATION_MODE_MEMSET 4
+
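+/*
+ * Per-channel registers of the two channels in an XOR unit are
+ * apparently interleaved word by word, hence the (chan->idx * 4)
+ * offsets below; the cause and mask registers are shared, with a
+ * 16-bit field per channel (see the idx * 16 shifts in mv_xor.c).
+ */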
+#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4))
+#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4))
+#define XOR_BYTE_COUNT(chan) (chan->mmr_base + 0x220 + (chan->idx * 4))
+#define XOR_DEST_POINTER(chan) (chan->mmr_base + 0x2B0 + (chan->idx * 4))
+#define XOR_BLOCK_SIZE(chan) (chan->mmr_base + 0x2C0 + (chan->idx * 4))
+#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_base + 0x2E0)
+#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_base + 0x2E4)
+
+#define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4))
+#define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4))
+#define XOR_INTR_CAUSE(chan) (chan->mmr_base + 0x30)
+#define XOR_INTR_MASK(chan) (chan->mmr_base + 0x40)
+#define XOR_ERROR_CAUSE(chan) (chan->mmr_base + 0x50)
+#define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60)
+#define XOR_INTR_MASK_VALUE 0x3F5
+
+#define WINDOW_BASE(w) (0x250 + ((w) << 2))
+#define WINDOW_SIZE(w) (0x270 + ((w) << 2))
+#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2))
+#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2))
+
+struct mv_xor_shared_private {
+ void __iomem *xor_base;
+ void __iomem *xor_high_base;
+};
+
+/**
+ * struct mv_xor_device - internal representation of a XOR device
+ * @pdev: Platform device
+ * @id: HW XOR Device selector
+ * @dma_desc_pool: base of DMA descriptor region (DMA address)
+ * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
+ * @common: embedded struct dma_device
+ */
+struct mv_xor_device {
+ struct platform_device *pdev;
+ int id;
+ dma_addr_t dma_desc_pool;
+ void *dma_desc_pool_virt;
+ struct dma_device common;
+ struct mv_xor_shared_private *shared;
+};
+
+/**
+ * struct mv_xor_chan - internal representation of a XOR channel
+ * @pending: allows batching of hardware operations
+ * @completed_cookie: identifier for the most recently completed operation
+ * @lock: serializes enqueue/dequeue operations to the descriptors pool
+ * @mmr_base: memory mapped register base
+ * @idx: the index of the xor channel
+ * @chain: device chain view of the descriptors
+ * @completed_slots: slots completed by HW but still need to be acked
+ * @device: parent device
+ * @common: common dmaengine channel object members
+ * @last_used: place holder for allocation to continue from where it left off
+ * @all_slots: complete domain of slots usable by the channel
+ * @slots_allocated: records the actual size of the descriptor slot pool
+ * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
+ */
+struct mv_xor_chan {
+ int pending;
+ dma_cookie_t completed_cookie;
+ spinlock_t lock; /* protects the descriptor slot pool */
+ void __iomem *mmr_base;
+ unsigned int idx;
+ enum dma_transaction_type current_type;
+ struct list_head chain;
+ struct list_head completed_slots;
+ struct mv_xor_device *device;
+ struct dma_chan common;
+ struct mv_xor_desc_slot *last_used;
+ struct list_head all_slots;
+ int slots_allocated;
+ struct tasklet_struct irq_tasklet;
+#ifdef USE_TIMER
+ unsigned long cleanup_time;
+ u32 current_on_last_cleanup;
+ dma_cookie_t is_complete_cookie;
+#endif
+};
+
+/**
+ * struct mv_xor_desc_slot - software descriptor
+ * @slot_node: node on the mv_xor_chan.all_slots list
+ * @chain_node: node on the mv_xor_chan.chain list
+ * @completed_node: node on the mv_xor_chan.completed_slots list
+ * @hw_desc: virtual address of the hardware descriptor chain
+ * @phys: hardware address of the hardware descriptor chain
+ * @group_head: first operation in a transaction
+ * @slot_cnt: total slots used in a transaction (group of operations)
+ * @slots_per_op: number of slots per operation
+ * @idx: pool index
+ * @unmap_src_cnt: number of xor sources
+ * @unmap_len: transaction bytecount
+ * @async_tx: support for the async_tx api
+ * @group_list: list of slots that make up a multi-descriptor transaction
+ * for example transfer lengths larger than the supported hw max
+ * @xor_check_result: result of zero sum
+ * @crc32_result: result of the CRC-32 calculation
+ */
+struct mv_xor_desc_slot {
+ struct list_head slot_node;
+ struct list_head chain_node;
+ struct list_head completed_node;
+ enum dma_transaction_type type;
+ void *hw_desc;
+ struct mv_xor_desc_slot *group_head;
+ u16 slot_cnt;
+ u16 slots_per_op;
+ u16 idx;
+ u16 unmap_src_cnt;
+ u32 value;
+ size_t unmap_len;
+ struct dma_async_tx_descriptor async_tx;
+ union {
+ u32 *xor_check_result;
+ u32 *crc32_result;
+ };
+#ifdef USE_TIMER
+ unsigned long arrival_time;
+ struct timer_list timeout;
+#endif
+};
+
+/* This structure describes the hardware XOR descriptor; its size is 64 bytes */
+struct mv_xor_desc {
+ u32 status; /* descriptor execution status */
+ u32 crc32_result; /* result of CRC-32 calculation */
+ u32 desc_command; /* type of operation to be carried out */
+ u32 phy_next_desc; /* next descriptor address pointer */
+ u32 byte_count; /* size of src/dst blocks in bytes */
+ u32 phy_dest_addr; /* destination block address */
+ u32 phy_src_addr[8]; /* source block addresses */
+ u32 reserved0;
+ u32 reserved1;
+};
+
+#define to_mv_sw_desc(addr_hw_desc) \
+ container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)
+
+#define mv_hw_desc_slot_idx(hw_desc, idx) \
+ ((void *)(((unsigned long)hw_desc) + ((idx) << 5)))
+
+#define MV_XOR_MIN_BYTE_COUNT (128)
+#define XOR_MAX_BYTE_COUNT ((16 * 1024 * 1024) - 1)
+#define MV_XOR_MAX_BYTE_COUNT XOR_MAX_BYTE_COUNT
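+
+/*
+ * The (16 MB - 1) ceiling above suggests a 24-bit hardware byte-count
+ * field; requests shorter than MV_XOR_MIN_BYTE_COUNT are rejected by
+ * the prep routines as not worth offloading.
+ */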
+
+#endif
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 25918f7dfd0..0b624e927a6 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -254,6 +254,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
static int smi_request(struct smi_cmd *smi_cmd)
{
cpumask_t old_mask;
+ cpumask_of_cpu_ptr(new_mask, 0);
int ret = 0;
if (smi_cmd->magic != SMI_CMD_MAGIC) {
@@ -264,7 +265,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
/* SMI requires CPU 0 */
old_mask = current->cpus_allowed;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
+ set_cpus_allowed_ptr(current, new_mask);
if (smp_processor_id() != 0) {
dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
__func__);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f43d6d3cf2f..426ac5add58 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -780,7 +780,7 @@ static __inline__ __u32 extract(__u8 *report, unsigned offset, unsigned n)
*/
static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u32 value)
{
- __le64 x;
+ u64 x;
u64 m = (1ULL << n) - 1;
if (n > 32)
@@ -796,10 +796,10 @@ static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u3
report += offset >> 3;
offset &= 7;
- x = get_unaligned((__le64 *)report);
- x &= cpu_to_le64(~(m << offset));
- x |= cpu_to_le64(((u64) value) << offset);
- put_unaligned(x, (__le64 *) report);
+ x = get_unaligned_le64(report);
+ x &= ~(m << offset);
+ x |= ((u64)value) << offset;
+ put_unaligned_le64(x, report);
}
/*
diff --git a/drivers/hid/hid-input-quirks.c b/drivers/hid/hid-input-quirks.c
index 4c2052c658f..16feea01449 100644
--- a/drivers/hid/hid-input-quirks.c
+++ b/drivers/hid/hid-input-quirks.c
@@ -89,6 +89,29 @@ static int quirk_logitech_ultrax_remote(struct hid_usage *usage, struct input_de
return 1;
}
+static int quirk_gyration_remote(struct hid_usage *usage, struct input_dev *input,
+ unsigned long **bit, int *max)
+{
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
+ return 0;
+
+ set_bit(EV_REP, input->evbit);
+ switch (usage->hid & HID_USAGE) {
+ /* Reported on Gyration MCE Remote */
+ case 0x00d: map_key_clear(KEY_HOME); break;
+ case 0x024: map_key_clear(KEY_DVD); break;
+ case 0x025: map_key_clear(KEY_PVR); break;
+ case 0x046: map_key_clear(KEY_MEDIA); break;
+ case 0x047: map_key_clear(KEY_MP3); break;
+ case 0x049: map_key_clear(KEY_CAMERA); break;
+ case 0x04a: map_key_clear(KEY_VIDEO); break;
+
+ default:
+ return 0;
+ }
+ return 1;
+}
+
static int quirk_chicony_tactical_pad(struct hid_usage *usage, struct input_dev *input,
unsigned long **bit, int *max)
{
@@ -303,6 +326,9 @@ static int quirk_sunplus_wdesktop(struct hid_usage *usage, struct input_dev *inp
#define VENDOR_ID_EZKEY 0x0518
#define DEVICE_ID_BTC_8193 0x0002
+#define VENDOR_ID_GYRATION 0x0c16
+#define DEVICE_ID_GYRATION_REMOTE 0x0002
+
#define VENDOR_ID_LOGITECH 0x046d
#define DEVICE_ID_LOGITECH_RECEIVER 0xc101
#define DEVICE_ID_S510_RECEIVER 0xc50c
@@ -337,6 +363,8 @@ static const struct hid_input_blacklist {
{ VENDOR_ID_EZKEY, DEVICE_ID_BTC_8193, quirk_btc_8193 },
+ { VENDOR_ID_GYRATION, DEVICE_ID_GYRATION_REMOTE, quirk_gyration_remote },
+
{ VENDOR_ID_LOGITECH, DEVICE_ID_LOGITECH_RECEIVER, quirk_logitech_ultrax_remote },
{ VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER, quirk_logitech_wireless },
{ VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER_2, quirk_logitech_wireless },
@@ -438,6 +466,18 @@ int hidinput_event_quirks(struct hid_device *hid, struct hid_field *field, struc
input_event(input, usage->type, REL_WHEEL, -value);
return 1;
}
+
+ /* Gyration MCE remote "Sleep" key */
+ if (hid->vendor == VENDOR_ID_GYRATION &&
+ hid->product == DEVICE_ID_GYRATION_REMOTE &&
+ (usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
+ (usage->hid & 0xff) == 0x82) {
+ input_event(input, usage->type, usage->code, 1);
+ input_sync(input);
+ input_event(input, usage->type, usage->code, 0);
+ input_sync(input);
+ return 1;
+ }
return 0;
}
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 5c52a20ad34..1b2e8dc3398 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -100,6 +100,8 @@ static struct hidinput_key_translation apple_fn_keys[] = {
{ KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY },
{ KEY_F3, KEY_FN_F5, APPLE_FLAG_FKEY }, /* Exposé */
{ KEY_F4, KEY_FN_F4, APPLE_FLAG_FKEY }, /* Dashboard */
+ { KEY_F5, KEY_KBDILLUMDOWN, APPLE_FLAG_FKEY },
+ { KEY_F6, KEY_KBDILLUMUP, APPLE_FLAG_FKEY },
{ KEY_F7, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY },
{ KEY_F8, KEY_PLAYPAUSE, APPLE_FLAG_FKEY },
{ KEY_F9, KEY_NEXTSONG, APPLE_FLAG_FKEY },
@@ -612,6 +614,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x0b6: map_key_clear(KEY_PREVIOUSSONG); break;
case 0x0b7: map_key_clear(KEY_STOPCD); break;
case 0x0b8: map_key_clear(KEY_EJECTCD); break;
+ case 0x0bc: map_key_clear(KEY_MEDIA_REPEAT); break;
case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break;
case 0x0e0: map_abs_clear(ABS_VOLUME); break;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 0c6b4d4e7e2..c40f0403eda 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -105,6 +105,7 @@ out:
static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
unsigned int minor = iminor(file->f_path.dentry->d_inode);
+ /* FIXME: What stops hidraw_table going NULL */
struct hid_device *dev = hidraw_table[minor]->hid;
__u8 *buf;
int ret = 0;
@@ -211,38 +212,43 @@ static int hidraw_release(struct inode * inode, struct file * file)
kfree(list->hidraw);
}
+ kfree(list);
+
return 0;
}
-static int hidraw_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long hidraw_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
+ struct inode *inode = file->f_path.dentry->d_inode;
unsigned int minor = iminor(inode);
+ long ret = 0;
+ /* FIXME: What stops hidraw_table going NULL */
struct hidraw *dev = hidraw_table[minor];
void __user *user_arg = (void __user*) arg;
+ lock_kernel();
switch (cmd) {
case HIDIOCGRDESCSIZE:
if (put_user(dev->hid->rsize, (int __user *)arg))
- return -EFAULT;
- return 0;
+ ret = -EFAULT;
+ break;
case HIDIOCGRDESC:
{
__u32 len;
if (get_user(len, (int __user *)arg))
- return -EFAULT;
-
- if (len > HID_MAX_DESCRIPTOR_SIZE - 1)
- return -EINVAL;
-
- if (copy_to_user(user_arg + offsetof(
- struct hidraw_report_descriptor,
- value[0]),
- dev->hid->rdesc,
- min(dev->hid->rsize, len)))
- return -EFAULT;
- return 0;
+ ret = -EFAULT;
+ else if (len > HID_MAX_DESCRIPTOR_SIZE - 1)
+ ret = -EINVAL;
+ else if (copy_to_user(user_arg + offsetof(
+ struct hidraw_report_descriptor,
+ value[0]),
+ dev->hid->rdesc,
+ min(dev->hid->rsize, len)))
+ ret = -EFAULT;
+ break;
}
case HIDIOCGRAWINFO:
{
@@ -252,15 +258,13 @@ static int hidraw_ioctl(struct inode *inode, struct file *file, unsigned int cmd
dinfo.vendor = dev->hid->vendor;
dinfo.product = dev->hid->product;
if (copy_to_user(user_arg, &dinfo, sizeof(dinfo)))
- return -EFAULT;
-
- return 0;
+ ret = -EFAULT;
+ break;
}
default:
- printk(KERN_EMERG "hidraw: unsupported ioctl() %x\n",
- cmd);
+ ret = -ENOTTY;
}
- return -EINVAL;
+ return ret;
}
static const struct file_operations hidraw_ops = {
@@ -270,7 +274,7 @@ static const struct file_operations hidraw_ops = {
.poll = hidraw_poll,
.open = hidraw_open,
.release = hidraw_release,
- .ioctl = hidraw_ioctl,
+ .unlocked_ioctl = hidraw_ioctl,
};
void hidraw_report_event(struct hid_device *hid, u8 *data, int len)
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 1df832a8fcb..61e78a4369b 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -69,12 +69,18 @@
#define USB_DEVICE_ID_APPLE_ALU_ANSI 0x0220
#define USB_DEVICE_ID_APPLE_ALU_ISO 0x0221
#define USB_DEVICE_ID_APPLE_ALU_JIS 0x0222
+#define USB_DEVICE_ID_APPLE_WELLSPRING_ANSI 0x0223
+#define USB_DEVICE_ID_APPLE_WELLSPRING_ISO 0x0224
+#define USB_DEVICE_ID_APPLE_WELLSPRING_JIS 0x0225
#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI 0x0229
#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO 0x022a
#define USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS 0x022b
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI 0x022c
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO 0x022d
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS 0x022e
+#define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230
+#define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231
+#define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232
#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242
@@ -241,6 +247,8 @@
#define USB_DEVICE_ID_LD_MACHINETEST 0x2040
#define USB_VENDOR_ID_LOGITECH 0x046d
+#define USB_DEVICE_ID_LOGITECH_LX3 0xc044
+#define USB_DEVICE_ID_LOGITECH_V150 0xc047
#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
#define USB_DEVICE_ID_LOGITECH_HARMONY 0xc110
#define USB_DEVICE_ID_LOGITECH_HARMONY_2 0xc111
@@ -314,6 +322,7 @@
#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517
#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512
#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513
+#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704
#define USB_DEVICE_ID_DINOVO_EDGE 0xc714
#define USB_DEVICE_ID_DINOVO_MINI 0xc71f
@@ -443,7 +452,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD },
{ USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD },
-
+
+ { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP, HID_QUIRK_DUPLICATE_USAGES },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI, HID_QUIRK_DUPLICATE_USAGES },
@@ -593,6 +603,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP },
+ { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_LX3, HID_QUIRK_INVERT_HWHEEL },
+ { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_V150, HID_QUIRK_INVERT_HWHEEL },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K, HID_QUIRK_MICROSOFT_KEYS },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K, HID_QUIRK_MICROSOFT_KEYS },
@@ -642,6 +654,12 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN },
{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI, HID_QUIRK_APPLE_HAS_FN },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS, HID_QUIRK_APPLE_HAS_FN },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, HID_QUIRK_APPLE_HAS_FN },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
+ { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, HID_QUIRK_APPLE_HAS_FN },
{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
{ USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
@@ -1128,7 +1146,7 @@ static void usbhid_fixup_microsoft_descriptor(unsigned char *rdesc, int rsize)
&& rdesc[557] == 0x19
&& rdesc[559] == 0x29) {
printk(KERN_INFO "Fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n");
- rdesc[284] = rdesc[304] = rdesc[558] = 0x35;
+ rdesc[284] = rdesc[304] = rdesc[557] = 0x35;
rdesc[352] = 0x36;
rdesc[286] = rdesc[355] = 0x46;
rdesc[306] = rdesc[559] = 0x45;
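
The one-byte change here is a bug fix rather than a cleanup: the guard above tests rdesc[557] and rdesc[559], but the old code then patched rdesc[558], so the byte being rewritten was never the byte that had been checked. Restated as a sketch (bounds checking abbreviated):

	/* patch the same byte the guard tested */
	if (rdesc[557] == 0x19 && rdesc[559] == 0x29)
		rdesc[284] = rdesc[304] = rdesc[557] = 0x35;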
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 95cc192bc7a..842e9edb888 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -406,6 +406,7 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL);
if (!uref_multi)
return -ENOMEM;
+ lock_kernel();
uref = &uref_multi->uref;
if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
if (copy_from_user(uref_multi, user_arg,
@@ -501,12 +502,15 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
}
goodreturn:
+ unlock_kernel();
kfree(uref_multi);
return 0;
fault:
+ unlock_kernel();
kfree(uref_multi);
return -EFAULT;
inval:
+ unlock_kernel();
kfree(uref_multi);
return -EINVAL;
}
@@ -540,7 +544,7 @@ static noinline int hiddev_ioctl_string(struct hiddev *hiddev, unsigned int cmd,
return len;
}
-static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct hiddev_list *list = file->private_data;
struct hiddev *hiddev = list->hiddev;
@@ -555,7 +559,10 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
struct usbhid_device *usbhid = hid->driver_data;
void __user *user_arg = (void __user *)arg;
int i;
+
+ /* Called without the BKL by compat methods, so no BKL is taken here */
+ /* FIXME: Who or what stops this racing with a disconnect? */
if (!hiddev->exist)
return -EIO;
@@ -756,8 +763,7 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
#ifdef CONFIG_COMPAT
static long hiddev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
- return hiddev_ioctl(inode, file, cmd, (unsigned long)compat_ptr(arg));
+ return hiddev_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
@@ -768,7 +774,7 @@ static const struct file_operations hiddev_fops = {
.poll = hiddev_poll,
.open = hiddev_open,
.release = hiddev_release,
- .ioctl = hiddev_ioctl,
+ .unlocked_ioctl = hiddev_ioctl,
.fasync = hiddev_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = hiddev_compat_ioctl,
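
With the inode argument gone from the native handler, the compat path can forward to it directly instead of first digging the inode out of the file, matching the comment added above. The resulting shape, with foo_* as placeholder names:

#ifdef CONFIG_COMPAT
static long foo_compat_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	/* pointer-sized arguments from 32-bit userland need compat_ptr() */
	return foo_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif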
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index 3cd46d2e53c..0caaafe0184 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -43,7 +43,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE(DRIVER_LICENSE);
-static unsigned char usb_kbd_keycode[256] = {
+static const unsigned char usb_kbd_keycode[256] = {
0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38,
50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26,
@@ -233,14 +233,6 @@ static int usb_kbd_probe(struct usb_interface *iface,
if (!usb_endpoint_is_int_in(endpoint))
return -ENODEV;
-#ifdef CONFIG_USB_HID
- if (usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct))
- & HID_QUIRK_IGNORE) {
- return -ENODEV;
- }
-#endif
-
pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
index 703e9d0e871..35689ef172c 100644
--- a/drivers/hid/usbhid/usbmouse.c
+++ b/drivers/hid/usbhid/usbmouse.c
@@ -129,14 +129,6 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
if (!usb_endpoint_is_int_in(endpoint))
return -ENODEV;
-#ifdef CONFIG_USB_HID
- if (usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct))
- & (HID_QUIRK_IGNORE|HID_QUIRK_IGNORE_MOUSE)) {
- return -ENODEV;
- }
-#endif
-
pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 15b09b89588..04d9c4d459d 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -510,6 +510,7 @@ config BLK_DEV_TRIFLEX
config BLK_DEV_CY82C693
tristate "CY82C693 chipset support"
+ depends on ALPHA
select IDE_TIMINGS
select BLK_DEV_IDEDMA_PCI
help
@@ -548,6 +549,7 @@ config BLK_DEV_CS5535
config BLK_DEV_HPT34X
tristate "HPT34X chipset support"
+ depends on BROKEN
select BLK_DEV_IDEDMA_PCI
help
This driver adds up to 4 more EIDE devices sharing a single
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index 52f58c88578..f575e8341ae 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -72,7 +72,7 @@ struct icside_state {
void __iomem *ioc_base;
unsigned int sel;
unsigned int type;
- ide_hwif_t *hwif[2];
+ struct ide_host *host;
};
#define ICS_TYPE_A3IN 0
@@ -375,12 +375,14 @@ static int icside_dma_test_irq(ide_drive_t *drive)
static void icside_dma_timeout(ide_drive_t *drive)
{
+ ide_hwif_t *hwif = drive->hwif;
+
printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);
if (icside_dma_test_irq(drive))
return;
- ide_dump_status(drive, "DMA timeout", ide_read_status(drive));
+ ide_dump_status(drive, "DMA timeout", hwif->tp_ops->read_status(hwif));
icside_dma_end(drive);
}
@@ -440,10 +442,10 @@ static void icside_setup_ports(hw_regs_t *hw, void __iomem *base,
static int __init
icside_register_v5(struct icside_state *state, struct expansion_card *ec)
{
- ide_hwif_t *hwif;
void __iomem *base;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
- hw_regs_t hw;
+ struct ide_host *host;
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ int ret;
base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
if (!base)
@@ -463,22 +465,23 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec);
- hwif = ide_find_port();
- if (!hwif)
+ host = ide_host_alloc(NULL, hws);
+ if (host == NULL)
return -ENODEV;
- ide_init_port_hw(hwif, &hw);
- default_hwif_mmiops(hwif);
-
- state->hwif[0] = hwif;
+ state->host = host;
ecard_set_drvdata(ec, state);
- idx[0] = hwif->index;
-
- ide_device_add(idx, NULL);
+ ret = ide_host_register(host, NULL, hws);
+ if (ret)
+ goto err_free;
return 0;
+err_free:
+ ide_host_free(host);
+ ecard_set_drvdata(ec, NULL);
+ return ret;
}
static const struct ide_port_info icside_v6_port_info __initdata = {
@@ -493,13 +496,12 @@ static const struct ide_port_info icside_v6_port_info __initdata = {
static int __init
icside_register_v6(struct icside_state *state, struct expansion_card *ec)
{
- ide_hwif_t *hwif, *mate;
void __iomem *ioc_base, *easi_base;
+ struct ide_host *host;
unsigned int sel = 0;
int ret;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ hw_regs_t hw[2], *hws[] = { &hw[0], NULL, NULL, NULL };
struct ide_port_info d = icside_v6_port_info;
- hw_regs_t hw[2];
ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
if (!ioc_base) {
@@ -538,28 +540,11 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec);
icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec);
- /*
- * Find and register the interfaces.
- */
- hwif = ide_find_port();
- if (hwif == NULL)
+ host = ide_host_alloc(&d, hws);
+ if (host == NULL)
return -ENODEV;
- ide_init_port_hw(hwif, &hw[0]);
- default_hwif_mmiops(hwif);
-
- idx[0] = hwif->index;
-
- mate = ide_find_port();
- if (mate) {
- ide_init_port_hw(mate, &hw[1]);
- default_hwif_mmiops(mate);
-
- idx[1] = mate->index;
- }
-
- state->hwif[0] = hwif;
- state->hwif[1] = mate;
+ state->host = host;
ecard_set_drvdata(ec, state);
@@ -569,11 +554,17 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
d.dma_ops = NULL;
}
- ide_device_add(idx, &d);
+ ret = ide_host_register(host, NULL, hws);
+ if (ret)
+ goto err_free;
return 0;
-
- out:
+err_free:
+ ide_host_free(host);
+ if (d.dma_ops)
+ free_dma(ec->dma);
+ ecard_set_drvdata(ec, NULL);
+out:
return ret;
}
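
icside needs to stash driver state between allocation and registration, so it uses the split form of the new init interface. Distilled to its skeleton (a sketch of the pattern above, not the driver's exact code):

	host = ide_host_alloc(&d, hws);
	if (host == NULL)
		return -ENODEV;

	state->host = host;
	ecard_set_drvdata(ec, state);

	ret = ide_host_register(host, NULL, hws);
	if (ret) {
		ide_host_free(host);	/* undo the alloc on failure */
		ecard_set_drvdata(ec, NULL);
		return ret;
	}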
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/arm/ide_arm.c
index 2f311da4c96..176532ffae0 100644
--- a/drivers/ide/arm/ide_arm.c
+++ b/drivers/ide/arm/ide_arm.c
@@ -28,10 +28,8 @@
static int __init ide_arm_init(void)
{
- ide_hwif_t *hwif;
- hw_regs_t hw;
unsigned long base = IDE_ARM_IO, ctl = IDE_ARM_IO + 0x206;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
if (!request_region(base, 8, DRV_NAME)) {
printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
@@ -51,15 +49,7 @@ static int __init ide_arm_init(void)
hw.irq = IDE_ARM_IRQ;
hw.chipset = ide_generic;
- hwif = ide_find_port();
- if (hwif) {
- ide_init_port_hw(hwif, &hw);
- idx[0] = hwif->index;
-
- ide_device_add(idx, NULL);
- }
-
- return 0;
+ return ide_host_add(NULL, hws, NULL);
}
module_init(ide_arm_init);
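
For drivers with nothing to keep between steps, the old ide_find_port()/ide_init_port_hw()/ide_device_add() sequence collapses into a single ide_host_add() call, as above. A self-contained sketch of a probe in the new style; the example_* name, the 0x1f0/0x3f6 port block and IRQ 14 are placeholders:

static int __init example_ide_init(void)
{
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, 0x1f0, 0x3f6);	/* data/ctl placeholders */
	hw.irq = 14;
	hw.chipset = ide_generic;

	/* NULL port_info and NULL host pointer: fire and forget */
	return ide_host_add(NULL, hws, NULL);
}
module_init(example_ide_init);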
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
index c79b85b6e4a..65bb4b8fd57 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -316,15 +316,14 @@ static u8 __devinit palm_bk3710_cable_detect(ide_hwif_t *hwif)
static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif,
const struct ide_port_info *d)
{
- unsigned long base =
- hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;
-
printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
if (ide_allocate_dma_engine(hwif))
return -1;
- ide_setup_dma(hwif, base);
+ hwif->dma_base = hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;
+
+ hwif->dma_ops = &sff_dma_ops;
return 0;
}
@@ -348,11 +347,10 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
{
struct clk *clk;
struct resource *mem, *irq;
- ide_hwif_t *hwif;
+ struct ide_host *host;
unsigned long base, rate;
- int i;
- hw_regs_t hw;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ int i, rc;
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
clk = clk_get(NULL, "IDECLK");
if (IS_ERR(clk))
@@ -394,24 +392,14 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
hw.irq = irq->start;
hw.chipset = ide_palm3710;
- hwif = ide_find_port();
- if (hwif == NULL)
+ rc = ide_host_add(&palm_bk3710_port_info, hws, NULL);
+ if (rc)
goto out;
- i = hwif->index;
-
- ide_init_port_hw(hwif, &hw);
-
- default_hwif_mmiops(hwif);
-
- idx[0] = i;
-
- ide_device_add(idx, &palm_bk3710_port_info);
-
return 0;
out:
printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n");
- return -ENODEV;
+ return rc;
}
/* work with hotplug and coldplug */
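
With ide_setup_dma() removed (see the ide-dma.c hunks below), a driver's init_dma hook now fills in dma_base itself and points dma_ops at the newly exported sff_dma_ops. The shape, with EXAMPLE_REG_OFFSET standing in for the driver-specific register offset:

static int __devinit example_init_dma(ide_hwif_t *hwif,
				      const struct ide_port_info *d)
{
	if (ide_allocate_dma_engine(hwif))
		return -1;

	/* BM-DMA registers sit at a fixed offset from the data port */
	hwif->dma_base = hwif->io_ports.data_addr - EXAMPLE_REG_OFFSET;
	hwif->dma_ops = &sff_dma_ops;

	return 0;
}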
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
index 43057e0303c..2bdd8b734af 100644
--- a/drivers/ide/arm/rapide.c
+++ b/drivers/ide/arm/rapide.c
@@ -32,11 +32,10 @@ static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base,
static int __devinit
rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
{
- ide_hwif_t *hwif;
void __iomem *base;
+ struct ide_host *host;
int ret;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
- hw_regs_t hw;
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
ret = ecard_request_resources(ec);
if (ret)
@@ -53,20 +52,11 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
hw.chipset = ide_generic;
hw.dev = &ec->dev;
- hwif = ide_find_port();
- if (hwif == NULL) {
- ret = -ENOENT;
+ ret = ide_host_add(&rapide_port_info, hws, &host);
+ if (ret)
goto release;
- }
-
- ide_init_port_hw(hwif, &hw);
- default_hwif_mmiops(hwif);
-
- idx[0] = hwif->index;
-
- ide_device_add(idx, &rapide_port_info);
- ecard_set_drvdata(ec, hwif);
+ ecard_set_drvdata(ec, host);
goto out;
release:
@@ -77,11 +67,11 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
static void __devexit rapide_remove(struct expansion_card *ec)
{
- ide_hwif_t *hwif = ecard_get_drvdata(ec);
+ struct ide_host *host = ecard_get_drvdata(ec);
ecard_set_drvdata(ec, NULL);
- ide_unregister(hwif);
+ ide_host_remove(host);
ecard_release_resources(ec);
}
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
index 20fad6d542c..bde7a585f19 100644
--- a/drivers/ide/h8300/ide-h8300.c
+++ b/drivers/ide/h8300/ide-h8300.c
@@ -100,6 +100,8 @@ static void h8300_tf_read(ide_drive_t *drive, ide_task_t *task)
/* be sure we're looking at the low order bits */
outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
+ tf->feature = inb(io_ports->feature_addr);
if (task->tf_flags & IDE_TFLAG_IN_NSECT)
tf->nsect = inb(io_ports->nsect_addr);
if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -153,6 +155,21 @@ static void h8300_output_data(ide_drive_t *drive, struct request *rq,
mm_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
}
+static const struct ide_tp_ops h8300_tp_ops = {
+ .exec_command = ide_exec_command,
+ .read_status = ide_read_status,
+ .read_altstatus = ide_read_altstatus,
+ .read_sff_dma_status = ide_read_sff_dma_status,
+
+ .set_irq = ide_set_irq,
+
+ .tf_load = h8300_tf_load,
+ .tf_read = h8300_tf_read,
+
+ .input_data = h8300_input_data,
+ .output_data = h8300_output_data,
+};
+
#define H8300_IDE_GAP (2)
static inline void hw_setup(hw_regs_t *hw)
@@ -167,27 +184,14 @@ static inline void hw_setup(hw_regs_t *hw)
hw->chipset = ide_generic;
}
-static inline void hwif_setup(ide_hwif_t *hwif)
-{
- default_hwif_iops(hwif);
-
- hwif->tf_load = h8300_tf_load;
- hwif->tf_read = h8300_tf_read;
-
- hwif->input_data = h8300_input_data;
- hwif->output_data = h8300_output_data;
-}
-
static const struct ide_port_info h8300_port_info = {
+ .tp_ops = &h8300_tp_ops,
.host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA,
};
static int __init h8300_ide_init(void)
{
- hw_regs_t hw;
- ide_hwif_t *hwif;
- int index;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n");
@@ -200,19 +204,7 @@ static int __init h8300_ide_init(void)
hw_setup(&hw);
- hwif = ide_find_port_slot(&h8300_port_info);
- if (hwif == NULL)
- return -ENOENT;
-
- index = hwif->index;
- ide_init_port_hw(hwif, &hw);
- hwif_setup(hwif);
-
- idx[0] = index;
-
- ide_device_add(idx, &h8300_port_info);
-
- return 0;
+ return ide_host_add(&h8300_port_info, hws, NULL);
out_busy:
printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n");
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 2802031de67..adf04f99cde 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -22,6 +22,8 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
void (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned, int))
{
ide_hwif_t *hwif = drive->hwif;
+ struct request *rq = hwif->hwgroup->rq;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
xfer_func_t *xferfunc;
unsigned int temp;
u16 bcount;
@@ -30,12 +32,12 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
debug_log("Enter %s - interrupt handler\n", __func__);
if (pc->flags & PC_FLAG_TIMEDOUT) {
- pc->callback(drive);
+ drive->pc_callback(drive);
return ide_stopped;
}
/* Clear the interrupt */
- stat = ide_read_status(drive);
+ stat = tp_ops->read_status(hwif);
if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
if (hwif->dma_ops->dma_end(drive) ||
@@ -63,8 +65,9 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
local_irq_enable_in_hardirq();
if (drive->media == ide_tape && !scsi &&
- (stat & ERR_STAT) && pc->c[0] == REQUEST_SENSE)
+ (stat & ERR_STAT) && rq->cmd[0] == REQUEST_SENSE)
stat &= ~ERR_STAT;
+
if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) {
/* Error detected */
debug_log("%s: I/O error\n", drive->name);
@@ -75,16 +78,17 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
goto cmd_finished;
}
- if (pc->c[0] == REQUEST_SENSE) {
+ if (rq->cmd[0] == REQUEST_SENSE) {
printk(KERN_ERR "%s: I/O error in request sense"
" command\n", drive->name);
return ide_do_reset(drive);
}
- debug_log("[cmd %x]: check condition\n", pc->c[0]);
+ debug_log("[cmd %x]: check condition\n", rq->cmd[0]);
/* Retry operation */
retry_pc(drive);
+
/* queued, but not started */
return ide_stopped;
}
@@ -95,8 +99,10 @@ cmd_finished:
dsc_handle(drive);
return ide_stopped;
}
+
/* Command finished - Call the callback function */
- pc->callback(drive);
+ drive->pc_callback(drive);
+
return ide_stopped;
}
@@ -107,16 +113,15 @@ cmd_finished:
ide_dma_off(drive);
return ide_do_reset(drive);
}
- /* Get the number of bytes to transfer on this interrupt. */
- bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
- hwif->INB(hwif->io_ports.lbam_addr);
- ireason = hwif->INB(hwif->io_ports.nsect_addr);
+ /* Get the number of bytes to transfer on this interrupt. */
+ ide_read_bcount_and_ireason(drive, &bcount, &ireason);
if (ireason & CD) {
printk(KERN_ERR "%s: CoD != 0 in %s\n", drive->name, __func__);
return ide_do_reset(drive);
}
+
if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) {
/* Hopefully, we will never get here */
printk(KERN_ERR "%s: We wanted to %s, but the device wants us "
@@ -125,6 +130,7 @@ cmd_finished:
(ireason & IO) ? "Read" : "Write");
return ide_do_reset(drive);
}
+
if (!(pc->flags & PC_FLAG_WRITING)) {
/* Reading - Check that we have enough space */
temp = pc->xferred + bcount;
@@ -142,7 +148,7 @@ cmd_finished:
if (pc->sg)
io_buffers(drive, pc, temp, 0);
else
- hwif->input_data(drive, NULL,
+ tp_ops->input_data(drive, NULL,
pc->cur_pos, temp);
printk(KERN_ERR "%s: transferred %d of "
"%d bytes\n",
@@ -159,9 +165,9 @@ cmd_finished:
debug_log("The device wants to send us more data than "
"expected - allowing transfer\n");
}
- xferfunc = hwif->input_data;
+ xferfunc = tp_ops->input_data;
} else
- xferfunc = hwif->output_data;
+ xferfunc = tp_ops->output_data;
if ((drive->media == ide_floppy && !scsi && !pc->buf) ||
(drive->media == ide_tape && !scsi && pc->bh) ||
@@ -175,7 +181,7 @@ cmd_finished:
pc->cur_pos += bcount;
debug_log("[cmd %x] transferred %d bytes on that intr.\n",
- pc->c[0], bcount);
+ rq->cmd[0], bcount);
/* And set the interrupt handler again */
ide_set_handler(drive, handler, timeout, expiry);
@@ -183,16 +189,27 @@ cmd_finished:
}
EXPORT_SYMBOL_GPL(ide_pc_intr);
+static u8 ide_read_ireason(ide_drive_t *drive)
+{
+ ide_task_t task;
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_IN_NSECT;
+
+ drive->hwif->tp_ops->tf_read(drive, &task);
+
+ return task.tf.nsect & 3;
+}
+
static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason)
{
- ide_hwif_t *hwif = drive->hwif;
int retries = 100;
while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
printk(KERN_ERR "%s: (IO,CoD != (0,1) while issuing "
"a packet command, retrying\n", drive->name);
udelay(100);
- ireason = hwif->INB(hwif->io_ports.nsect_addr);
+ ireason = ide_read_ireason(drive);
if (retries == 0) {
printk(KERN_ERR "%s: (IO,CoD != (0,1) while issuing "
"a packet command, ignoring\n",
@@ -210,6 +227,7 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
ide_expiry_t *expiry)
{
ide_hwif_t *hwif = drive->hwif;
+ struct request *rq = hwif->hwgroup->rq;
ide_startstop_t startstop;
u8 ireason;
@@ -219,7 +237,7 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
return startstop;
}
- ireason = hwif->INB(hwif->io_ports.nsect_addr);
+ ireason = ide_read_ireason(drive);
if (drive->media == ide_tape && !drive->scsi)
ireason = ide_wait_ireason(drive, ireason);
@@ -239,8 +257,8 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
}
/* Send the actual packet */
- if ((pc->flags & PC_FLAG_ZIP_DRIVE) == 0)
- hwif->output_data(drive, NULL, pc->c, 12);
+ if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
+ hwif->tp_ops->output_data(drive, NULL, rq->cmd, 12);
return ide_started;
}
@@ -284,7 +302,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
bcount, dma);
/* Issue the packet command */
- if (pc->flags & PC_FLAG_DRQ_INTERRUPT) {
+ if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
ide_execute_command(drive, WIN_PACKETCMD, handler,
timeout, NULL);
return ide_started;
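
ide_read_ireason() above shows the idiom this series substitutes for poking taskfile registers with hwif->INB(): declare which registers you want in an ide_task_t and let ->tf_read fetch them. Reading the byte count the same way might look like this (a sketch only; the merged code actually uses the ide_read_bcount_and_ireason() helper):

static u16 example_read_bcount(ide_drive_t *drive)
{
	ide_task_t task;

	memset(&task, 0, sizeof(task));
	task.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM;

	drive->hwif->tp_ops->tf_read(drive, &task);

	/* for ATAPI, bcount is carried in the LBA mid/high pair */
	return (task.tf.lbah << 8) | task.tf.lbam;
}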
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 6e29dd53209..4e73aeee405 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -85,10 +85,8 @@ static void ide_cd_put(struct cdrom_info *cd)
/* Mark that we've seen a media change and invalidate our internal buffers. */
static void cdrom_saw_media_change(ide_drive_t *drive)
{
- struct cdrom_info *cd = drive->driver_data;
-
- cd->cd_flags |= IDE_CD_FLAG_MEDIA_CHANGED;
- cd->cd_flags &= ~IDE_CD_FLAG_TOC_VALID;
+ drive->atapi_flags |= IDE_AFLAG_MEDIA_CHANGED;
+ drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID;
}
static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
@@ -280,11 +278,12 @@ static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 st)
*/
static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
{
- struct request *rq = HWGROUP(drive)->rq;
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *rq = hwif->hwgroup->rq;
int stat, err, sense_key;
/* check for errors */
- stat = ide_read_status(drive);
+ stat = hwif->tp_ops->read_status(hwif);
if (stat_ret)
*stat_ret = stat;
@@ -528,7 +527,7 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL,
xferlen, info->dma);
- if (info->cd_flags & IDE_CD_FLAG_DRQ_INTERRUPT) {
+ if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
/* waiting for CDB interrupt, not DMA yet. */
if (info->dma)
drive->waiting_for_dma = 0;
@@ -560,7 +559,7 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
struct cdrom_info *info = drive->driver_data;
ide_startstop_t startstop;
- if (info->cd_flags & IDE_CD_FLAG_DRQ_INTERRUPT) {
+ if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
/*
* Here we should have been called after receiving an interrupt
* from the device. DRQ should now be set.
@@ -589,7 +588,7 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
cmd_len = ATAPI_MIN_CDB_BYTES;
/* send the command to the device */
- hwif->output_data(drive, NULL, rq->cmd, cmd_len);
+ hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len);
/* start the DMA if need be */
if (info->dma)
@@ -606,6 +605,8 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
int len, int ireason, int rw)
{
+ ide_hwif_t *hwif = drive->hwif;
+
/*
* ireason == 0: the drive wants to receive data from us
* ireason == 2: the drive is expecting to transfer data to us
@@ -624,7 +625,7 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
* Some drives (ASUS) seem to tell us that status info is
* available. Just get it and ignore.
*/
- (void)ide_read_status(drive);
+ (void)hwif->tp_ops->read_status(hwif);
return 0;
} else {
/* drive wants a command packet, or invalid ireason... */
@@ -645,20 +646,18 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
*/
static int ide_cd_check_transfer_size(ide_drive_t *drive, int len)
{
- struct cdrom_info *cd = drive->driver_data;
-
if ((len % SECTOR_SIZE) == 0)
return 0;
printk(KERN_ERR "%s: %s: Bad transfer size %d\n",
drive->name, __func__, len);
- if (cd->cd_flags & IDE_CD_FLAG_LIMIT_NFRAMES)
+ if (drive->atapi_flags & IDE_AFLAG_LIMIT_NFRAMES)
printk(KERN_ERR " This drive is not supported by "
"this version of the driver\n");
else {
printk(KERN_ERR " Trying to limit transfer sizes\n");
- cd->cd_flags |= IDE_CD_FLAG_LIMIT_NFRAMES;
+ drive->atapi_flags |= IDE_AFLAG_LIMIT_NFRAMES;
}
return 1;
@@ -735,7 +734,7 @@ static ide_startstop_t cdrom_seek_intr(ide_drive_t *drive)
if (cdrom_decode_status(drive, 0, &stat))
return ide_stopped;
- info->cd_flags |= IDE_CD_FLAG_SEEKING;
+ drive->atapi_flags |= IDE_AFLAG_SEEKING;
if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) {
if (--retry == 0)
@@ -892,10 +891,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
struct request *rq = HWGROUP(drive)->rq;
xfer_func_t *xferfunc;
ide_expiry_t *expiry = NULL;
- int dma_error = 0, dma, stat, ireason, len, thislen, uptodate = 0;
+ int dma_error = 0, dma, stat, thislen, uptodate = 0;
int write = (rq_data_dir(rq) == WRITE) ? 1 : 0;
unsigned int timeout;
- u8 lowcyl, highcyl;
+ u16 len;
+ u8 ireason;
/* check for errors */
dma = info->dma;
@@ -923,12 +923,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
goto end_request;
}
- /* ok we fall to pio :/ */
- ireason = hwif->INB(hwif->io_ports.nsect_addr) & 0x3;
- lowcyl = hwif->INB(hwif->io_ports.lbam_addr);
- highcyl = hwif->INB(hwif->io_ports.lbah_addr);
-
- len = lowcyl + (256 * highcyl);
+ ide_read_bcount_and_ireason(drive, &len, &ireason);
thislen = blk_fs_request(rq) ? len : rq->data_len;
if (thislen > len)
@@ -991,10 +986,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
if (ireason == 0) {
write = 1;
- xferfunc = hwif->output_data;
+ xferfunc = hwif->tp_ops->output_data;
} else {
write = 0;
- xferfunc = hwif->input_data;
+ xferfunc = hwif->tp_ops->input_data;
}
/* transfer data */
@@ -1198,9 +1193,10 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
int xferlen;
if (blk_fs_request(rq)) {
- if (info->cd_flags & IDE_CD_FLAG_SEEKING) {
+ if (drive->atapi_flags & IDE_AFLAG_SEEKING) {
+ ide_hwif_t *hwif = drive->hwif;
unsigned long elapsed = jiffies - info->start_seek;
- int stat = ide_read_status(drive);
+ int stat = hwif->tp_ops->read_status(hwif);
if ((stat & SEEK_STAT) != SEEK_STAT) {
if (elapsed < IDECD_SEEK_TIMEOUT) {
@@ -1211,7 +1207,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
printk(KERN_ERR "%s: DSC timeout\n",
drive->name);
}
- info->cd_flags &= ~IDE_CD_FLAG_SEEKING;
+ drive->atapi_flags &= ~IDE_AFLAG_SEEKING;
}
if (rq_data_dir(rq) == READ &&
IDE_LARGE_SEEK(info->last_block, block,
@@ -1288,7 +1284,7 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
*/
cmd[7] = cdi->sanyo_slot % 3;
- return ide_cd_queue_pc(drive, cmd, 0, NULL, 0, sense, 0, REQ_QUIET);
+ return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, REQ_QUIET);
}
static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
@@ -1296,8 +1292,8 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
struct request_sense *sense)
{
struct {
- __u32 lba;
- __u32 blocklen;
+ __be32 lba;
+ __be32 blocklen;
} capbuf;
int stat;
@@ -1369,7 +1365,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
*/
(void) cdrom_check_status(drive, sense);
- if (info->cd_flags & IDE_CD_FLAG_TOC_VALID)
+ if (drive->atapi_flags & IDE_AFLAG_TOC_VALID)
return 0;
/* try to get the total cdrom capacity and sector size */
@@ -1391,7 +1387,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
if (stat)
return stat;
- if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) {
+ if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
toc->hdr.first_track = BCD2BIN(toc->hdr.first_track);
toc->hdr.last_track = BCD2BIN(toc->hdr.last_track);
}
@@ -1432,7 +1428,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
if (stat)
return stat;
- if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) {
+ if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
toc->hdr.first_track = (u8)BIN2BCD(CDROM_LEADOUT);
toc->hdr.last_track = (u8)BIN2BCD(CDROM_LEADOUT);
} else {
@@ -1446,14 +1442,14 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length);
- if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) {
+ if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
toc->hdr.first_track = BCD2BIN(toc->hdr.first_track);
toc->hdr.last_track = BCD2BIN(toc->hdr.last_track);
}
for (i = 0; i <= ntracks; i++) {
- if (info->cd_flags & IDE_CD_FLAG_TOCADDR_AS_BCD) {
- if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD)
+ if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
+ if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD)
toc->ent[i].track = BCD2BIN(toc->ent[i].track);
msf_from_bcd(&toc->ent[i].addr.msf);
}
@@ -1476,7 +1472,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */
}
- if (info->cd_flags & IDE_CD_FLAG_TOCADDR_AS_BCD) {
+ if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
/* re-read multisession information using MSF format */
stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
sizeof(ms_tmp), sense);
@@ -1500,7 +1496,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
}
/* Remember that we've read this stuff. */
- info->cd_flags |= IDE_CD_FLAG_TOC_VALID;
+ drive->atapi_flags |= IDE_AFLAG_TOC_VALID;
return 0;
}
@@ -1512,7 +1508,7 @@ int ide_cdrom_get_capabilities(ide_drive_t *drive, u8 *buf)
struct packet_command cgc;
int stat, attempts = 3, size = ATAPI_CAPABILITIES_PAGE_SIZE;
- if ((info->cd_flags & IDE_CD_FLAG_FULL_CAPS_PAGE) == 0)
+ if ((drive->atapi_flags & IDE_AFLAG_FULL_CAPS_PAGE) == 0)
size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE;
init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN);
@@ -1530,15 +1526,12 @@ void ide_cdrom_update_speed(ide_drive_t *drive, u8 *buf)
struct cdrom_info *cd = drive->driver_data;
u16 curspeed, maxspeed;
- curspeed = *(u16 *)&buf[8 + 14];
- maxspeed = *(u16 *)&buf[8 + 8];
-
- if (cd->cd_flags & IDE_CD_FLAG_LE_SPEED_FIELDS) {
- curspeed = le16_to_cpu(curspeed);
- maxspeed = le16_to_cpu(maxspeed);
+ if (drive->atapi_flags & IDE_AFLAG_LE_SPEED_FIELDS) {
+ curspeed = le16_to_cpup((__le16 *)&buf[8 + 14]);
+ maxspeed = le16_to_cpup((__le16 *)&buf[8 + 8]);
} else {
- curspeed = be16_to_cpu(curspeed);
- maxspeed = be16_to_cpu(maxspeed);
+ curspeed = be16_to_cpup((__be16 *)&buf[8 + 14]);
+ maxspeed = be16_to_cpup((__be16 *)&buf[8 + 8]);
}
cd->current_speed = (curspeed + (176/2)) / 176;
@@ -1579,7 +1572,7 @@ static int ide_cdrom_register(ide_drive_t *drive, int nslots)
devinfo->handle = drive;
strcpy(devinfo->name, drive->name);
- if (info->cd_flags & IDE_CD_FLAG_NO_SPEED_SELECT)
+ if (drive->atapi_flags & IDE_AFLAG_NO_SPEED_SELECT)
devinfo->mask |= CDC_SELECT_SPEED;
devinfo->disk = info->disk;
@@ -1605,8 +1598,8 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
return nslots;
}
- if (cd->cd_flags & IDE_CD_FLAG_PRE_ATAPI12) {
- cd->cd_flags &= ~IDE_CD_FLAG_NO_EJECT;
+ if (drive->atapi_flags & IDE_AFLAG_PRE_ATAPI12) {
+ drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT;
cdi->mask &= ~CDC_PLAY_AUDIO;
return nslots;
}
@@ -1624,9 +1617,9 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
return 0;
if ((buf[8 + 6] & 0x01) == 0)
- cd->cd_flags |= IDE_CD_FLAG_NO_DOORLOCK;
+ drive->atapi_flags |= IDE_AFLAG_NO_DOORLOCK;
if (buf[8 + 6] & 0x08)
- cd->cd_flags &= ~IDE_CD_FLAG_NO_EJECT;
+ drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT;
if (buf[8 + 3] & 0x01)
cdi->mask &= ~CDC_CD_R;
if (buf[8 + 3] & 0x02)
@@ -1637,7 +1630,7 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
cdi->mask &= ~(CDC_DVD_RAM | CDC_RAM);
if (buf[8 + 3] & 0x10)
cdi->mask &= ~CDC_DVD_R;
- if ((buf[8 + 4] & 0x01) || (cd->cd_flags & IDE_CD_FLAG_PLAY_AUDIO_OK))
+ if ((buf[8 + 4] & 0x01) || (drive->atapi_flags & IDE_AFLAG_PLAY_AUDIO_OK))
cdi->mask &= ~CDC_PLAY_AUDIO;
mechtype = buf[8 + 6] >> 5;
@@ -1679,7 +1672,7 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
else
printk(KERN_CONT " drive");
- printk(KERN_CONT ", %dkB Cache\n", be16_to_cpu(*(u16 *)&buf[8 + 12]));
+ printk(KERN_CONT ", %dkB Cache\n", be16_to_cpup((__be16 *)&buf[8 + 12]));
return nslots;
}
@@ -1802,43 +1795,43 @@ static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
static const struct cd_list_entry ide_cd_quirks_list[] = {
/* Limit transfer size per interrupt. */
- { "SAMSUNG CD-ROM SCR-2430", NULL, IDE_CD_FLAG_LIMIT_NFRAMES },
- { "SAMSUNG CD-ROM SCR-2432", NULL, IDE_CD_FLAG_LIMIT_NFRAMES },
+ { "SAMSUNG CD-ROM SCR-2430", NULL, IDE_AFLAG_LIMIT_NFRAMES },
+ { "SAMSUNG CD-ROM SCR-2432", NULL, IDE_AFLAG_LIMIT_NFRAMES },
/* SCR-3231 doesn't support the SET_CD_SPEED command. */
- { "SAMSUNG CD-ROM SCR-3231", NULL, IDE_CD_FLAG_NO_SPEED_SELECT },
+ { "SAMSUNG CD-ROM SCR-3231", NULL, IDE_AFLAG_NO_SPEED_SELECT },
/* Old NEC260 (not R) was released before ATAPI 1.2 spec. */
- { "NEC CD-ROM DRIVE:260", "1.01", IDE_CD_FLAG_TOCADDR_AS_BCD |
- IDE_CD_FLAG_PRE_ATAPI12, },
+ { "NEC CD-ROM DRIVE:260", "1.01", IDE_AFLAG_TOCADDR_AS_BCD |
+ IDE_AFLAG_PRE_ATAPI12, },
/* Vertos 300, some versions of this drive like to talk BCD. */
- { "V003S0DS", NULL, IDE_CD_FLAG_VERTOS_300_SSD, },
+ { "V003S0DS", NULL, IDE_AFLAG_VERTOS_300_SSD, },
/* Vertos 600 ESD. */
- { "V006E0DS", NULL, IDE_CD_FLAG_VERTOS_600_ESD, },
+ { "V006E0DS", NULL, IDE_AFLAG_VERTOS_600_ESD, },
/*
* Sanyo 3 CD changer uses a non-standard command for CD changing
* (by default standard ATAPI support for CD changers is used).
*/
- { "CD-ROM CDR-C3 G", NULL, IDE_CD_FLAG_SANYO_3CD },
- { "CD-ROM CDR-C3G", NULL, IDE_CD_FLAG_SANYO_3CD },
- { "CD-ROM CDR_C36", NULL, IDE_CD_FLAG_SANYO_3CD },
+ { "CD-ROM CDR-C3 G", NULL, IDE_AFLAG_SANYO_3CD },
+ { "CD-ROM CDR-C3G", NULL, IDE_AFLAG_SANYO_3CD },
+ { "CD-ROM CDR_C36", NULL, IDE_AFLAG_SANYO_3CD },
/* Stingray 8X CD-ROM. */
- { "STINGRAY 8422 IDE 8X CD-ROM 7-27-95", NULL, IDE_CD_FLAG_PRE_ATAPI12},
+ { "STINGRAY 8422 IDE 8X CD-ROM 7-27-95", NULL, IDE_AFLAG_PRE_ATAPI12 },
/*
* ACER 50X CD-ROM and WPI 32X CD-ROM require the full spec length
* mode sense page capabilities size, but older drives break.
*/
- { "ATAPI CD ROM DRIVE 50X MAX", NULL, IDE_CD_FLAG_FULL_CAPS_PAGE },
- { "WPI CDS-32X", NULL, IDE_CD_FLAG_FULL_CAPS_PAGE },
+ { "ATAPI CD ROM DRIVE 50X MAX", NULL, IDE_AFLAG_FULL_CAPS_PAGE },
+ { "WPI CDS-32X", NULL, IDE_AFLAG_FULL_CAPS_PAGE },
/* ACER/AOpen 24X CD-ROM has the speed fields byte-swapped. */
- { "", "241N", IDE_CD_FLAG_LE_SPEED_FIELDS },
+ { "", "241N", IDE_AFLAG_LE_SPEED_FIELDS },
/*
* Some drives used by Apple don't advertise audio play
* but they do support reading TOC & audio data.
*/
- { "MATSHITADVD-ROM SR-8187", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK },
- { "MATSHITADVD-ROM SR-8186", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK },
- { "MATSHITADVD-ROM SR-8176", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK },
- { "MATSHITADVD-ROM SR-8174", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK },
- { "Optiarc DVD RW AD-5200A", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK },
+ { "MATSHITADVD-ROM SR-8187", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
+ { "MATSHITADVD-ROM SR-8186", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
+ { "MATSHITADVD-ROM SR-8176", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
+ { "MATSHITADVD-ROM SR-8174", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
+ { "Optiarc DVD RW AD-5200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
{ NULL, NULL, 0 }
};
@@ -1873,20 +1866,20 @@ static int ide_cdrom_setup(ide_drive_t *drive)
drive->special.all = 0;
- cd->cd_flags = IDE_CD_FLAG_MEDIA_CHANGED | IDE_CD_FLAG_NO_EJECT |
+ drive->atapi_flags = IDE_AFLAG_MEDIA_CHANGED | IDE_AFLAG_NO_EJECT |
ide_cd_flags(id);
if ((id->config & 0x0060) == 0x20)
- cd->cd_flags |= IDE_CD_FLAG_DRQ_INTERRUPT;
+ drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT;
- if ((cd->cd_flags & IDE_CD_FLAG_VERTOS_300_SSD) &&
+ if ((drive->atapi_flags & IDE_AFLAG_VERTOS_300_SSD) &&
id->fw_rev[4] == '1' && id->fw_rev[6] <= '2')
- cd->cd_flags |= (IDE_CD_FLAG_TOCTRACKS_AS_BCD |
- IDE_CD_FLAG_TOCADDR_AS_BCD);
- else if ((cd->cd_flags & IDE_CD_FLAG_VERTOS_600_ESD) &&
+ drive->atapi_flags |= (IDE_AFLAG_TOCTRACKS_AS_BCD |
+ IDE_AFLAG_TOCADDR_AS_BCD);
+ else if ((drive->atapi_flags & IDE_AFLAG_VERTOS_600_ESD) &&
id->fw_rev[4] == '1' && id->fw_rev[6] <= '2')
- cd->cd_flags |= IDE_CD_FLAG_TOCTRACKS_AS_BCD;
- else if (cd->cd_flags & IDE_CD_FLAG_SANYO_3CD)
+ drive->atapi_flags |= IDE_AFLAG_TOCTRACKS_AS_BCD;
+ else if (drive->atapi_flags & IDE_AFLAG_SANYO_3CD)
/* 3 => use CD in slot 0 */
cdi->sanyo_slot = 3;
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index fe0ea36e412..61a4599b77d 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -27,42 +27,6 @@
#define ATAPI_CAPABILITIES_PAGE_SIZE (8 + 20)
#define ATAPI_CAPABILITIES_PAGE_PAD_SIZE 4
-enum {
- /* Device sends an interrupt when ready for a packet command. */
- IDE_CD_FLAG_DRQ_INTERRUPT = (1 << 0),
- /* Drive cannot lock the door. */
- IDE_CD_FLAG_NO_DOORLOCK = (1 << 1),
- /* Drive cannot eject the disc. */
- IDE_CD_FLAG_NO_EJECT = (1 << 2),
- /* Drive is a pre ATAPI 1.2 drive. */
- IDE_CD_FLAG_PRE_ATAPI12 = (1 << 3),
- /* TOC addresses are in BCD. */
- IDE_CD_FLAG_TOCADDR_AS_BCD = (1 << 4),
- /* TOC track numbers are in BCD. */
- IDE_CD_FLAG_TOCTRACKS_AS_BCD = (1 << 5),
- /*
- * Drive does not provide data in multiples of SECTOR_SIZE
- * when more than one interrupt is needed.
- */
- IDE_CD_FLAG_LIMIT_NFRAMES = (1 << 6),
- /* Seeking in progress. */
- IDE_CD_FLAG_SEEKING = (1 << 7),
- /* Driver has noticed a media change. */
- IDE_CD_FLAG_MEDIA_CHANGED = (1 << 8),
- /* Saved TOC information is current. */
- IDE_CD_FLAG_TOC_VALID = (1 << 9),
- /* We think that the drive door is locked. */
- IDE_CD_FLAG_DOOR_LOCKED = (1 << 10),
- /* SET_CD_SPEED command is unsupported. */
- IDE_CD_FLAG_NO_SPEED_SELECT = (1 << 11),
- IDE_CD_FLAG_VERTOS_300_SSD = (1 << 12),
- IDE_CD_FLAG_VERTOS_600_ESD = (1 << 13),
- IDE_CD_FLAG_SANYO_3CD = (1 << 14),
- IDE_CD_FLAG_FULL_CAPS_PAGE = (1 << 15),
- IDE_CD_FLAG_PLAY_AUDIO_OK = (1 << 16),
- IDE_CD_FLAG_LE_SPEED_FIELDS = (1 << 17),
-};
-
/* Structure of a MSF cdrom address. */
struct atapi_msf {
byte reserved;
@@ -128,8 +92,6 @@ struct cdrom_info {
unsigned long last_block;
unsigned long start_seek;
- unsigned int cd_flags;
-
u8 max_speed; /* Max speed of the drive. */
u8 current_speed; /* Current speed of the drive. */
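
The deleted enum is not lost: each IDE_CD_FLAG_* bit has an IDE_AFLAG_* counterpart (presumably now in <linux/ide.h>, outside this hunk), and the driver-private cd_flags word is replaced by the generic drive->atapi_flags so that cd, floppy and tape share one state word. The migration in miniature:

	/* marking a media change (was cd->cd_flags |= IDE_CD_FLAG_...) */
	drive->atapi_flags |= IDE_AFLAG_MEDIA_CHANGED;
	drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID;

	/* later, testing it */
	if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0)
		return -EINVAL;	/* don't serve cached TOC data */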
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 24d002addf7..74231b41f61 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -27,10 +27,9 @@ int ide_cdrom_open_real(struct cdrom_device_info *cdi, int purpose)
void ide_cdrom_release_real(struct cdrom_device_info *cdi)
{
ide_drive_t *drive = cdi->handle;
- struct cdrom_info *cd = drive->driver_data;
if (!cdi->use_count)
- cd->cd_flags &= ~IDE_CD_FLAG_TOC_VALID;
+ drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID;
}
/*
@@ -83,13 +82,12 @@ int ide_cdrom_check_media_change_real(struct cdrom_device_info *cdi,
int slot_nr)
{
ide_drive_t *drive = cdi->handle;
- struct cdrom_info *cd = drive->driver_data;
int retval;
if (slot_nr == CDSL_CURRENT) {
(void) cdrom_check_status(drive, NULL);
- retval = (cd->cd_flags & IDE_CD_FLAG_MEDIA_CHANGED) ? 1 : 0;
- cd->cd_flags &= ~IDE_CD_FLAG_MEDIA_CHANGED;
+ retval = (drive->atapi_flags & IDE_AFLAG_MEDIA_CHANGED) ? 1 : 0;
+ drive->atapi_flags &= ~IDE_AFLAG_MEDIA_CHANGED;
return retval;
} else {
return -EINVAL;
@@ -107,11 +105,11 @@ int cdrom_eject(ide_drive_t *drive, int ejectflag,
char loej = 0x02;
unsigned char cmd[BLK_MAX_CDB];
- if ((cd->cd_flags & IDE_CD_FLAG_NO_EJECT) && !ejectflag)
+ if ((drive->atapi_flags & IDE_AFLAG_NO_EJECT) && !ejectflag)
return -EDRIVE_CANT_DO_THIS;
/* reload fails on some drives, if the tray is locked */
- if ((cd->cd_flags & IDE_CD_FLAG_DOOR_LOCKED) && ejectflag)
+ if ((drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED) && ejectflag)
return 0;
/* only tell drive to close tray if open, if it can do that */
@@ -123,7 +121,7 @@ int cdrom_eject(ide_drive_t *drive, int ejectflag,
cmd[0] = GPCMD_START_STOP_UNIT;
cmd[4] = loej | (ejectflag != 0);
- return ide_cd_queue_pc(drive, cmd, 0, NULL, 0, sense, 0, 0);
+ return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, 0);
}
/* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */
@@ -131,7 +129,6 @@ static
int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
struct request_sense *sense)
{
- struct cdrom_info *cd = drive->driver_data;
struct request_sense my_sense;
int stat;
@@ -139,7 +136,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
sense = &my_sense;
/* If the drive cannot lock the door, just pretend. */
- if (cd->cd_flags & IDE_CD_FLAG_NO_DOORLOCK) {
+ if (drive->atapi_flags & IDE_AFLAG_NO_DOORLOCK) {
stat = 0;
} else {
unsigned char cmd[BLK_MAX_CDB];
@@ -149,7 +146,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
cmd[4] = lockflag ? 1 : 0;
- stat = ide_cd_queue_pc(drive, cmd, 0, NULL, 0,
+ stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL,
sense, 0, 0);
}
@@ -160,7 +157,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
(sense->asc == 0x24 || sense->asc == 0x20)) {
printk(KERN_ERR "%s: door locking not supported\n",
drive->name);
- cd->cd_flags |= IDE_CD_FLAG_NO_DOORLOCK;
+ drive->atapi_flags |= IDE_AFLAG_NO_DOORLOCK;
stat = 0;
}
@@ -170,9 +167,9 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
if (stat == 0) {
if (lockflag)
- cd->cd_flags |= IDE_CD_FLAG_DOOR_LOCKED;
+ drive->atapi_flags |= IDE_AFLAG_DOOR_LOCKED;
else
- cd->cd_flags &= ~IDE_CD_FLAG_DOOR_LOCKED;
+ drive->atapi_flags &= ~IDE_AFLAG_DOOR_LOCKED;
}
return stat;
@@ -231,7 +228,7 @@ int ide_cdrom_select_speed(struct cdrom_device_info *cdi, int speed)
cmd[5] = speed & 0xff;
}
- stat = ide_cd_queue_pc(drive, cmd, 0, NULL, 0, &sense, 0, 0);
+ stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
if (!ide_cdrom_get_capabilities(drive, buf)) {
ide_cdrom_update_speed(drive, buf);
@@ -250,7 +247,7 @@ int ide_cdrom_get_last_session(struct cdrom_device_info *cdi,
struct request_sense sense;
int ret;
- if ((info->cd_flags & IDE_CD_FLAG_TOC_VALID) == 0 || !info->toc) {
+ if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0 || !info->toc) {
ret = ide_cd_read_toc(drive, &sense);
if (ret)
return ret;
@@ -308,7 +305,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
* A reset will unlock the door. If it was previously locked,
* lock it again.
*/
- if (cd->cd_flags & IDE_CD_FLAG_DOOR_LOCKED)
+ if (drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED)
(void)ide_cd_lockdoor(drive, 1, &sense);
return ret;
@@ -324,7 +321,7 @@ static int ide_cd_get_toc_entry(ide_drive_t *drive, int track,
/*
* don't serve cached data, if the toc isn't valid
*/
- if ((info->cd_flags & IDE_CD_FLAG_TOC_VALID) == 0)
+ if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0)
return -EINVAL;
/* Check validity of requested track number. */
@@ -374,7 +371,7 @@ static int ide_cd_fake_play_trkind(ide_drive_t *drive, void *arg)
lba_to_msf(lba_start, &cmd[3], &cmd[4], &cmd[5]);
lba_to_msf(lba_end - 1, &cmd[6], &cmd[7], &cmd[8]);
- return ide_cd_queue_pc(drive, cmd, 0, NULL, 0, &sense, 0, 0);
+ return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
}
static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg)
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 3a2e80237c1..df5fe575687 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -158,7 +158,7 @@ static void ide_tf_set_cmd(ide_drive_t *drive, ide_task_t *task, u8 dma)
write = (task->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0;
if (dma)
- index = drive->vdma ? 4 : 8;
+ index = 8;
else
index = drive->mult_count ? 0 : 4;
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 7ee44f86bc5..be99d463dcc 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -100,10 +100,11 @@ static const struct drive_list_entry drive_blacklist [] = {
ide_startstop_t ide_dma_intr (ide_drive_t *drive)
{
+ ide_hwif_t *hwif = drive->hwif;
u8 stat = 0, dma_stat = 0;
- dma_stat = drive->hwif->dma_ops->dma_end(drive);
- stat = ide_read_status(drive);
+ dma_stat = hwif->dma_ops->dma_end(drive);
+ stat = hwif->tp_ops->read_status(hwif);
if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
if (!dma_stat) {
@@ -334,7 +335,7 @@ static int config_drive_for_dma (ide_drive_t *drive)
static int dma_timer_expiry (ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
- u8 dma_stat = hwif->INB(hwif->dma_status);
+ u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n",
drive->name, dma_stat);
@@ -369,14 +370,18 @@ void ide_dma_host_set(ide_drive_t *drive, int on)
{
ide_hwif_t *hwif = HWIF(drive);
u8 unit = (drive->select.b.unit & 0x01);
- u8 dma_stat = hwif->INB(hwif->dma_status);
+ u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
if (on)
dma_stat |= (1 << (5 + unit));
else
dma_stat &= ~(1 << (5 + unit));
- hwif->OUTB(dma_stat, hwif->dma_status);
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ writeb(dma_stat,
+ (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
+ else
+ outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
}
EXPORT_SYMBOL_GPL(ide_dma_host_set);
@@ -449,6 +454,7 @@ int ide_dma_setup(ide_drive_t *drive)
ide_hwif_t *hwif = drive->hwif;
struct request *rq = HWGROUP(drive)->rq;
unsigned int reading;
+ u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
u8 dma_stat;
if (rq_data_dir(rq))
@@ -470,13 +476,21 @@ int ide_dma_setup(ide_drive_t *drive)
outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);
/* specify r/w */
- hwif->OUTB(reading, hwif->dma_command);
+ if (mmio)
+ writeb(reading, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
+ else
+ outb(reading, hwif->dma_base + ATA_DMA_CMD);
- /* read dma_status for INTR & ERROR flags */
- dma_stat = hwif->INB(hwif->dma_status);
+ /* read DMA status for INTR & ERROR flags */
+ dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
/* clear INTR & ERROR flags */
- hwif->OUTB(dma_stat|6, hwif->dma_status);
+ if (mmio)
+ writeb(dma_stat | 6,
+ (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
+ else
+ outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
+
drive->waiting_for_dma = 1;
return 0;
}
@@ -492,16 +506,24 @@ EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);
void ide_dma_start(ide_drive_t *drive)
{
- ide_hwif_t *hwif = HWIF(drive);
- u8 dma_cmd = hwif->INB(hwif->dma_command);
+ ide_hwif_t *hwif = drive->hwif;
+ u8 dma_cmd;
/* Note that this is done *after* the cmd has
* been issued to the drive, as per the BM-IDE spec.
* The Promise Ultra33 doesn't work correctly when
* we do this part before issuing the drive cmd.
*/
- /* start DMA */
- hwif->OUTB(dma_cmd|1, hwif->dma_command);
+ if (hwif->host_flags & IDE_HFLAG_MMIO) {
+ dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
+ /* start DMA */
+ writeb(dma_cmd | 1,
+ (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
+ } else {
+ dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
+ outb(dma_cmd | 1, hwif->dma_base + ATA_DMA_CMD);
+ }
+
hwif->dma = 1;
wmb();
}
@@ -511,18 +533,33 @@ EXPORT_SYMBOL_GPL(ide_dma_start);
/* returns 1 on error, 0 otherwise */
int __ide_dma_end (ide_drive_t *drive)
{
- ide_hwif_t *hwif = HWIF(drive);
+ ide_hwif_t *hwif = drive->hwif;
+ u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
u8 dma_stat = 0, dma_cmd = 0;
drive->waiting_for_dma = 0;
- /* get dma_command mode */
- dma_cmd = hwif->INB(hwif->dma_command);
- /* stop DMA */
- hwif->OUTB(dma_cmd&~1, hwif->dma_command);
+
+ if (mmio) {
+ /* get DMA command mode */
+ dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
+ /* stop DMA */
+ writeb(dma_cmd & ~1,
+ (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
+ } else {
+ dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
+ outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
+ }
+
/* get DMA status */
- dma_stat = hwif->INB(hwif->dma_status);
- /* clear the INTR & ERROR bits */
- hwif->OUTB(dma_stat|6, hwif->dma_status);
+ dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
+
+ if (mmio)
+ /* clear the INTR & ERROR bits */
+ writeb(dma_stat | 6,
+ (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
+ else
+ outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
+
/* purge DMA mappings */
ide_destroy_dmatable(drive);
/* verify good DMA status */
@@ -537,7 +574,7 @@ EXPORT_SYMBOL(__ide_dma_end);
int ide_dma_test_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
- u8 dma_stat = hwif->INB(hwif->dma_status);
+ u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
/* return 1 if INTR asserted */
if ((dma_stat & 4) == 4)
@@ -719,9 +756,8 @@ static int ide_tune_dma(ide_drive_t *drive)
static int ide_dma_check(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- int vdma = (hwif->host_flags & IDE_HFLAG_VDMA)? 1 : 0;
- if (!vdma && ide_tune_dma(drive))
+ if (ide_tune_dma(drive))
return 0;
/* TODO: always do PIO fallback */
@@ -730,7 +766,7 @@ static int ide_dma_check(ide_drive_t *drive)
ide_set_max_pio(drive);
- return vdma ? 0 : -1;
+ return -1;
}
int ide_id_dma_bug(ide_drive_t *drive)
@@ -842,7 +878,7 @@ int ide_allocate_dma_engine(ide_hwif_t *hwif)
}
EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
-static const struct ide_dma_ops sff_dma_ops = {
+const struct ide_dma_ops sff_dma_ops = {
.dma_host_set = ide_dma_host_set,
.dma_setup = ide_dma_setup,
.dma_exec_cmd = ide_dma_exec_cmd,
@@ -852,18 +888,5 @@ static const struct ide_dma_ops sff_dma_ops = {
.dma_timeout = ide_dma_timeout,
.dma_lost_irq = ide_dma_lost_irq,
};
-
-void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
-{
- hwif->dma_base = base;
-
- if (!hwif->dma_command)
- hwif->dma_command = hwif->dma_base + 0;
- if (!hwif->dma_status)
- hwif->dma_status = hwif->dma_base + 2;
-
- hwif->dma_ops = &sff_dma_ops;
-}
-
-EXPORT_SYMBOL_GPL(ide_setup_dma);
+EXPORT_SYMBOL_GPL(sff_dma_ops);
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
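
With the cached hwif->dma_command/dma_status addresses and the INB/OUTB hooks gone, every access in this file becomes dma_base plus a fixed ATA_DMA_* offset, with IDE_HFLAG_MMIO selecting readb/writeb over inb/outb. The recurring shape, pulled out into a sketch (example_dma_ack is a placeholder name; "| 6" sets the write-to-clear INTR and ERROR bits as in the hunks above):

static void example_dma_ack(ide_hwif_t *hwif, u8 dma_stat)
{
	if (hwif->host_flags & IDE_HFLAG_MMIO)
		writeb(dma_stat | 6,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
	else
		outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
}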
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 011d72011cc..3d8e6dd0f41 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -125,26 +125,10 @@ typedef struct ide_floppy_obj {
int wp;
/* Supports format progress report */
int srfp;
- /* Status/Action flags */
- unsigned long flags;
} idefloppy_floppy_t;
#define IDEFLOPPY_TICKS_DELAY HZ/20 /* default delay for ZIP 100 (50ms) */
-/* Floppy flag bits values. */
-enum {
- /* DRQ interrupt device */
- IDEFLOPPY_FLAG_DRQ_INTERRUPT = (1 << 0),
- /* Media may have changed */
- IDEFLOPPY_FLAG_MEDIA_CHANGED = (1 << 1),
- /* Format in progress */
- IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS = (1 << 2),
- /* Avoid commands not supported in Clik drive */
- IDEFLOPPY_FLAG_CLIK_DRIVE = (1 << 3),
- /* Requires BH algorithm for packets */
- IDEFLOPPY_FLAG_ZIP_DRIVE = (1 << 4),
-};
-
/* Defines for the MODE SENSE command */
#define MODE_SENSE_CURRENT 0x00
#define MODE_SENSE_CHANGEABLE 0x01
@@ -247,9 +231,9 @@ static void ide_floppy_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
data = bvec_kmap_irq(bvec, &flags);
if (direction)
- hwif->output_data(drive, NULL, data, count);
+ hwif->tp_ops->output_data(drive, NULL, data, count);
else
- hwif->input_data(drive, NULL, data, count);
+ hwif->tp_ops->input_data(drive, NULL, data, count);
bvec_kunmap_irq(data, &flags);
bcount -= count;
@@ -291,6 +275,7 @@ static void idefloppy_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
rq->cmd_type = REQ_TYPE_SPECIAL;
rq->cmd_flags |= REQ_PREEMPT;
rq->rq_disk = floppy->disk;
+ memcpy(rq->cmd, pc->c, 12);
ide_do_drive_cmd(drive, rq);
}
@@ -354,7 +339,6 @@ static void idefloppy_init_pc(struct ide_atapi_pc *pc)
memset(pc, 0, sizeof(*pc));
pc->buf = pc->pc_buf;
pc->buf_size = IDEFLOPPY_PC_BUFFER_SIZE;
- pc->callback = ide_floppy_callback;
}
static void idefloppy_create_request_sense_cmd(struct ide_atapi_pc *pc)
@@ -402,7 +386,7 @@ static int idefloppy_transfer_pc(ide_drive_t *drive)
idefloppy_floppy_t *floppy = drive->driver_data;
/* Send the actual packet */
- drive->hwif->output_data(drive, NULL, floppy->pc->c, 12);
+ drive->hwif->tp_ops->output_data(drive, NULL, floppy->pc->c, 12);
/* Timeout for the packet command */
return IDEFLOPPY_WAIT_CMD;
@@ -429,7 +413,7 @@ static ide_startstop_t idefloppy_start_pc_transfer(ide_drive_t *drive)
* 40 and 50 msec work well. idefloppy_pc_intr will not actually be
* used until after the packet is moved in about 50 msec.
*/
- if (pc->flags & PC_FLAG_ZIP_DRIVE) {
+ if (drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) {
timeout = floppy->ticks;
expiry = &idefloppy_transfer_pc;
} else {
@@ -474,7 +458,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
pc->error = IDEFLOPPY_ERROR_GENERAL;
floppy->failed_pc = NULL;
- pc->callback(drive);
+ drive->pc_callback(drive);
return ide_stopped;
}
@@ -574,6 +558,8 @@ static void idefloppy_create_rw_cmd(idefloppy_floppy_t *floppy,
put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]);
put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]);
+ memcpy(rq->cmd, pc->c, 12);
+
pc->rq = rq;
pc->b_count = cmd == READ ? 0 : rq->bio->bi_size;
if (rq->cmd_flags & REQ_RW)
@@ -647,12 +633,6 @@ static ide_startstop_t idefloppy_do_request(ide_drive_t *drive,
return ide_stopped;
}
- if (floppy->flags & IDEFLOPPY_FLAG_DRQ_INTERRUPT)
- pc->flags |= PC_FLAG_DRQ_INTERRUPT;
-
- if (floppy->flags & IDEFLOPPY_FLAG_ZIP_DRIVE)
- pc->flags |= PC_FLAG_ZIP_DRIVE;
-
pc->rq = rq;
return idefloppy_issue_pc(drive, pc);
@@ -671,6 +651,7 @@ static int idefloppy_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->buffer = (char *) pc;
rq->cmd_type = REQ_TYPE_SPECIAL;
+ memcpy(rq->cmd, pc->c, 12);
error = blk_execute_rq(drive->queue, floppy->disk, rq, 0);
blk_put_request(rq);
@@ -795,7 +776,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
switch (pc.buf[desc_start + 4] & 0x03) {
/* Clik! drive returns this instead of CAPACITY_CURRENT */
case CAPACITY_UNFORMATTED:
- if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE))
+ if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
/*
* If it is not a clik drive, break out
* (maintains previous driver behaviour)
@@ -841,7 +822,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
}
/* Clik! disk does not support get_flexible_disk_page */
- if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE))
+ if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
(void) ide_floppy_get_flexible_disk_page(drive);
set_capacity(floppy->disk, floppy->blocks * floppy->bs_factor);
@@ -949,11 +930,12 @@ static int idefloppy_get_format_progress(ide_drive_t *drive, int __user *arg)
/* Else assume format_unit has finished, and we're at 0x10000 */
} else {
+ ide_hwif_t *hwif = drive->hwif;
unsigned long flags;
u8 stat;
local_irq_save(flags);
- stat = ide_read_status(drive);
+ stat = hwif->tp_ops->read_status(hwif);
local_irq_restore(flags);
progress_indication = ((stat & SEEK_STAT) == 0) ? 0 : 0x10000;
@@ -1039,9 +1021,10 @@ static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy)
*((u16 *) &gcw) = drive->id->config;
floppy->pc = floppy->pc_stack;
+ drive->pc_callback = ide_floppy_callback;
if (((gcw[0] & 0x60) >> 5) == 1)
- floppy->flags |= IDEFLOPPY_FLAG_DRQ_INTERRUPT;
+ drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT;
/*
* We used to check revisions here. At this point, however, I'm giving up.
* Just assume they are all broken; it's easier.
@@ -1052,7 +1035,7 @@ static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy)
* we'll leave the limitation below for the 2.2.x tree.
*/
if (!strncmp(drive->id->model, "IOMEGA ZIP 100 ATAPI", 20)) {
- floppy->flags |= IDEFLOPPY_FLAG_ZIP_DRIVE;
+ drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE;
/* This value will be visible in the /proc/ide/hdx/settings */
floppy->ticks = IDEFLOPPY_TICKS_DELAY;
blk_queue_max_sectors(drive->queue, 64);
@@ -1064,7 +1047,7 @@ static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy)
*/
if (strncmp(drive->id->model, "IOMEGA Clik!", 11) == 0) {
blk_queue_max_sectors(drive->queue, 64);
- floppy->flags |= IDEFLOPPY_FLAG_CLIK_DRIVE;
+ drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE;
}
(void) ide_floppy_get_capacity(drive);
@@ -1153,7 +1136,7 @@ static int idefloppy_open(struct inode *inode, struct file *filp)
floppy->openers++;
if (floppy->openers == 1) {
- floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS;
+ drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
/* Just in case */
idefloppy_init_pc(&pc);
@@ -1180,14 +1163,14 @@ static int idefloppy_open(struct inode *inode, struct file *filp)
ret = -EROFS;
goto out_put_floppy;
}
- floppy->flags |= IDEFLOPPY_FLAG_MEDIA_CHANGED;
+ drive->atapi_flags |= IDE_AFLAG_MEDIA_CHANGED;
/* IOMEGA Clik! drives do not support lock/unlock commands */
- if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) {
+ if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) {
idefloppy_create_prevent_cmd(&pc, 1);
(void) idefloppy_queue_pc_tail(drive, &pc);
}
check_disk_change(inode->i_bdev);
- } else if (floppy->flags & IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS) {
+ } else if (drive->atapi_flags & IDE_AFLAG_FORMAT_IN_PROGRESS) {
ret = -EBUSY;
goto out_put_floppy;
}
@@ -1210,12 +1193,12 @@ static int idefloppy_release(struct inode *inode, struct file *filp)
if (floppy->openers == 1) {
/* IOMEGA Clik! drives do not support lock/unlock commands */
- if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) {
+ if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) {
idefloppy_create_prevent_cmd(&pc, 0);
(void) idefloppy_queue_pc_tail(drive, &pc);
}
- floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS;
+ drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
}
floppy->openers--;
@@ -1236,15 +1219,17 @@ static int idefloppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static int ide_floppy_lockdoor(idefloppy_floppy_t *floppy,
- struct ide_atapi_pc *pc, unsigned long arg, unsigned int cmd)
+static int ide_floppy_lockdoor(ide_drive_t *drive, struct ide_atapi_pc *pc,
+ unsigned long arg, unsigned int cmd)
{
+ idefloppy_floppy_t *floppy = drive->driver_data;
+
if (floppy->openers > 1)
return -EBUSY;
/* The IOMEGA Clik! Drive doesn't support this command -
* no room for an eject mechanism */
- if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) {
+ if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) {
int prevent = arg ? 1 : 0;
if (cmd == CDROMEJECT)
@@ -1265,16 +1250,17 @@ static int ide_floppy_lockdoor(idefloppy_floppy_t *floppy,
static int ide_floppy_format_unit(idefloppy_floppy_t *floppy,
int __user *arg)
{
- int blocks, length, flags, err = 0;
struct ide_atapi_pc pc;
+ ide_drive_t *drive = floppy->drive;
+ int blocks, length, flags, err = 0;
if (floppy->openers > 1) {
/* Don't format if someone is using the disk */
- floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS;
+ drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
return -EBUSY;
}
- floppy->flags |= IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS;
+ drive->atapi_flags |= IDE_AFLAG_FORMAT_IN_PROGRESS;
/*
* Send ATAPI_FORMAT_UNIT to the drive.
@@ -1298,15 +1284,15 @@ static int ide_floppy_format_unit(idefloppy_floppy_t *floppy,
goto out;
}
- (void) idefloppy_get_sfrp_bit(floppy->drive);
+ (void) idefloppy_get_sfrp_bit(drive);
idefloppy_create_format_unit_cmd(&pc, blocks, length, flags);
- if (idefloppy_queue_pc_tail(floppy->drive, &pc))
+ if (idefloppy_queue_pc_tail(drive, &pc))
err = -EIO;
out:
if (err)
- floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS;
+ drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
return err;
}
@@ -1325,7 +1311,7 @@ static int idefloppy_ioctl(struct inode *inode, struct file *file,
case CDROMEJECT:
/* fall through */
case CDROM_LOCKDOOR:
- return ide_floppy_lockdoor(floppy, &pc, arg, cmd);
+ return ide_floppy_lockdoor(drive, &pc, arg, cmd);
case IDEFLOPPY_IOCTL_FORMAT_SUPPORTED:
return 0;
case IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY:
@@ -1366,8 +1352,8 @@ static int idefloppy_media_changed(struct gendisk *disk)
drive->attach = 0;
return 0;
}
- ret = !!(floppy->flags & IDEFLOPPY_FLAG_MEDIA_CHANGED);
- floppy->flags &= ~IDEFLOPPY_FLAG_MEDIA_CHANGED;
+ ret = !!(drive->atapi_flags & IDE_AFLAG_MEDIA_CHANGED);
+ drive->atapi_flags &= ~IDE_AFLAG_MEDIA_CHANGED;
return ret;
}
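
The ide-floppy hunks above are one instance of a pattern this merge applies across all three ATAPI drivers: driver-private flag words (floppy->flags and friends) are folded into the shared drive->atapi_flags, and the per-command callback pointer moves to drive->pc_callback, so the generic ATAPI helpers can reach both without knowing which driver owns the drive. A minimal sketch of the resulting idiom, using the IDE_AFLAG_CLIK_DRIVE bit from the patch (the helper name is hypothetical):

        #include <linux/ide.h>

        /*
         * Sketch only: the per-driver flag word is gone, so a generic
         * helper can key off drive->atapi_flags directly.
         */
        static inline int ide_floppy_door_locking_ok(ide_drive_t *drive)
        {
                /* Clik! drives have no door lock, so skip prevent/allow */
                return (drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE) == 0;
        }
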
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
index 2d92214096a..31d98fec775 100644
--- a/drivers/ide/ide-generic.c
+++ b/drivers/ide/ide-generic.c
@@ -28,29 +28,21 @@ MODULE_PARM_DESC(probe_mask, "probe mask for legacy ISA IDE ports");
static ssize_t store_add(struct class *cls, const char *buf, size_t n)
{
- ide_hwif_t *hwif;
unsigned int base, ctl;
- int irq;
- hw_regs_t hw;
- u8 idx[] = { 0xff, 0xff, 0xff, 0xff };
+ int irq, rc;
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
if (sscanf(buf, "%x:%x:%d", &base, &ctl, &irq) != 3)
return -EINVAL;
- hwif = ide_find_port();
- if (hwif == NULL)
- return -ENOENT;
-
memset(&hw, 0, sizeof(hw));
ide_std_init_ports(&hw, base, ctl);
hw.irq = irq;
hw.chipset = ide_generic;
- ide_init_port_hw(hwif, &hw);
-
- idx[0] = hwif->index;
-
- ide_device_add(idx, NULL);
+ rc = ide_host_add(NULL, hws, NULL);
+ if (rc)
+ return rc;
return n;
};
@@ -90,18 +82,18 @@ static int __init ide_generic_sysfs_init(void)
static int __init ide_generic_init(void)
{
- u8 idx[MAX_HWIFS];
- int i;
+ hw_regs_t hw[MAX_HWIFS], *hws[MAX_HWIFS];
+ struct ide_host *host;
+ unsigned long io_addr;
+ int i, rc;
printk(KERN_INFO DRV_NAME ": please use \"probe_mask=0x3f\" module "
"parameter for probing all legacy ISA IDE ports\n");
for (i = 0; i < MAX_HWIFS; i++) {
- ide_hwif_t *hwif;
- unsigned long io_addr = ide_default_io_base(i);
- hw_regs_t hw;
+ io_addr = ide_default_io_base(i);
- idx[i] = 0xff;
+ hws[i] = NULL;
if ((probe_mask & (1 << i)) && io_addr) {
if (!request_region(io_addr, 8, DRV_NAME)) {
@@ -119,33 +111,42 @@ static int __init ide_generic_init(void)
continue;
}
- /*
- * Skip probing if the corresponding
- * slot is already occupied.
- */
- hwif = ide_find_port();
- if (hwif == NULL || hwif->index != i) {
- idx[i] = 0xff;
- continue;
- }
-
- memset(&hw, 0, sizeof(hw));
- ide_std_init_ports(&hw, io_addr, io_addr + 0x206);
- hw.irq = ide_default_irq(io_addr);
- hw.chipset = ide_generic;
- ide_init_port_hw(hwif, &hw);
+ memset(&hw[i], 0, sizeof(hw[i]));
+ ide_std_init_ports(&hw[i], io_addr, io_addr + 0x206);
+ hw[i].irq = ide_default_irq(io_addr);
+ hw[i].chipset = ide_generic;
- idx[i] = i;
+ hws[i] = &hw[i];
}
}
- ide_device_add_all(idx, NULL);
+ host = ide_host_alloc_all(NULL, hws);
+ if (host == NULL) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ rc = ide_host_register(host, NULL, hws);
+ if (rc)
+ goto err_free;
if (ide_generic_sysfs_init())
printk(KERN_ERR DRV_NAME ": failed to create ide_generic "
"class\n");
return 0;
+err_free:
+ ide_host_free(host);
+err:
+ for (i = 0; i < MAX_HWIFS; i++) {
+ if (hws[i] == NULL)
+ continue;
+
+ io_addr = hws[i]->io_ports.data_addr;
+ release_region(io_addr + 0x206, 1);
+ release_region(io_addr, 8);
+ }
+ return rc;
}
module_init(ide_generic_init);
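
The conversion above is the template for every host driver in this series: describe each port in a hw_regs_t, collect the pointers in a fixed four-slot array, and hand the array to ide_host_add(), which allocates, registers and, on failure, frees the struct ide_host. A self-contained sketch with illustrative base/ctl/irq values:

        #include <linux/string.h>
        #include <linux/ide.h>

        /* sketch of the ide_host_add() idiom used by store_add() above */
        static int example_add_port(unsigned long base, unsigned long ctl,
                                    int irq)
        {
                hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };

                memset(&hw, 0, sizeof(hw));
                ide_std_init_ports(&hw, base, ctl);
                hw.irq = irq;
                hw.chipset = ide_generic;

                /* NULL port_info; not interested in the ide_host pointer */
                return ide_host_add(NULL, hws, NULL);
        }

Passing NULL as the last argument, as ide-generic's store_add() does, simply discards the host pointer; drivers that need it later (for removal, say) pass a struct ide_host ** instead.
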
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 661b75a89d4..a896a283f27 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -330,7 +330,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
tf->error = err;
tf->status = stat;
- drive->hwif->tf_read(drive, task);
+ drive->hwif->tp_ops->tf_read(drive, task);
if (task->tf_flags & IDE_TFLAG_DYN)
kfree(task);
@@ -381,8 +381,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
if (err == ABRT_ERR) {
if (drive->select.b.lba &&
/* some newer drives don't support WIN_SPECIFY */
- hwif->INB(hwif->io_ports.command_addr) ==
- WIN_SPECIFY)
+ hwif->tp_ops->read_status(hwif) == WIN_SPECIFY)
return ide_stopped;
} else if ((err & BAD_CRC) == BAD_CRC) {
/* UDMA crc error, just retry the operation */
@@ -408,7 +407,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
return ide_stopped;
}
- if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
+ if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
rq->errors |= ERROR_RESET;
if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
@@ -435,10 +434,9 @@ static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u
/* add decoding error stuff */
}
- if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
+ if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
/* force an abort */
- hwif->OUTBSYNC(hwif, WIN_IDLEIMMEDIATE,
- hwif->io_ports.command_addr);
+ hwif->tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE);
if (rq->errors >= ERROR_MAX) {
ide_kill_rq(drive, rq);
@@ -712,7 +710,8 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
#ifdef DEBUG
printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
- ide_end_drive_cmd(drive, ide_read_status(drive), ide_read_error(drive));
+ ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
+ ide_read_error(drive));
return ide_stopped;
}
@@ -747,16 +746,17 @@ static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
* the bus may be broken enough to walk on our toes at this
* point.
*/
+ ide_hwif_t *hwif = drive->hwif;
int rc;
#ifdef DEBUG_PM
printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
- rc = ide_wait_not_busy(HWIF(drive), 35000);
+ rc = ide_wait_not_busy(hwif, 35000);
if (rc)
printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
SELECT_DRIVE(drive);
- ide_set_irq(drive, 1);
- rc = ide_wait_not_busy(HWIF(drive), 100000);
+ hwif->tp_ops->set_irq(hwif, 1);
+ rc = ide_wait_not_busy(hwif, 100000);
if (rc)
printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
}
@@ -1042,7 +1042,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
* quirk_list may not like intr setups/cleanups
*/
if (drive->quirk_list != 1)
- ide_set_irq(drive, 0);
+ hwif->tp_ops->set_irq(hwif, 0);
}
hwgroup->hwif = hwif;
hwgroup->drive = drive;
@@ -1142,7 +1142,7 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
(void)hwif->dma_ops->dma_end(drive);
ret = ide_error(drive, "dma timeout error",
- ide_read_status(drive));
+ hwif->tp_ops->read_status(hwif));
} else {
printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
hwif->dma_ops->dma_timeout(drive);
@@ -1267,7 +1267,7 @@ void ide_timer_expiry (unsigned long data)
} else
startstop =
ide_error(drive, "irq timeout",
- ide_read_status(drive));
+ hwif->tp_ops->read_status(hwif));
}
drive->service_time = jiffies - drive->service_start;
spin_lock_irq(&ide_lock);
@@ -1323,7 +1323,8 @@ static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
*/
do {
if (hwif->irq == irq) {
- stat = hwif->INB(hwif->io_ports.status_addr);
+ stat = hwif->tp_ops->read_status(hwif);
+
if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
/* Try to not flood the console with msgs */
static unsigned long last_msgtime, count;
@@ -1413,7 +1414,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
* Whack the status register, just in case
* we have a leftover pending IRQ.
*/
- (void) hwif->INB(hwif->io_ports.status_addr);
+ (void)hwif->tp_ops->read_status(hwif);
#endif /* CONFIG_BLK_DEV_IDEPCI */
}
spin_unlock_irqrestore(&ide_lock, flags);
@@ -1519,6 +1520,7 @@ EXPORT_SYMBOL(ide_do_drive_cmd);
void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
{
+ ide_hwif_t *hwif = drive->hwif;
ide_task_t task;
memset(&task, 0, sizeof(task));
@@ -1529,9 +1531,9 @@ void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
task.tf.lbah = (bcount >> 8) & 0xff;
ide_tf_dump(drive->name, &task.tf);
- ide_set_irq(drive, 1);
+ hwif->tp_ops->set_irq(hwif, 1);
SELECT_MASK(drive, 0);
- drive->hwif->tf_load(drive, &task);
+ hwif->tp_ops->tf_load(drive, &task);
}
EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
@@ -1543,9 +1545,9 @@ void ide_pad_transfer(ide_drive_t *drive, int write, int len)
while (len > 0) {
if (write)
- hwif->output_data(drive, NULL, buf, min(4, len));
+ hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
else
- hwif->input_data(drive, NULL, buf, min(4, len));
+ hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
len -= 4;
}
}
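
All of the ide-io.c hunks reduce to one calling convention: core code stops poking hwif->INB()/OUTB()/OUTBSYNC() and the old per-hwif method pointers, and instead goes through the hwif->tp_ops vector, so PIO and MMIO hosts share a single code path. A sketch of the idiom (the helper name is illustrative):

        #include <linux/ide.h>

        /* sketch: every register access now routes through tp_ops */
        static inline u8 example_ack_drive_irq(ide_hwif_t *hwif)
        {
                /* reading the status register also clears a pending drive IRQ */
                return hwif->tp_ops->read_status(hwif);
        }
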
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 44aaec256a3..07da5fb9eaf 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -42,18 +42,6 @@ static void ide_outb (u8 val, unsigned long port)
outb(val, port);
}
-static void ide_outbsync(ide_hwif_t *hwif, u8 addr, unsigned long port)
-{
- outb(addr, port);
-}
-
-void default_hwif_iops (ide_hwif_t *hwif)
-{
- hwif->OUTB = ide_outb;
- hwif->OUTBSYNC = ide_outbsync;
- hwif->INB = ide_inb;
-}
-
/*
* MMIO operations, typically used for SATA controllers
*/
@@ -68,31 +56,19 @@ static void ide_mm_outb (u8 value, unsigned long port)
writeb(value, (void __iomem *) port);
}
-static void ide_mm_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port)
-{
- writeb(value, (void __iomem *) port);
-}
-
-void default_hwif_mmiops (ide_hwif_t *hwif)
-{
- hwif->OUTB = ide_mm_outb;
- /* Most systems will need to override OUTBSYNC, alas however
- this one is controller specific! */
- hwif->OUTBSYNC = ide_mm_outbsync;
- hwif->INB = ide_mm_inb;
-}
-
-EXPORT_SYMBOL(default_hwif_mmiops);
-
void SELECT_DRIVE (ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
const struct ide_port_ops *port_ops = hwif->port_ops;
+ ide_task_t task;
if (port_ops && port_ops->selectproc)
port_ops->selectproc(drive);
- hwif->OUTB(drive->select.all, hwif->io_ports.device_addr);
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_OUT_DEVICE;
+
+ drive->hwif->tp_ops->tf_load(drive, &task);
}
void SELECT_MASK(ide_drive_t *drive, int mask)
@@ -103,7 +79,61 @@ void SELECT_MASK(ide_drive_t *drive, int mask)
port_ops->maskproc(drive, mask);
}
-static void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
+void ide_exec_command(ide_hwif_t *hwif, u8 cmd)
+{
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
+ else
+ outb(cmd, hwif->io_ports.command_addr);
+}
+EXPORT_SYMBOL_GPL(ide_exec_command);
+
+u8 ide_read_status(ide_hwif_t *hwif)
+{
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ return readb((void __iomem *)hwif->io_ports.status_addr);
+ else
+ return inb(hwif->io_ports.status_addr);
+}
+EXPORT_SYMBOL_GPL(ide_read_status);
+
+u8 ide_read_altstatus(ide_hwif_t *hwif)
+{
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ return readb((void __iomem *)hwif->io_ports.ctl_addr);
+ else
+ return inb(hwif->io_ports.ctl_addr);
+}
+EXPORT_SYMBOL_GPL(ide_read_altstatus);
+
+u8 ide_read_sff_dma_status(ide_hwif_t *hwif)
+{
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ return readb((void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
+ else
+ return inb(hwif->dma_base + ATA_DMA_STATUS);
+}
+EXPORT_SYMBOL_GPL(ide_read_sff_dma_status);
+
+void ide_set_irq(ide_hwif_t *hwif, int on)
+{
+ u8 ctl = ATA_DEVCTL_OBS;
+
+ if (on == 4) { /* hack for SRST */
+ ctl |= 4;
+ on &= ~4;
+ }
+
+ ctl |= on ? 0 : 2;
+
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
+ else
+ outb(ctl, hwif->io_ports.ctl_addr);
+}
+EXPORT_SYMBOL_GPL(ide_set_irq);
+
+void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
{
ide_hwif_t *hwif = drive->hwif;
struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -155,8 +185,9 @@ static void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
tf_outb((tf->device & HIHI) | drive->select.all,
io_ports->device_addr);
}
+EXPORT_SYMBOL_GPL(ide_tf_load);
-static void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
+void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
{
ide_hwif_t *hwif = drive->hwif;
struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -188,6 +219,8 @@ static void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
/* be sure we're looking at the low order bits */
tf_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
+ tf->feature = tf_inb(io_ports->feature_addr);
if (task->tf_flags & IDE_TFLAG_IN_NSECT)
tf->nsect = tf_inb(io_ports->nsect_addr);
if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -214,6 +247,7 @@ static void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
tf->hob_lbah = tf_inb(io_ports->lbah_addr);
}
}
+EXPORT_SYMBOL_GPL(ide_tf_read);
/*
* Some localbus EIDE interfaces require a special access sequence
@@ -236,8 +270,8 @@ static void ata_vlb_sync(unsigned long port)
* so if an odd len is specified, be sure that there's at least one
* extra byte allocated for the buffer.
*/
-static void ata_input_data(ide_drive_t *drive, struct request *rq,
- void *buf, unsigned int len)
+void ide_input_data(ide_drive_t *drive, struct request *rq, void *buf,
+ unsigned int len)
{
ide_hwif_t *hwif = drive->hwif;
struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -277,12 +311,13 @@ static void ata_input_data(ide_drive_t *drive, struct request *rq,
insw(data_addr, buf, len / 2);
}
}
+EXPORT_SYMBOL_GPL(ide_input_data);
/*
* This is used for most PIO data transfers *to* the IDE interface
*/
-static void ata_output_data(ide_drive_t *drive, struct request *rq,
- void *buf, unsigned int len)
+void ide_output_data(ide_drive_t *drive, struct request *rq, void *buf,
+ unsigned int len)
{
ide_hwif_t *hwif = drive->hwif;
struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -320,15 +355,50 @@ static void ata_output_data(ide_drive_t *drive, struct request *rq,
outsw(data_addr, buf, len / 2);
}
}
+EXPORT_SYMBOL_GPL(ide_output_data);
+
+u8 ide_read_error(ide_drive_t *drive)
+{
+ ide_task_t task;
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_IN_FEATURE;
+
+ drive->hwif->tp_ops->tf_read(drive, &task);
+
+ return task.tf.error;
+}
+EXPORT_SYMBOL_GPL(ide_read_error);
-void default_hwif_transport(ide_hwif_t *hwif)
+void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason)
{
- hwif->tf_load = ide_tf_load;
- hwif->tf_read = ide_tf_read;
+ ide_task_t task;
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM |
+ IDE_TFLAG_IN_NSECT;
- hwif->input_data = ata_input_data;
- hwif->output_data = ata_output_data;
+ drive->hwif->tp_ops->tf_read(drive, &task);
+
+ *bcount = (task.tf.lbah << 8) | task.tf.lbam;
+ *ireason = task.tf.nsect & 3;
}
+EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason);
+
+const struct ide_tp_ops default_tp_ops = {
+ .exec_command = ide_exec_command,
+ .read_status = ide_read_status,
+ .read_altstatus = ide_read_altstatus,
+ .read_sff_dma_status = ide_read_sff_dma_status,
+
+ .set_irq = ide_set_irq,
+
+ .tf_load = ide_tf_load,
+ .tf_read = ide_tf_read,
+
+ .input_data = ide_input_data,
+ .output_data = ide_output_data,
+};
void ide_fix_driveid (struct hd_driveid *id)
{
@@ -483,10 +553,10 @@ int drive_is_ready (ide_drive_t *drive)
* about possible isa-pnp and pci-pnp issues yet.
*/
if (hwif->io_ports.ctl_addr)
- stat = ide_read_altstatus(drive);
+ stat = hwif->tp_ops->read_altstatus(hwif);
else
/* Note: this may clear a pending IRQ!! */
- stat = ide_read_status(drive);
+ stat = hwif->tp_ops->read_status(hwif);
if (stat & BUSY_STAT)
/* drive busy: definitely not interrupting */
@@ -511,24 +581,26 @@ EXPORT_SYMBOL(drive_is_ready);
*/
static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat)
{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
unsigned long flags;
int i;
u8 stat;
udelay(1); /* spec allows drive 400ns to assert "BUSY" */
- stat = ide_read_status(drive);
+ stat = tp_ops->read_status(hwif);
if (stat & BUSY_STAT) {
local_irq_set(flags);
timeout += jiffies;
- while ((stat = ide_read_status(drive)) & BUSY_STAT) {
+ while ((stat = tp_ops->read_status(hwif)) & BUSY_STAT) {
if (time_after(jiffies, timeout)) {
/*
* One last read after the timeout in case
* heavy interrupt load made us not make any
* progress during the timeout..
*/
- stat = ide_read_status(drive);
+ stat = tp_ops->read_status(hwif);
if (!(stat & BUSY_STAT))
break;
@@ -548,7 +620,7 @@ static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long ti
*/
for (i = 0; i < 10; i++) {
udelay(1);
- stat = ide_read_status(drive);
+ stat = tp_ops->read_status(hwif);
if (OK_STAT(stat, good, bad)) {
*rstat = stat;
@@ -674,6 +746,7 @@ no_80w:
int ide_driveid_update(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
struct hd_driveid *id;
unsigned long timeout, flags;
u8 stat;
@@ -684,9 +757,9 @@ int ide_driveid_update(ide_drive_t *drive)
*/
SELECT_MASK(drive, 1);
- ide_set_irq(drive, 0);
+ tp_ops->set_irq(hwif, 0);
msleep(50);
- hwif->OUTBSYNC(hwif, WIN_IDENTIFY, hwif->io_ports.command_addr);
+ tp_ops->exec_command(hwif, WIN_IDENTIFY);
timeout = jiffies + WAIT_WORSTCASE;
do {
if (time_after(jiffies, timeout)) {
@@ -695,11 +768,11 @@ int ide_driveid_update(ide_drive_t *drive)
}
msleep(50); /* give drive a breather */
- stat = ide_read_altstatus(drive);
+ stat = tp_ops->read_altstatus(hwif);
} while (stat & BUSY_STAT);
msleep(50); /* wait for IRQ and DRQ_STAT */
- stat = ide_read_status(drive);
+ stat = tp_ops->read_status(hwif);
if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
SELECT_MASK(drive, 0);
@@ -713,8 +786,8 @@ int ide_driveid_update(ide_drive_t *drive)
local_irq_restore(flags);
return 0;
}
- hwif->input_data(drive, NULL, id, SECTOR_SIZE);
- (void)ide_read_status(drive); /* clear drive IRQ */
+ tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
+ (void)tp_ops->read_status(hwif); /* clear drive IRQ */
local_irq_enable();
local_irq_restore(flags);
ide_fix_driveid(id);
@@ -735,9 +808,10 @@ int ide_driveid_update(ide_drive_t *drive)
int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
{
ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
int error = 0;
u8 stat;
+ ide_task_t task;
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_ops) /* check if host supports DMA */
@@ -770,12 +844,19 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
SELECT_DRIVE(drive);
SELECT_MASK(drive, 0);
udelay(1);
- ide_set_irq(drive, 0);
- hwif->OUTB(speed, io_ports->nsect_addr);
- hwif->OUTB(SETFEATURES_XFER, io_ports->feature_addr);
- hwif->OUTBSYNC(hwif, WIN_SETFEATURES, io_ports->command_addr);
+ tp_ops->set_irq(hwif, 0);
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT;
+ task.tf.feature = SETFEATURES_XFER;
+ task.tf.nsect = speed;
+
+ tp_ops->tf_load(drive, &task);
+
+ tp_ops->exec_command(hwif, WIN_SETFEATURES);
+
if (drive->quirk_list == 2)
- ide_set_irq(drive, 1);
+ tp_ops->set_irq(hwif, 1);
error = __ide_wait_stat(drive, drive->ready_stat,
BUSY_STAT|DRQ_STAT|ERR_STAT,
@@ -796,8 +877,7 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
skip:
#ifdef CONFIG_BLK_DEV_IDEDMA
- if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) &&
- drive->using_dma)
+ if (speed >= XFER_SW_DMA_0 && drive->using_dma)
hwif->dma_ops->dma_host_set(drive, 1);
else if (hwif->dma_ops) /* check if host supports DMA */
ide_dma_off_quietly(drive);
@@ -881,7 +961,7 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
spin_lock_irqsave(&ide_lock, flags);
__ide_set_handler(drive, handler, timeout, expiry);
- hwif->OUTBSYNC(hwif, cmd, hwif->io_ports.command_addr);
+ hwif->tp_ops->exec_command(hwif, cmd);
/*
* The drive takes 400 ns to respond; we must avoid the IRQ being
* serviced before that.
@@ -899,7 +979,7 @@ void ide_execute_pkt_cmd(ide_drive_t *drive)
unsigned long flags;
spin_lock_irqsave(&ide_lock, flags);
- hwif->OUTBSYNC(hwif, WIN_PACKETCMD, hwif->io_ports.command_addr);
+ hwif->tp_ops->exec_command(hwif, WIN_PACKETCMD);
ndelay(400);
spin_unlock_irqrestore(&ide_lock, flags);
}
@@ -924,12 +1004,13 @@ static ide_startstop_t do_reset1 (ide_drive_t *, int);
*/
static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
{
- ide_hwgroup_t *hwgroup = HWGROUP(drive);
+ ide_hwif_t *hwif = drive->hwif;
+ ide_hwgroup_t *hwgroup = hwif->hwgroup;
u8 stat;
SELECT_DRIVE(drive);
udelay (10);
- stat = ide_read_status(drive);
+ stat = hwif->tp_ops->read_status(hwif);
if (OK_STAT(stat, 0, BUSY_STAT))
printk("%s: ATAPI reset complete\n", drive->name);
@@ -975,7 +1056,7 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
}
}
- tmp = ide_read_status(drive);
+ tmp = hwif->tp_ops->read_status(hwif);
if (!OK_STAT(tmp, 0, BUSY_STAT)) {
if (time_before(jiffies, hwgroup->poll_timeout)) {
@@ -1089,8 +1170,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
ide_hwif_t *hwif;
ide_hwgroup_t *hwgroup;
struct ide_io_ports *io_ports;
+ const struct ide_tp_ops *tp_ops;
const struct ide_port_ops *port_ops;
- u8 ctl;
spin_lock_irqsave(&ide_lock, flags);
hwif = HWIF(drive);
@@ -1098,6 +1179,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
io_ports = &hwif->io_ports;
+ tp_ops = hwif->tp_ops;
+
/* We must not reset with running handlers */
BUG_ON(hwgroup->handler != NULL);
@@ -1106,7 +1189,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
pre_reset(drive);
SELECT_DRIVE(drive);
udelay (20);
- hwif->OUTBSYNC(hwif, WIN_SRST, io_ports->command_addr);
+ tp_ops->exec_command(hwif, WIN_SRST);
ndelay(400);
hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
hwgroup->polling = 1;
@@ -1135,16 +1218,15 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
* immediate interrupt due to the edge transition it produces.
* This single interrupt gives us a "fast poll" for drives that
* recover from reset very quickly, saving us the first 50ms wait time.
+ *
+ * TODO: add ->softreset method and stop abusing ->set_irq
*/
/* set SRST and nIEN */
- hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS | 6, io_ports->ctl_addr);
+ tp_ops->set_irq(hwif, 4);
/* more than enough time */
udelay(10);
- if (drive->quirk_list == 2)
- ctl = ATA_DEVCTL_OBS; /* clear SRST and nIEN */
- else
- ctl = ATA_DEVCTL_OBS | 2; /* clear SRST, leave nIEN */
- hwif->OUTBSYNC(hwif, ctl, io_ports->ctl_addr);
+ /* clear SRST, leave nIEN (unless device is on the quirk list) */
+ tp_ops->set_irq(hwif, drive->quirk_list == 2);
/* more than enough time */
udelay(10);
hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
@@ -1189,7 +1271,7 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
* about locking issues (2.5 work ?).
*/
mdelay(1);
- stat = hwif->INB(hwif->io_ports.status_addr);
+ stat = hwif->tp_ops->read_status(hwif);
if ((stat & BUSY_STAT) == 0)
return 0;
/*
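
With ide_exec_command(), ide_read_status() and the other default accessors exported above, a host driver that needs one special accessor no longer patches hwif->OUTBSYNC at init time; it supplies its own struct ide_tp_ops built mostly from the defaults. A hypothetical example (scc_pata is converted along these lines elsewhere in this merge, but this exact code is a sketch):

        #include <linux/io.h>
        #include <linux/ide.h>

        /* sketch: override one accessor, reuse the exported defaults */
        static void example_exec_command(ide_hwif_t *hwif, u8 cmd)
        {
                writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
                /* flush the posted MMIO write before the 400 ns wait starts */
                (void)readb((void __iomem *)hwif->io_ports.ctl_addr);
        }

        static const struct ide_tp_ops example_tp_ops = {
                .exec_command           = example_exec_command,
                .read_status            = ide_read_status,
                .read_altstatus         = ide_read_altstatus,
                .read_sff_dma_status    = ide_read_sff_dma_status,
                .set_irq                = ide_set_irq,
                .tf_load                = ide_tf_load,
                .tf_read                = ide_tf_read,
                .input_data             = ide_input_data,
                .output_data            = ide_output_data,
        };

A driver plugs this in via the .tp_ops field of its struct ide_port_info; ide_init_port() copies it to hwif->tp_ops, as the ide-probe.c hunk below shows.
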
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 13af72f09ec..97fefabea8b 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -266,22 +266,11 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
rate = ide_rate_filter(drive, rate);
+ BUG_ON(rate < XFER_PIO_0);
+
if (rate >= XFER_PIO_0 && rate <= XFER_PIO_5)
return ide_set_pio_mode(drive, rate);
- /*
- * TODO: transfer modes 0x00-0x07 passed from the user-space are
- * currently handled here which needs fixing (please note that such
- * case could happen iff the transfer mode has already been set on
- * the device by ide-proc.c::set_xfer_rate()).
- */
- if (rate < XFER_PIO_0) {
- if (hwif->host_flags & IDE_HFLAG_ABUSE_SET_DMA_MODE)
- return ide_set_dma_mode(drive, rate);
- else
- return ide_config_drive_speed(drive, rate);
- }
-
return ide_set_dma_mode(drive, rate);
}
@@ -336,7 +325,7 @@ static void ide_dump_sector(ide_drive_t *drive)
else
task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE;
- drive->hwif->tf_read(drive, &task);
+ drive->hwif->tp_ops->tf_read(drive, &task);
if (lba48 || (tf->device & ATA_LBA))
printk(", LBAsect=%llu",
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index 03f2ef5470a..bac9b392b68 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -29,9 +29,10 @@ static struct pnp_device_id idepnp_devices[] = {
static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
{
- hw_regs_t hw;
- ide_hwif_t *hwif;
+ struct ide_host *host;
unsigned long base, ctl;
+ int rc;
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n");
@@ -59,31 +60,25 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
hw.irq = pnp_irq(dev, 0);
hw.chipset = ide_generic;
- hwif = ide_find_port();
- if (hwif) {
- u8 index = hwif->index;
- u8 idx[4] = { index, 0xff, 0xff, 0xff };
+ rc = ide_host_add(NULL, hws, &host);
+ if (rc)
+ goto out;
- ide_init_port_hw(hwif, &hw);
-
- pnp_set_drvdata(dev, hwif);
-
- ide_device_add(idx, NULL);
-
- return 0;
- }
+ pnp_set_drvdata(dev, host);
+ return 0;
+out:
release_region(ctl, 1);
release_region(base, 8);
- return -1;
+ return rc;
}
static void idepnp_remove(struct pnp_dev *dev)
{
- ide_hwif_t *hwif = pnp_get_drvdata(dev);
+ struct ide_host *host = pnp_get_drvdata(dev);
- ide_unregister(hwif);
+ ide_host_remove(host);
release_region(pnp_port_start(dev, 1), 1);
release_region(pnp_port_start(dev, 0), 8);
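
idepnp also demonstrates the split form of the new interface, for drivers that want to keep the struct ide_host around between steps. A sketch under the same assumptions (the drvdata parameter and function name are illustrative):

        #include <linux/errno.h>
        #include <linux/ide.h>

        /* sketch: ide_host_alloc()/ide_host_register()/ide_host_free()
         * as separate steps, mirroring idepnp_probe() above */
        static int example_probe_split(hw_regs_t **hws, void **drvdata)
        {
                struct ide_host *host;
                int rc;

                host = ide_host_alloc(NULL, hws);
                if (host == NULL)
                        return -ENOMEM;

                rc = ide_host_register(host, NULL, hws);
                if (rc) {
                        ide_host_free(host);
                        return rc;
                }

                *drvdata = host;        /* e.g. pnp_set_drvdata(dev, host) */
                return 0;
        }
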
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 235ebdb29b2..4aa76c45375 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -39,8 +39,6 @@
#include <asm/uaccess.h>
#include <asm/io.h>
-static ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */
-
/**
* generic_id - add a generic drive id
* @drive: drive to make an ID block for
@@ -126,7 +124,7 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
id = drive->id;
/* read 512 bytes of id info */
- hwif->input_data(drive, NULL, id, SECTOR_SIZE);
+ hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
drive->id_read = 1;
local_irq_enable();
@@ -267,6 +265,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
{
ide_hwif_t *hwif = HWIF(drive);
struct ide_io_ports *io_ports = &hwif->io_ports;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
int use_altstatus = 0, rc;
unsigned long timeout;
u8 s = 0, a = 0;
@@ -275,8 +274,8 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
msleep(50);
if (io_ports->ctl_addr) {
- a = ide_read_altstatus(drive);
- s = ide_read_status(drive);
+ a = tp_ops->read_altstatus(hwif);
+ s = tp_ops->read_status(hwif);
if ((a ^ s) & ~INDEX_STAT)
/* ancient Seagate drives, broken interfaces */
printk(KERN_INFO "%s: probing with STATUS(0x%02x) "
@@ -290,12 +289,18 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
/* set features register for atapi
* identify command to be sure of reply
*/
- if ((cmd == WIN_PIDENTIFY))
- /* disable dma & overlap */
- hwif->OUTB(0, io_ports->feature_addr);
+ if (cmd == WIN_PIDENTIFY) {
+ ide_task_t task;
+
+ memset(&task, 0, sizeof(task));
+ /* disable DMA & overlap */
+ task.tf_flags = IDE_TFLAG_OUT_FEATURE;
+
+ tp_ops->tf_load(drive, &task);
+ }
/* ask drive for ID */
- hwif->OUTBSYNC(hwif, cmd, hwif->io_ports.command_addr);
+ tp_ops->exec_command(hwif, cmd);
timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
timeout += jiffies;
@@ -306,13 +311,13 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
}
/* give drive a breather */
msleep(50);
- s = use_altstatus ? ide_read_altstatus(drive)
- : ide_read_status(drive);
+ s = use_altstatus ? tp_ops->read_altstatus(hwif)
+ : tp_ops->read_status(hwif);
} while (s & BUSY_STAT);
/* wait for IRQ and DRQ_STAT */
msleep(50);
- s = ide_read_status(drive);
+ s = tp_ops->read_status(hwif);
if (OK_STAT(s, DRQ_STAT, BAD_R_STAT)) {
unsigned long flags;
@@ -324,7 +329,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
/* drive responded with ID */
rc = 0;
/* clear drive IRQ */
- (void)ide_read_status(drive);
+ (void)tp_ops->read_status(hwif);
local_irq_restore(flags);
} else {
/* drive refused ID */
@@ -346,6 +351,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
static int try_to_identify (ide_drive_t *drive, u8 cmd)
{
ide_hwif_t *hwif = HWIF(drive);
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
int retval;
int autoprobe = 0;
unsigned long cookie = 0;
@@ -361,7 +367,7 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
autoprobe = 1;
cookie = probe_irq_on();
}
- ide_set_irq(drive, autoprobe);
+ tp_ops->set_irq(hwif, autoprobe);
}
retval = actual_try_to_identify(drive, cmd);
@@ -369,9 +375,9 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
if (autoprobe) {
int irq;
- ide_set_irq(drive, 0);
+ tp_ops->set_irq(hwif, 0);
/* clear drive IRQ */
- (void)ide_read_status(drive);
+ (void)tp_ops->read_status(hwif);
udelay(5);
irq = probe_irq_off(cookie);
if (!hwif->irq) {
@@ -396,7 +402,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
do {
msleep(50);
- stat = hwif->INB(hwif->io_ports.status_addr);
+ stat = hwif->tp_ops->read_status(hwif);
if ((stat & BUSY_STAT) == 0)
return 0;
} while (time_before(jiffies, timeout));
@@ -404,6 +410,18 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
return 1;
}
+static u8 ide_read_device(ide_drive_t *drive)
+{
+ ide_task_t task;
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_IN_DEVICE;
+
+ drive->hwif->tp_ops->tf_read(drive, &task);
+
+ return task.tf.device;
+}
+
/**
* do_probe - probe an IDE device
* @drive: drive to probe
@@ -428,7 +446,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
static int do_probe (ide_drive_t *drive, u8 cmd)
{
ide_hwif_t *hwif = HWIF(drive);
- struct ide_io_ports *io_ports = &hwif->io_ports;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
int rc;
u8 stat;
@@ -449,8 +467,8 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
msleep(50);
SELECT_DRIVE(drive);
msleep(50);
- if (hwif->INB(io_ports->device_addr) != drive->select.all &&
- !drive->present) {
+
+ if (ide_read_device(drive) != drive->select.all && !drive->present) {
if (drive->select.b.unit != 0) {
/* exit with drive0 selected */
SELECT_DRIVE(&hwif->drives[0]);
@@ -461,7 +479,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
return 3;
}
- stat = ide_read_status(drive);
+ stat = tp_ops->read_status(hwif);
if (OK_STAT(stat, READY_STAT, BUSY_STAT) ||
drive->present || cmd == WIN_PIDENTIFY) {
@@ -471,7 +489,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
rc = try_to_identify(drive,cmd);
}
- stat = ide_read_status(drive);
+ stat = tp_ops->read_status(hwif);
if (stat == (BUSY_STAT | READY_STAT))
return 4;
@@ -482,13 +500,13 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
msleep(50);
SELECT_DRIVE(drive);
msleep(50);
- hwif->OUTBSYNC(hwif, WIN_SRST, io_ports->command_addr);
+ tp_ops->exec_command(hwif, WIN_SRST);
(void)ide_busy_sleep(hwif);
rc = try_to_identify(drive, cmd);
}
/* ensure drive IRQ is clear */
- stat = ide_read_status(drive);
+ stat = tp_ops->read_status(hwif);
if (rc == 1)
printk(KERN_ERR "%s: no response (status = 0x%02x)\n",
@@ -502,7 +520,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
SELECT_DRIVE(&hwif->drives[0]);
msleep(50);
/* ensure drive irq is clear */
- (void)ide_read_status(drive);
+ (void)tp_ops->read_status(hwif);
}
return rc;
}
@@ -513,12 +531,13 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
static void enable_nest (ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
u8 stat;
printk("%s: enabling %s -- ", hwif->name, drive->id->model);
SELECT_DRIVE(drive);
msleep(50);
- hwif->OUTBSYNC(hwif, EXABYTE_ENABLE_NEST, hwif->io_ports.command_addr);
+ tp_ops->exec_command(hwif, EXABYTE_ENABLE_NEST);
if (ide_busy_sleep(hwif)) {
printk(KERN_CONT "failed (timeout)\n");
@@ -527,7 +546,7 @@ static void enable_nest (ide_drive_t *drive)
msleep(50);
- stat = ide_read_status(drive);
+ stat = tp_ops->read_status(hwif);
if (!OK_STAT(stat, 0, BAD_STAT))
printk(KERN_CONT "failed (status = 0x%02x)\n", stat);
@@ -619,7 +638,7 @@ static inline u8 probe_for_drive (ide_drive_t *drive)
return drive->present;
}
-static void hwif_release_dev (struct device *dev)
+static void hwif_release_dev(struct device *dev)
{
ide_hwif_t *hwif = container_of(dev, ide_hwif_t, gendev);
@@ -709,7 +728,7 @@ static int ide_port_wait_ready(ide_hwif_t *hwif)
/* Ignore disks that we will not probe for later. */
if (!drive->noprobe || drive->present) {
SELECT_DRIVE(drive);
- ide_set_irq(drive, 1);
+ hwif->tp_ops->set_irq(hwif, 1);
mdelay(2);
rc = ide_wait_not_busy(hwif, 35000);
if (rc)
@@ -971,6 +990,45 @@ static void ide_port_setup_devices(ide_hwif_t *hwif)
mutex_unlock(&ide_cfg_mtx);
}
+static ide_hwif_t *ide_ports[MAX_HWIFS];
+
+void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
+{
+ ide_hwgroup_t *hwgroup = hwif->hwgroup;
+
+ ide_ports[hwif->index] = NULL;
+
+ spin_lock_irq(&ide_lock);
+ /*
+ * Remove us from the hwgroup, and free
+ * the hwgroup if we were the only member
+ */
+ if (hwif->next == hwif) {
+ BUG_ON(hwgroup->hwif != hwif);
+ kfree(hwgroup);
+ } else {
+ /* There is another interface in hwgroup.
+ * Unlink us, and set hwgroup->drive and ->hwif to
+ * something sane.
+ */
+ ide_hwif_t *g = hwgroup->hwif;
+
+ while (g->next != hwif)
+ g = g->next;
+ g->next = hwif->next;
+ if (hwgroup->hwif == hwif) {
+ /* Choose a random hwif for hwgroup->hwif.
+ * It's guaranteed that there are no drives
+ * left in the hwgroup.
+ */
+ BUG_ON(hwgroup->drive != NULL);
+ hwgroup->hwif = g;
+ }
+ BUG_ON(hwgroup->hwif == hwif);
+ }
+ spin_unlock_irq(&ide_lock);
+}
+
/*
* This routine sets up the irq for an ide interface, and creates a new
* hwgroup for the irq/hwif if none was previously assigned.
@@ -998,8 +1056,9 @@ static int init_irq (ide_hwif_t *hwif)
* Group up with any other hwifs that share our irq(s).
*/
for (index = 0; index < MAX_HWIFS; index++) {
- ide_hwif_t *h = &ide_hwifs[index];
- if (h->hwgroup) { /* scan only initialized hwif's */
+ ide_hwif_t *h = ide_ports[index];
+
+ if (h && h->hwgroup) { /* scan only initialized ports */
if (hwif->irq == h->irq) {
hwif->sharing_irq = h->sharing_irq = 1;
if (hwif->chipset != ide_pci ||
@@ -1053,6 +1112,8 @@ static int init_irq (ide_hwif_t *hwif)
hwgroup->timer.data = (unsigned long) hwgroup;
}
+ ide_ports[hwif->index] = hwif;
+
/*
* Allocate the irq, if not already obtained for another hwif
*/
@@ -1066,8 +1127,7 @@ static int init_irq (ide_hwif_t *hwif)
sa = IRQF_SHARED;
if (io_ports->ctl_addr)
- /* clear nIEN */
- hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS, io_ports->ctl_addr);
+ hwif->tp_ops->set_irq(hwif, 1);
if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup))
goto out_unlink;
@@ -1345,6 +1405,9 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
hwif->host_flags |= d->host_flags;
hwif->pio_mask = d->pio_mask;
+ if (d->tp_ops)
+ hwif->tp_ops = d->tp_ops;
+
/* ->set_pio_mode for DTC2278 is currently limited to port 0 */
if (hwif->chipset != ide_dtc2278 || hwif->channel == 0)
hwif->port_ops = d->port_ops;
@@ -1363,6 +1426,7 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
if (rc < 0) {
printk(KERN_INFO "%s: DMA disabled\n", hwif->name);
+ hwif->dma_base = 0;
hwif->swdma_mask = 0;
hwif->mwdma_mask = 0;
hwif->ultra_mask = 0;
@@ -1446,18 +1510,20 @@ static int ide_sysfs_register_port(ide_hwif_t *hwif)
return rc;
}
+static unsigned int ide_indexes;
+
/**
- * ide_find_port_slot - find free ide_hwifs[] slot
+ * ide_find_port_slot - find free port slot
* @d: IDE port info
*
- * Return the new hwif. If we are out of free slots return NULL.
+ * Return the new port slot index or -ENOENT if we are out of free slots.
*/
-ide_hwif_t *ide_find_port_slot(const struct ide_port_info *d)
+static int ide_find_port_slot(const struct ide_port_info *d)
{
- ide_hwif_t *hwif;
- int i;
+ int idx = -ENOENT;
u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1;
+ u8 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;
/*
* Claim an unassigned slot.
@@ -1469,51 +1535,106 @@ ide_hwif_t *ide_find_port_slot(const struct ide_port_info *d)
* Unless there is a bootable card that does not use the standard
* ports 0x1f0/0x170 (the ide0/ide1 defaults).
*/
- if (bootable) {
- i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;
-
- for (; i < MAX_HWIFS; i++) {
- hwif = &ide_hwifs[i];
- if (hwif->chipset == ide_unknown)
- goto out_found;
- }
+ mutex_lock(&ide_cfg_mtx);
+ if (MAX_HWIFS == 1) {
+ if (ide_indexes == 0 && i == 0)
+ idx = 1;
} else {
- for (i = 2; i < MAX_HWIFS; i++) {
- hwif = &ide_hwifs[i];
- if (hwif->chipset == ide_unknown)
- goto out_found;
+ if (bootable) {
+ if ((ide_indexes | i) != (1 << MAX_HWIFS) - 1)
+ idx = ffz(ide_indexes | i);
+ } else {
+ if ((ide_indexes | 3) != (1 << MAX_HWIFS) - 1)
+ idx = ffz(ide_indexes | 3);
+ else if ((ide_indexes & 3) != 3)
+ idx = ffz(ide_indexes);
}
- for (i = 0; i < 2 && i < MAX_HWIFS; i++) {
- hwif = &ide_hwifs[i];
- if (hwif->chipset == ide_unknown)
- goto out_found;
+ }
+ if (idx >= 0)
+ ide_indexes |= (1 << idx);
+ mutex_unlock(&ide_cfg_mtx);
+
+ return idx;
+}
+
+static void ide_free_port_slot(int idx)
+{
+ mutex_lock(&ide_cfg_mtx);
+ ide_indexes &= ~(1 << idx);
+ mutex_unlock(&ide_cfg_mtx);
+}
+
+struct ide_host *ide_host_alloc_all(const struct ide_port_info *d,
+ hw_regs_t **hws)
+{
+ struct ide_host *host;
+ int i;
+
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (host == NULL)
+ return NULL;
+
+ for (i = 0; i < MAX_HWIFS; i++) {
+ ide_hwif_t *hwif;
+ int idx;
+
+ if (hws[i] == NULL)
+ continue;
+
+ hwif = kzalloc(sizeof(*hwif), GFP_KERNEL);
+ if (hwif == NULL)
+ continue;
+
+ idx = ide_find_port_slot(d);
+ if (idx < 0) {
+ printk(KERN_ERR "%s: no free slot for interface\n",
+ d ? d->name : "ide");
+ kfree(hwif);
+ continue;
}
+
+ ide_init_port_data(hwif, idx);
+
+ host->ports[i] = hwif;
+ host->n_ports++;
}
- printk(KERN_ERR "%s: no free slot for interface\n",
- d ? d->name : "ide");
+ if (host->n_ports == 0) {
+ kfree(host);
+ return NULL;
+ }
- return NULL;
+ return host;
+}
+EXPORT_SYMBOL_GPL(ide_host_alloc_all);
+
+struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws)
+{
+ hw_regs_t *hws_all[MAX_HWIFS];
+ int i;
-out_found:
- ide_init_port_data(hwif, i);
- return hwif;
+ for (i = 0; i < MAX_HWIFS; i++)
+ hws_all[i] = (i < 4) ? hws[i] : NULL;
+
+ return ide_host_alloc_all(d, hws_all);
}
-EXPORT_SYMBOL_GPL(ide_find_port_slot);
+EXPORT_SYMBOL_GPL(ide_host_alloc);
-int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
+int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
+ hw_regs_t **hws)
{
ide_hwif_t *hwif, *mate = NULL;
- int i, rc = 0;
+ int i, j = 0;
for (i = 0; i < MAX_HWIFS; i++) {
- if (idx[i] == 0xff) {
+ hwif = host->ports[i];
+
+ if (hwif == NULL) {
mate = NULL;
continue;
}
- hwif = &ide_hwifs[idx[i]];
-
+ ide_init_port_hw(hwif, hws[i]);
ide_port_apply_params(hwif);
if (d == NULL) {
@@ -1534,10 +1655,10 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
}
for (i = 0; i < MAX_HWIFS; i++) {
- if (idx[i] == 0xff)
- continue;
+ hwif = host->ports[i];
- hwif = &ide_hwifs[idx[i]];
+ if (hwif == NULL)
+ continue;
if (ide_probe_port(hwif) == 0)
hwif->present = 1;
@@ -1551,19 +1672,20 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
}
for (i = 0; i < MAX_HWIFS; i++) {
- if (idx[i] == 0xff)
- continue;
+ hwif = host->ports[i];
- hwif = &ide_hwifs[idx[i]];
+ if (hwif == NULL)
+ continue;
if (hwif_init(hwif) == 0) {
printk(KERN_INFO "%s: failed to initialize IDE "
"interface\n", hwif->name);
hwif->present = 0;
- rc = -1;
continue;
}
+ j++;
+
if (hwif->present)
ide_port_setup_devices(hwif);
@@ -1574,10 +1696,10 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
}
for (i = 0; i < MAX_HWIFS; i++) {
- if (idx[i] == 0xff)
- continue;
+ hwif = host->ports[i];
- hwif = &ide_hwifs[idx[i]];
+ if (hwif == NULL)
+ continue;
if (hwif->chipset == ide_unknown)
hwif->chipset = ide_generic;
@@ -1587,10 +1709,10 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
}
for (i = 0; i < MAX_HWIFS; i++) {
- if (idx[i] == 0xff)
- continue;
+ hwif = host->ports[i];
- hwif = &ide_hwifs[idx[i]];
+ if (hwif == NULL)
+ continue;
ide_sysfs_register_port(hwif);
ide_proc_register_port(hwif);
@@ -1599,21 +1721,64 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
ide_proc_port_register_devices(hwif);
}
- return rc;
+ return j ? 0 : -1;
}
-EXPORT_SYMBOL_GPL(ide_device_add_all);
+EXPORT_SYMBOL_GPL(ide_host_register);
-int ide_device_add(u8 idx[4], const struct ide_port_info *d)
+int ide_host_add(const struct ide_port_info *d, hw_regs_t **hws,
+ struct ide_host **hostp)
{
- u8 idx_all[MAX_HWIFS];
+ struct ide_host *host;
+ int rc;
+
+ host = ide_host_alloc(d, hws);
+ if (host == NULL)
+ return -ENOMEM;
+
+ rc = ide_host_register(host, d, hws);
+ if (rc) {
+ ide_host_free(host);
+ return rc;
+ }
+
+ if (hostp)
+ *hostp = host;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_host_add);
+
+void ide_host_free(struct ide_host *host)
+{
+ ide_hwif_t *hwif;
int i;
- for (i = 0; i < MAX_HWIFS; i++)
- idx_all[i] = (i < 4) ? idx[i] : 0xff;
+ for (i = 0; i < MAX_HWIFS; i++) {
+ hwif = host->ports[i];
- return ide_device_add_all(idx_all, d);
+ if (hwif == NULL)
+ continue;
+
+ ide_free_port_slot(hwif->index);
+ kfree(hwif);
+ }
+
+ kfree(host);
}
-EXPORT_SYMBOL_GPL(ide_device_add);
+EXPORT_SYMBOL_GPL(ide_host_free);
+
+void ide_host_remove(struct ide_host *host)
+{
+ int i;
+
+ for (i = 0; i < MAX_HWIFS; i++) {
+ if (host->ports[i])
+ ide_unregister(host->ports[i]);
+ }
+
+ ide_host_free(host);
+}
+EXPORT_SYMBOL_GPL(ide_host_remove);
void ide_port_scan(ide_hwif_t *hwif)
{
@@ -1634,11 +1799,10 @@ void ide_port_scan(ide_hwif_t *hwif)
}
EXPORT_SYMBOL_GPL(ide_port_scan);
-static void ide_legacy_init_one(u8 *idx, hw_regs_t *hw, u8 port_no,
- const struct ide_port_info *d,
+static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw,
+ u8 port_no, const struct ide_port_info *d,
unsigned long config)
{
- ide_hwif_t *hwif;
unsigned long base, ctl;
int irq;
@@ -1668,33 +1832,25 @@ static void ide_legacy_init_one(u8 *idx, hw_regs_t *hw, u8 port_no,
ide_std_init_ports(hw, base, ctl);
hw->irq = irq;
hw->chipset = d->chipset;
+ hw->config = config;
- hwif = ide_find_port_slot(d);
- if (hwif) {
- ide_init_port_hw(hwif, hw);
- if (config)
- hwif->config_data = config;
- idx[port_no] = hwif->index;
- }
+ hws[port_no] = hw;
}
int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
{
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
- hw_regs_t hw[2];
+ hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
memset(&hw, 0, sizeof(hw));
if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
- ide_legacy_init_one(idx, &hw[0], 0, d, config);
- ide_legacy_init_one(idx, &hw[1], 1, d, config);
+ ide_legacy_init_one(hws, &hw[0], 0, d, config);
+ ide_legacy_init_one(hws, &hw[1], 1, d, config);
- if (idx[0] == 0xff && idx[1] == 0xff &&
+ if (hws[0] == NULL && hws[1] == NULL &&
(d->host_flags & IDE_HFLAG_SINGLE))
return -ENOENT;
- ide_device_add(idx, d);
-
- return 0;
+ return ide_host_add(d, hws, NULL);
}
EXPORT_SYMBOL_GPL(ide_legacy_device_add);
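
The static ide_hwifs[] array is gone; port indexes are now handed out from the ide_indexes bitmap under ide_cfg_mtx, with ffz() locating the first free slot and a reserved mask steering bootable ports away from claimed slots. A toy model of the bookkeeping (locking and the MAX_HWIFS == 1 and bootable/non-bootable special cases elided; names are illustrative):

        #include <linux/bitops.h>
        #include <linux/errno.h>
        #include <linux/ide.h>

        static unsigned int example_indexes;    /* bit n set => index n taken */

        static int example_claim_slot(unsigned int reserved_mask)
        {
                unsigned int taken = example_indexes | reserved_mask;
                int idx;

                if (taken == (1 << MAX_HWIFS) - 1)
                        return -ENOENT;         /* all slots in use */

                idx = ffz(taken);               /* first zero bit */
                example_indexes |= 1 << idx;
                return idx;
        }
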
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 8af88bf0969..151c91e933d 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -345,7 +345,7 @@ static int set_xfer_rate (ide_drive_t *drive, int arg)
ide_task_t task;
int err;
- if (arg < 0 || arg > 70)
+ if (arg < XFER_PIO_0 || arg > XFER_UDMA_6)
return -EINVAL;
memset(&task, 0, sizeof(task));
@@ -357,7 +357,7 @@ static int set_xfer_rate (ide_drive_t *drive, int arg)
err = ide_no_data_taskfile(drive, &task);
- if (!err && arg) {
+ if (!err) {
ide_set_xfer_rate(drive, (u8) arg);
ide_driveid_update(drive);
}
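
Taken together, the ide-lib.c and ide-proc.c hunks tighten the transfer-mode contract: set_xfer_rate() now rejects anything outside XFER_PIO_0..XFER_UDMA_6 (0x08..0x46) at the /proc boundary, which is what lets ide_set_xfer_rate() BUG_ON() sub-PIO values instead of second-guessing modes 0x00-0x07. A sketch of the check:

        #include <linux/errno.h>
        #include <linux/ide.h>

        /* sketch: the validation now done at the user-facing boundary */
        static int example_validate_xfer_rate(int arg)
        {
                if (arg < XFER_PIO_0 || arg > XFER_UDMA_6)
                        return -EINVAL;
                return 0;
        }
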
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 353dd11b928..6962ca4891a 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -195,23 +195,6 @@ enum {
#define IDETAPE_BLOCK_DESCRIPTOR 0
#define IDETAPE_CAPABILITIES_PAGE 0x2a
-/* Tape flag bits values. */
-enum {
- IDETAPE_FLAG_IGNORE_DSC = (1 << 0),
- /* 0 When the tape position is unknown */
- IDETAPE_FLAG_ADDRESS_VALID = (1 << 1),
- /* Device already opened */
- IDETAPE_FLAG_BUSY = (1 << 2),
- /* Attempt to auto-detect the current user block size */
- IDETAPE_FLAG_DETECT_BS = (1 << 3),
- /* Currently on a filemark */
- IDETAPE_FLAG_FILEMARK = (1 << 4),
- /* DRQ interrupt device */
- IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 5),
- /* 0 = no tape is loaded, so we don't rewind after ejecting */
- IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 6),
-};
-
/*
* Most of the global data that we need to save even as we leave the driver
* due to an interrupt or a timer event is stored in the struct defined below.
@@ -312,8 +295,6 @@ typedef struct ide_tape_obj {
/* Wasted space in each stage */
int excess_bh_size;
- /* Status/Action flags: long for set_bit */
- unsigned long flags;
/* protects the ide-tape queue */
spinlock_t lock;
@@ -398,7 +379,7 @@ static void idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
count = min(
(unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
bcount);
- drive->hwif->input_data(drive, NULL, bh->b_data +
+ drive->hwif->tp_ops->input_data(drive, NULL, bh->b_data +
atomic_read(&bh->b_count), count);
bcount -= count;
atomic_add(count, &bh->b_count);
@@ -424,7 +405,7 @@ static void idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
return;
}
count = min((unsigned int)pc->b_count, (unsigned int)bcount);
- drive->hwif->output_data(drive, NULL, pc->b_data, count);
+ drive->hwif->tp_ops->output_data(drive, NULL, pc->b_data, count);
bcount -= count;
pc->b_data += count;
pc->b_count -= count;
@@ -585,7 +566,6 @@ static void ide_tape_kfree_buffer(idetape_tape_t *tape)
bh = bh->b_reqnext;
kfree(prev_bh);
}
- kfree(tape->merge_bh);
}
static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
@@ -665,7 +645,7 @@ static void ide_tape_callback(ide_drive_t *drive)
if (readpos[0] & 0x4) {
printk(KERN_INFO "ide-tape: Block location is unknown"
"to the tape\n");
- clear_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
+ clear_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags);
uptodate = 0;
} else {
debug_log(DBG_SENSE, "Block Location - %u\n",
@@ -673,7 +653,7 @@ static void ide_tape_callback(ide_drive_t *drive)
tape->partition = readpos[1];
tape->first_frame = be32_to_cpu(*(u32 *)&readpos[4]);
- set_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
+ set_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags);
}
}
@@ -690,7 +670,6 @@ static void idetape_init_pc(struct ide_atapi_pc *pc)
pc->buf_size = IDETAPE_PC_BUFFER_SIZE;
pc->bh = NULL;
pc->b_data = NULL;
- pc->callback = ide_tape_callback;
}
static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc)
@@ -705,7 +684,7 @@ static void idetape_init_rq(struct request *rq, u8 cmd)
{
blk_rq_init(NULL, rq);
rq->cmd_type = REQ_TYPE_SPECIAL;
- rq->cmd[0] = cmd;
+ rq->cmd[13] = cmd;
}
/*
@@ -732,6 +711,7 @@ static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
rq->cmd_flags |= REQ_PREEMPT;
rq->buffer = (char *) pc;
rq->rq_disk = tape->disk;
+ memcpy(rq->cmd, pc->c, 12);
ide_do_drive_cmd(drive, rq);
}
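
The rq->cmd changes in ide-tape follow one rule: the 12 ATAPI packet bytes are mirrored into rq->cmd[0..11] (the memcpy() calls added above and below), and the driver's private REQ_IDETAPE_* type bits move from rq->cmd[0] to rq->cmd[13] so they no longer collide with the packet. A sketch of the resulting layout (the helper name is illustrative):

        #include <linux/blkdev.h>
        #include <linux/string.h>

        /* sketch: packet bytes and driver-private type bits in one rq->cmd */
        static void example_setup_tape_rq(struct request *rq, u8 type,
                                          const u8 *packet)
        {
                rq->cmd_type = REQ_TYPE_SPECIAL;
                memcpy(rq->cmd, packet, 12);    /* ATAPI packet command */
                rq->cmd[13] = type;             /* REQ_IDETAPE_* bits */
        }
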
@@ -742,7 +722,6 @@ static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
*/
static void idetape_retry_pc(ide_drive_t *drive)
{
- idetape_tape_t *tape = drive->driver_data;
struct ide_atapi_pc *pc;
struct request *rq;
@@ -750,7 +729,7 @@ static void idetape_retry_pc(ide_drive_t *drive)
pc = idetape_next_pc_storage(drive);
rq = idetape_next_rq_storage(drive);
idetape_create_request_sense_cmd(pc);
- set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
+ set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
idetape_queue_pc_head(drive, pc, rq);
}
@@ -887,7 +866,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
pc->error = IDETAPE_ERROR_GENERAL;
}
tape->failed_pc = NULL;
- pc->callback(drive);
+ drive->pc_callback(drive);
return ide_stopped;
}
debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
@@ -927,11 +906,12 @@ static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
{
+ ide_hwif_t *hwif = drive->hwif;
idetape_tape_t *tape = drive->driver_data;
struct ide_atapi_pc *pc = tape->pc;
u8 stat;
- stat = ide_read_status(drive);
+ stat = hwif->tp_ops->read_status(hwif);
if (stat & SEEK_STAT) {
if (stat & ERR_STAT) {
@@ -948,14 +928,17 @@ static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
pc->error = IDETAPE_ERROR_GENERAL;
tape->failed_pc = NULL;
}
- pc->callback(drive);
+ drive->pc_callback(drive);
return ide_stopped;
}
static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
- struct ide_atapi_pc *pc, unsigned int length,
- struct idetape_bh *bh, u8 opcode)
+ struct ide_atapi_pc *pc, struct request *rq,
+ u8 opcode)
{
+ struct idetape_bh *bh = (struct idetape_bh *)rq->special;
+ unsigned int length = rq->current_nr_sectors;
+
idetape_init_pc(pc);
put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
pc->c[1] = 1;
@@ -975,11 +958,14 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
pc->b_data = bh->b_data;
pc->b_count = atomic_read(&bh->b_count);
}
+
+ memcpy(rq->cmd, pc->c, 12);
}
static ide_startstop_t idetape_do_request(ide_drive_t *drive,
struct request *rq, sector_t block)
{
+ ide_hwif_t *hwif = drive->hwif;
idetape_tape_t *tape = drive->driver_data;
struct ide_atapi_pc *pc = NULL;
struct request *postponed_rq = tape->postponed_rq;
@@ -1017,17 +1003,17 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
* If the tape is still busy, postpone our request and service
* the other device meanwhile.
*/
- stat = ide_read_status(drive);
+ stat = hwif->tp_ops->read_status(hwif);
- if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2))
- set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
+ if (!drive->dsc_overlap && !(rq->cmd[13] & REQ_IDETAPE_PC2))
+ set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
if (drive->post_reset == 1) {
- set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
+ set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
drive->post_reset = 0;
}
- if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) &&
+ if (!test_and_clear_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags) &&
(stat & SEEK_STAT) == 0) {
if (postponed_rq == NULL) {
tape->dsc_polling_start = jiffies;
@@ -1036,7 +1022,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
} else if (time_after(jiffies, tape->dsc_timeout)) {
printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
tape->name);
- if (rq->cmd[0] & REQ_IDETAPE_PC2) {
+ if (rq->cmd[13] & REQ_IDETAPE_PC2) {
idetape_media_access_finished(drive);
return ide_stopped;
} else {
@@ -1049,35 +1035,29 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
idetape_postpone_request(drive);
return ide_stopped;
}
- if (rq->cmd[0] & REQ_IDETAPE_READ) {
+ if (rq->cmd[13] & REQ_IDETAPE_READ) {
pc = idetape_next_pc_storage(drive);
- ide_tape_create_rw_cmd(tape, pc, rq->current_nr_sectors,
- (struct idetape_bh *)rq->special,
- READ_6);
+ ide_tape_create_rw_cmd(tape, pc, rq, READ_6);
goto out;
}
- if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
+ if (rq->cmd[13] & REQ_IDETAPE_WRITE) {
pc = idetape_next_pc_storage(drive);
- ide_tape_create_rw_cmd(tape, pc, rq->current_nr_sectors,
- (struct idetape_bh *)rq->special,
- WRITE_6);
+ ide_tape_create_rw_cmd(tape, pc, rq, WRITE_6);
goto out;
}
- if (rq->cmd[0] & REQ_IDETAPE_PC1) {
+ if (rq->cmd[13] & REQ_IDETAPE_PC1) {
pc = (struct ide_atapi_pc *) rq->buffer;
- rq->cmd[0] &= ~(REQ_IDETAPE_PC1);
- rq->cmd[0] |= REQ_IDETAPE_PC2;
+ rq->cmd[13] &= ~(REQ_IDETAPE_PC1);
+ rq->cmd[13] |= REQ_IDETAPE_PC2;
goto out;
}
- if (rq->cmd[0] & REQ_IDETAPE_PC2) {
+ if (rq->cmd[13] & REQ_IDETAPE_PC2) {
idetape_media_access_finished(drive);
return ide_stopped;
}
BUG();
-out:
- if (test_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags))
- pc->flags |= PC_FLAG_DRQ_INTERRUPT;
+out:
return idetape_issue_pc(drive, pc);
}
@@ -1281,8 +1261,9 @@ static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_SPECIAL;
- rq->cmd[0] = REQ_IDETAPE_PC1;
+ rq->cmd[13] = REQ_IDETAPE_PC1;
rq->buffer = (char *)pc;
+ memcpy(rq->cmd, pc->c, 12);
error = blk_execute_rq(drive->queue, tape->disk, rq, 0);
blk_put_request(rq);
return error;
@@ -1304,7 +1285,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
int load_attempted = 0;
/* Wait for the tape to become ready */
- set_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
+ set_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags);
timeout += jiffies;
while (time_before(jiffies, timeout)) {
idetape_create_test_unit_ready_cmd(&pc);
@@ -1397,7 +1378,7 @@ static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
if (tape->chrdev_dir != IDETAPE_DIR_READ)
return;
- clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags);
+ clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags);
tape->merge_bh_size = 0;
if (tape->merge_bh != NULL) {
ide_tape_kfree_buffer(tape);
@@ -1465,7 +1446,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_SPECIAL;
- rq->cmd[0] = cmd;
+ rq->cmd[13] = cmd;
rq->rq_disk = tape->disk;
rq->special = (void *)bh;
rq->sector = tape->first_frame;
@@ -1636,7 +1617,7 @@ static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
/* If we are at a filemark, return a read length of 0 */
- if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
+ if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
return 0;
idetape_init_read(drive);
@@ -1746,7 +1727,7 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
if (tape->chrdev_dir == IDETAPE_DIR_READ) {
tape->merge_bh_size = 0;
- if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
+ if (test_and_clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
++count;
ide_tape_discard_merge_buffer(drive, 0);
}
@@ -1801,7 +1782,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
if (tape->chrdev_dir != IDETAPE_DIR_READ) {
- if (test_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags))
+ if (test_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags))
if (count > tape->blk_size &&
(count % tape->blk_size) == 0)
tape->user_bs_factor = count / tape->blk_size;
@@ -1841,7 +1822,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
tape->merge_bh_size = bytes_read-temp;
}
finish:
- if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) {
+ if (!actually_read && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) {
debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
idetape_space_over_filemarks(drive, MTFSF, 1);
@@ -2027,7 +2008,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
!IDETAPE_LU_LOAD_MASK);
retval = idetape_queue_pc_tail(drive, &pc);
if (!retval)
- clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
+ clear_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags);
return retval;
case MTNOP:
ide_tape_discard_merge_buffer(drive, 0);
@@ -2050,9 +2031,9 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
mt_count % tape->blk_size)
return -EIO;
tape->user_bs_factor = mt_count / tape->blk_size;
- clear_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
+ clear_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags);
} else
- set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
+ set_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags);
return 0;
case MTSEEK:
ide_tape_discard_merge_buffer(drive, 0);
@@ -2202,20 +2183,20 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
filp->private_data = tape;
- if (test_and_set_bit(IDETAPE_FLAG_BUSY, &tape->flags)) {
+ if (test_and_set_bit(IDE_AFLAG_BUSY, &drive->atapi_flags)) {
retval = -EBUSY;
goto out_put_tape;
}
retval = idetape_wait_ready(drive, 60 * HZ);
if (retval) {
- clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
+ clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
goto out_put_tape;
}
idetape_read_position(drive);
- if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags))
+ if (!test_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags))
(void)idetape_rewind_tape(drive);
/* Read block size and write protect status from drive. */
@@ -2231,7 +2212,7 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
if (tape->write_prot) {
if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
(filp->f_flags & O_ACCMODE) == O_RDWR) {
- clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
+ clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
retval = -EROFS;
goto out_put_tape;
}
@@ -2291,7 +2272,7 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp)
ide_tape_discard_merge_buffer(drive, 1);
}
- if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags))
+ if (minor < 128 && test_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags))
(void) idetape_rewind_tape(drive);
if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
if (tape->door_locked == DOOR_LOCKED) {
@@ -2301,7 +2282,7 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp)
}
}
}
- clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
+ clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
ide_tape_put(tape);
unlock_kernel();
return 0;
@@ -2464,6 +2445,8 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
u8 gcw[2];
u16 *ctl = (u16 *)&tape->caps[12];
+ drive->pc_callback = ide_tape_callback;
+
spin_lock_init(&tape->lock);
drive->dsc_overlap = 1;
if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
@@ -2484,7 +2467,7 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
/* Command packet DRQ type */
if (((gcw[0] & 0x60) >> 5) == 1)
- set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags);
+ set_bit(IDE_AFLAG_DRQ_INTERRUPT, &drive->atapi_flags);
idetape_get_inquiry_results(drive);
idetape_get_mode_sense_results(drive);
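Taken together, the ide-tape hunks above make two coordinated changes: the driver-private request flags move from rq->cmd[0] to rq->cmd[13], freeing bytes 0..11 for the raw ATAPI packet, and the per-tape flag word gives way to the shared IDE_AFLAG_* bits in drive->atapi_flags. A condensed sketch of the resulting request setup, mirroring idetape_queue_pc_tail() above:

	/* Sketch only, condensed from idetape_queue_pc_tail() above. */
	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->cmd[13] = REQ_IDETAPE_PC1;	/* driver flag byte, moved out of cmd[0] */
	rq->buffer = (char *)pc;
	memcpy(rq->cmd, pc->c, 12);	/* the ATAPI packet itself, cmd[0..11] */
	error = blk_execute_rq(drive->queue, tape->disk, rq, 0);
	blk_put_request(rq);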
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 1fbdb746dc8..aeddbbd69e8 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -64,6 +64,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
ide_hwif_t *hwif = HWIF(drive);
struct ide_taskfile *tf = &task->tf;
ide_handler_t *handler = NULL;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
const struct ide_dma_ops *dma_ops = hwif->dma_ops;
if (task->data_phase == TASKFILE_MULTI_IN ||
@@ -80,15 +81,15 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
ide_tf_dump(drive->name, tf);
- ide_set_irq(drive, 1);
+ tp_ops->set_irq(hwif, 1);
SELECT_MASK(drive, 0);
- hwif->tf_load(drive, task);
+ tp_ops->tf_load(drive, task);
}
switch (task->data_phase) {
case TASKFILE_MULTI_OUT:
case TASKFILE_OUT:
- hwif->OUTBSYNC(hwif, tf->command, hwif->io_ports.command_addr);
+ tp_ops->exec_command(hwif, tf->command);
ndelay(400); /* FIXME */
return pre_task_out_intr(drive, task->rq);
case TASKFILE_MULTI_IN:
@@ -124,7 +125,8 @@ EXPORT_SYMBOL_GPL(do_rw_taskfile);
*/
static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
{
- u8 stat = ide_read_status(drive);
+ ide_hwif_t *hwif = drive->hwif;
+ u8 stat = hwif->tp_ops->read_status(hwif);
if (OK_STAT(stat, READY_STAT, BAD_STAT))
drive->mult_count = drive->mult_req;
@@ -141,11 +143,16 @@ static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
*/
static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
{
+ ide_hwif_t *hwif = drive->hwif;
int retries = 5;
u8 stat;
- while (((stat = ide_read_status(drive)) & BUSY_STAT) && retries--)
+ while (1) {
+ stat = hwif->tp_ops->read_status(hwif);
+ if ((stat & BUSY_STAT) == 0 || retries-- == 0)
+ break;
udelay(10);
+ };
if (OK_STAT(stat, READY_STAT, BAD_STAT))
return ide_stopped;
@@ -162,7 +169,8 @@ static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
*/
static ide_startstop_t recal_intr(ide_drive_t *drive)
{
- u8 stat = ide_read_status(drive);
+ ide_hwif_t *hwif = drive->hwif;
+ u8 stat = hwif->tp_ops->read_status(hwif);
if (!OK_STAT(stat, READY_STAT, BAD_STAT))
return ide_error(drive, "recal_intr", stat);
@@ -174,11 +182,12 @@ static ide_startstop_t recal_intr(ide_drive_t *drive)
*/
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
- ide_task_t *args = HWGROUP(drive)->rq->special;
+ ide_hwif_t *hwif = drive->hwif;
+ ide_task_t *args = hwif->hwgroup->rq->special;
u8 stat;
local_irq_enable_in_hardirq();
- stat = ide_read_status(drive);
+ stat = hwif->tp_ops->read_status(hwif);
if (!OK_STAT(stat, READY_STAT, BAD_STAT))
return ide_error(drive, "task_no_data_intr", stat);
@@ -192,6 +201,7 @@ static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
static u8 wait_drive_not_busy(ide_drive_t *drive)
{
+ ide_hwif_t *hwif = drive->hwif;
int retries;
u8 stat;
@@ -200,7 +210,7 @@ static u8 wait_drive_not_busy(ide_drive_t *drive)
* take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
*/
for (retries = 0; retries < 1000; retries++) {
- stat = ide_read_status(drive);
+ stat = hwif->tp_ops->read_status(hwif);
if (stat & BUSY_STAT)
udelay(10);
@@ -255,9 +265,9 @@ static void ide_pio_sector(ide_drive_t *drive, struct request *rq,
/* do the actual data transfer */
if (write)
- hwif->output_data(drive, rq, buf, SECTOR_SIZE);
+ hwif->tp_ops->output_data(drive, rq, buf, SECTOR_SIZE);
else
- hwif->input_data(drive, rq, buf, SECTOR_SIZE);
+ hwif->tp_ops->input_data(drive, rq, buf, SECTOR_SIZE);
kunmap_atomic(buf, KM_BIO_SRC_IRQ);
#ifdef CONFIG_HIGHMEM
@@ -383,8 +393,8 @@ static ide_startstop_t task_in_unexpected(ide_drive_t *drive, struct request *rq
static ide_startstop_t task_in_intr(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- struct request *rq = HWGROUP(drive)->rq;
- u8 stat = ide_read_status(drive);
+ struct request *rq = hwif->hwgroup->rq;
+ u8 stat = hwif->tp_ops->read_status(hwif);
/* Error? */
if (stat & ERR_STAT)
@@ -418,7 +428,7 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
struct request *rq = HWGROUP(drive)->rq;
- u8 stat = ide_read_status(drive);
+ u8 stat = hwif->tp_ops->read_status(hwif);
if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
return task_error(drive, rq, __func__, stat);
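In ide-taskfile.c every open-coded ide_read_status(drive) becomes a call through the interface's new ide_tp_ops table, and OUTBSYNC gives way to tp_ops->exec_command(), so hosts with MMIO or byte-swapped register access can override the transport without patching hwif methods. The converted handler shape, sketched with a hypothetical name:

	static ide_startstop_t example_intr(ide_drive_t *drive)
	{
		ide_hwif_t *hwif = drive->hwif;
		u8 stat = hwif->tp_ops->read_status(hwif);

		if (!OK_STAT(stat, READY_STAT, BAD_STAT))
			return ide_error(drive, "example_intr", stat);
		return ide_stopped;
	}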
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index d4a6b102a77..60f0ca66aa9 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
- * Copyrifht (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz
+ * Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz
*/
/*
@@ -101,8 +101,7 @@ void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
init_completion(&hwif->gendev_rel_comp);
- default_hwif_iops(hwif);
- default_hwif_transport(hwif);
+ hwif->tp_ops = &default_tp_ops;
ide_port_init_devices_data(hwif);
}
@@ -134,41 +133,6 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
}
}
-void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
-{
- ide_hwgroup_t *hwgroup = hwif->hwgroup;
-
- spin_lock_irq(&ide_lock);
- /*
- * Remove us from the hwgroup, and free
- * the hwgroup if we were the only member
- */
- if (hwif->next == hwif) {
- BUG_ON(hwgroup->hwif != hwif);
- kfree(hwgroup);
- } else {
- /* There is another interface in hwgroup.
- * Unlink us, and set hwgroup->drive and ->hwif to
- * something sane.
- */
- ide_hwif_t *g = hwgroup->hwif;
-
- while (g->next != hwif)
- g = g->next;
- g->next = hwif->next;
- if (hwgroup->hwif == hwif) {
- /* Chose a random hwif for hwgroup->hwif.
- * It's guaranteed that there are no drives
- * left in the hwgroup.
- */
- BUG_ON(hwgroup->drive != NULL);
- hwgroup->hwif = g;
- }
- BUG_ON(hwgroup->hwif == hwif);
- }
- spin_unlock_irq(&ide_lock);
-}
-
/* Called with ide_lock held. */
static void __ide_port_unregister_devices(ide_hwif_t *hwif)
{
@@ -269,16 +233,9 @@ void ide_unregister(ide_hwif_t *hwif)
if (hwif->dma_base)
ide_release_dma_engine(hwif);
- spin_lock_irq(&ide_lock);
- /* restore hwif data to pristine status */
- ide_init_port_data(hwif, hwif->index);
- spin_unlock_irq(&ide_lock);
-
mutex_unlock(&ide_cfg_mtx);
}
-EXPORT_SYMBOL(ide_unregister);
-
void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
{
memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
@@ -287,8 +244,8 @@ void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
hwif->dev = hw->dev;
hwif->gendev.parent = hw->parent ? hw->parent : hw->dev;
hwif->ack_intr = hw->ack_intr;
+ hwif->config_data = hw->config;
}
-EXPORT_SYMBOL_GPL(ide_init_port_hw);
/*
* Locks for IDE setting functionality
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c
index 0497e7f85b0..7c2afa97f41 100644
--- a/drivers/ide/legacy/buddha.c
+++ b/drivers/ide/legacy/buddha.c
@@ -37,6 +37,8 @@
#define CATWEASEL_NUM_HWIFS 3
#define XSURF_NUM_HWIFS 2
+#define MAX_NUM_HWIFS 3
+
/*
* Bases of the IDE interfaces (relative to the board address)
*/
@@ -148,18 +150,14 @@ static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base,
static int __init buddha_init(void)
{
- hw_regs_t hw;
- ide_hwif_t *hwif;
- int i;
-
struct zorro_dev *z = NULL;
u_long buddha_board = 0;
BuddhaType type;
- int buddha_num_hwifs;
+ int buddha_num_hwifs, i;
while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
unsigned long board;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ hw_regs_t hw[MAX_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) {
buddha_num_hwifs = BUDDHA_NUM_HWIFS;
@@ -221,19 +219,13 @@ fail_base2:
ack_intr = xsurf_ack_intr;
}
- buddha_setup_ports(&hw, base, ctl, irq_port, ack_intr);
+ buddha_setup_ports(&hw[i], base, ctl, irq_port,
+ ack_intr);
- hwif = ide_find_port();
- if (hwif) {
- u8 index = hwif->index;
-
- ide_init_port_hw(hwif, &hw);
-
- idx[i] = index;
- }
+ hws[i] = &hw[i];
}
- ide_device_add(idx, NULL);
+ ide_host_add(NULL, hws, NULL);
}
return 0;
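buddha is the first of several legacy hosts in this merge converted from the ide_find_port()/ide_init_port_hw()/ide_device_add() sequence to a single helper call. The shared idiom, sketched with hypothetical names (nr_ports, my_setup_ports and PORT_STRIDE stand in for the per-driver details):

	hw_regs_t hw[MAX_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
	int i;

	for (i = 0; i < nr_ports; i++) {
		my_setup_ports(&hw[i], base + i * PORT_STRIDE, irq);
		hws[i] = &hw[i];
	}
	/* allocates the ide_host, binds the hw_regs_t and registers the ports */
	return ide_host_add(NULL, hws, NULL);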
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
index 129a812bb57..724f95073d8 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/legacy/falconide.c
@@ -66,6 +66,27 @@ static void falconide_output_data(ide_drive_t *drive, struct request *rq,
outsw_swapw(data_addr, buf, (len + 1) / 2);
}
+/* Atari has a byte-swapped IDE interface */
+static const struct ide_tp_ops falconide_tp_ops = {
+ .exec_command = ide_exec_command,
+ .read_status = ide_read_status,
+ .read_altstatus = ide_read_altstatus,
+ .read_sff_dma_status = ide_read_sff_dma_status,
+
+ .set_irq = ide_set_irq,
+
+ .tf_load = ide_tf_load,
+ .tf_read = ide_tf_read,
+
+ .input_data = falconide_input_data,
+ .output_data = falconide_output_data,
+};
+
+static const struct ide_port_info falconide_port_info = {
+ .tp_ops = &falconide_tp_ops,
+ .host_flags = IDE_HFLAG_NO_DMA,
+};
+
static void __init falconide_setup_ports(hw_regs_t *hw)
{
int i;
@@ -91,11 +112,12 @@ static void __init falconide_setup_ports(hw_regs_t *hw)
static int __init falconide_init(void)
{
- hw_regs_t hw;
- ide_hwif_t *hwif;
+ struct ide_host *host;
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ int rc;
if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE))
- return 0;
+ return -ENODEV;
printk(KERN_INFO "ide: Falcon IDE controller\n");
@@ -106,23 +128,25 @@ static int __init falconide_init(void)
falconide_setup_ports(&hw);
- hwif = ide_find_port();
- if (hwif) {
- u8 index = hwif->index;
- u8 idx[4] = { index, 0xff, 0xff, 0xff };
-
- ide_init_port_hw(hwif, &hw);
+ host = ide_host_alloc(&falconide_port_info, hws);
+ if (host == NULL) {
+ rc = -ENOMEM;
+ goto err;
+ }
- /* Atari has a byte-swapped IDE interface */
- hwif->input_data = falconide_input_data;
- hwif->output_data = falconide_output_data;
+ ide_get_lock(NULL, NULL);
+ rc = ide_host_register(host, &falconide_port_info, hws);
+ ide_release_lock();
- ide_get_lock(NULL, NULL);
- ide_device_add(idx, NULL);
- ide_release_lock();
- }
+ if (rc)
+ goto err_free;
return 0;
+err_free:
+ ide_host_free(host);
+err:
+ release_mem_region(ATA_HD_BASE, 0x40);
+ return rc;
}
module_init(falconide_init);
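falconide additionally splits allocation from registration because probing must happen under the Atari ST-DMA lock; only the register step is bracketed by ide_get_lock()/ide_release_lock(). Condensed from the hunk above (the I/O-region unwind on the error path is omitted here):

	host = ide_host_alloc(&falconide_port_info, hws);
	if (host == NULL)
		return -ENOMEM;

	ide_get_lock(NULL, NULL);	/* ST-DMA lock held across probing */
	rc = ide_host_register(host, &falconide_port_info, hws);
	ide_release_lock();

	if (rc)
		ide_host_free(host);
	return rc;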
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c
index 7e74b20202d..dd5c467d8dd 100644
--- a/drivers/ide/legacy/gayle.c
+++ b/drivers/ide/legacy/gayle.c
@@ -31,6 +31,8 @@
#define GAYLE_BASE_4000 0xdd2020 /* A4000/A4000T */
#define GAYLE_BASE_1200 0xda0000 /* A1200/A600 and E-Matrix 530 */
+#define GAYLE_IDEREG_SIZE 0x2000
+
/*
* Offsets from one of the above bases
*/
@@ -56,13 +58,11 @@
#define GAYLE_NUM_HWIFS 1
#define GAYLE_NUM_PROBE_HWIFS GAYLE_NUM_HWIFS
#define GAYLE_HAS_CONTROL_REG 1
-#define GAYLE_IDEREG_SIZE 0x2000
#else /* CONFIG_BLK_DEV_IDEDOUBLER */
#define GAYLE_NUM_HWIFS 2
#define GAYLE_NUM_PROBE_HWIFS (ide_doubler ? GAYLE_NUM_HWIFS : \
GAYLE_NUM_HWIFS-1)
#define GAYLE_HAS_CONTROL_REG (!ide_doubler)
-#define GAYLE_IDEREG_SIZE (ide_doubler ? 0x1000 : 0x2000)
static int ide_doubler;
module_param_named(doubler, ide_doubler, bool, 0);
@@ -124,8 +124,11 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
static int __init gayle_init(void)
{
+ unsigned long phys_base, res_start, res_n;
+ unsigned long base, ctrlport, irqport;
+ ide_ack_intr_t *ack_intr;
int a4000, i;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ hw_regs_t hw[GAYLE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
if (!MACH_IS_AMIGA)
return -ENODEV;
@@ -148,13 +151,6 @@ found:
#endif
"");
- for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) {
- unsigned long base, ctrlport, irqport;
- ide_ack_intr_t *ack_intr;
- hw_regs_t hw;
- ide_hwif_t *hwif;
- unsigned long phys_base, res_start, res_n;
-
if (a4000) {
phys_base = GAYLE_BASE_4000;
irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_4000);
@@ -168,33 +164,22 @@ found:
* FIXME: we now have selectable modes between mmio v/s iomio
*/
- phys_base += i*GAYLE_NEXT_PORT;
-
res_start = ((unsigned long)phys_base) & ~(GAYLE_NEXT_PORT-1);
res_n = GAYLE_IDEREG_SIZE;
if (!request_mem_region(res_start, res_n, "IDE"))
- continue;
+ return -EBUSY;
- base = (unsigned long)ZTWO_VADDR(phys_base);
+ for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) {
+ base = (unsigned long)ZTWO_VADDR(phys_base + i * GAYLE_NEXT_PORT);
ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0;
- gayle_setup_ports(&hw, base, ctrlport, irqport, ack_intr);
-
- hwif = ide_find_port();
- if (hwif) {
- u8 index = hwif->index;
+ gayle_setup_ports(&hw[i], base, ctrlport, irqport, ack_intr);
- ide_init_port_hw(hwif, &hw);
-
- idx[i] = index;
- } else
- release_mem_region(res_start, res_n);
+ hws[i] = &hw[i];
}
- ide_device_add(idx, NULL);
-
- return 0;
+ return ide_host_add(NULL, hws, NULL);
}
module_init(gayle_init);
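gayle gets the same host conversion plus a resource fix: one request_mem_region() call now covers the whole register block, taken once before the per-port loop, and a busy region fails the probe with -EBUSY instead of silently skipping ports. In sketch form:

	if (!request_mem_region(res_start, res_n, "IDE"))
		return -EBUSY;

	for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) {
		base = (unsigned long)ZTWO_VADDR(phys_base + i * GAYLE_NEXT_PORT);
		ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0;
		gayle_setup_ports(&hw[i], base, ctrlport, irqport, ack_intr);
		hws[i] = &hw[i];
	}
	return ide_host_add(NULL, hws, NULL);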
diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/legacy/ide-4drives.c
index 89c8ff0a4d0..c76d55de699 100644
--- a/drivers/ide/legacy/ide-4drives.c
+++ b/drivers/ide/legacy/ide-4drives.c
@@ -28,10 +28,8 @@ static const struct ide_port_info ide_4drives_port_info = {
static int __init ide_4drives_init(void)
{
- ide_hwif_t *hwif, *mate;
unsigned long base = 0x1f0, ctl = 0x3f6;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
- hw_regs_t hw;
+ hw_regs_t hw, *hws[] = { &hw, &hw, NULL, NULL };
if (probe_4drives == 0)
return -ENODEV;
@@ -55,21 +53,7 @@ static int __init ide_4drives_init(void)
hw.irq = 14;
hw.chipset = ide_4drives;
- hwif = ide_find_port();
- if (hwif) {
- ide_init_port_hw(hwif, &hw);
- idx[0] = hwif->index;
- }
-
- mate = ide_find_port();
- if (mate) {
- ide_init_port_hw(mate, &hw);
- idx[1] = mate->index;
- }
-
- ide_device_add(idx, &ide_4drives_port_info);
-
- return 0;
+ return ide_host_add(&ide_4drives_port_info, hws, NULL);
}
module_init(ide_4drives_init);
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 27b1e0b7ecb..21bfac13784 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -74,7 +74,7 @@ INT_MODULE_PARM(pc_debug, 0);
typedef struct ide_info_t {
struct pcmcia_device *p_dev;
- ide_hwif_t *hwif;
+ struct ide_host *host;
int ndev;
dev_node_t node;
} ide_info_t;
@@ -132,7 +132,7 @@ static int ide_probe(struct pcmcia_device *link)
static void ide_detach(struct pcmcia_device *link)
{
ide_info_t *info = link->priv;
- ide_hwif_t *hwif = info->hwif;
+ ide_hwif_t *hwif = info->host->ports[0];
unsigned long data_addr, ctl_addr;
DEBUG(0, "ide_detach(0x%p)\n", link);
@@ -157,13 +157,13 @@ static const struct ide_port_info idecs_port_info = {
.host_flags = IDE_HFLAG_NO_DMA,
};
-static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl,
+static struct ide_host *idecs_register(unsigned long io, unsigned long ctl,
unsigned long irq, struct pcmcia_device *handle)
{
+ struct ide_host *host;
ide_hwif_t *hwif;
- hw_regs_t hw;
- int i;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ int i, rc;
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
if (!request_region(io, 8, DRV_NAME)) {
printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
@@ -184,30 +184,24 @@ static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl,
hw.chipset = ide_pci;
hw.dev = &handle->dev;
- hwif = ide_find_port();
- if (hwif == NULL)
+ rc = ide_host_add(&idecs_port_info, hws, &host);
+ if (rc)
goto out_release;
- i = hwif->index;
-
- ide_init_port_hw(hwif, &hw);
-
- idx[0] = i;
-
- ide_device_add(idx, &idecs_port_info);
+ hwif = host->ports[0];
if (hwif->present)
- return hwif;
+ return host;
/* retry registration in case device is still spinning up */
for (i = 0; i < 10; i++) {
msleep(100);
ide_port_scan(hwif);
if (hwif->present)
- return hwif;
+ return host;
}
- return hwif;
+ return host;
out_release:
release_region(ctl, 1);
@@ -239,7 +233,7 @@ static int ide_config(struct pcmcia_device *link)
cistpl_cftable_entry_t *cfg;
int pass, last_ret = 0, last_fn = 0, is_kme = 0;
unsigned long io_base, ctl_base;
- ide_hwif_t *hwif;
+ struct ide_host *host;
DEBUG(0, "ide_config(0x%p)\n", link);
@@ -334,21 +328,21 @@ static int ide_config(struct pcmcia_device *link)
if (is_kme)
outb(0x81, ctl_base+1);
- hwif = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
- if (hwif == NULL && link->io.NumPorts1 == 0x20) {
+ host = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
+ if (host == NULL && link->io.NumPorts1 == 0x20) {
outb(0x02, ctl_base + 0x10);
- hwif = idecs_register(io_base + 0x10, ctl_base + 0x10,
+ host = idecs_register(io_base + 0x10, ctl_base + 0x10,
link->irq.AssignedIRQ, link);
}
- if (hwif == NULL)
+ if (host == NULL)
goto failed;
info->ndev = 1;
- sprintf(info->node.dev_name, "hd%c", 'a' + hwif->index * 2);
- info->node.major = hwif->major;
+ sprintf(info->node.dev_name, "hd%c", 'a' + host->ports[0]->index * 2);
+ info->node.major = host->ports[0]->major;
info->node.minor = 0;
- info->hwif = hwif;
+ info->host = host;
link->dev_node = &info->node;
printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n",
info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10);
@@ -379,15 +373,15 @@ failed:
static void ide_release(struct pcmcia_device *link)
{
ide_info_t *info = link->priv;
- ide_hwif_t *hwif = info->hwif;
+ struct ide_host *host = info->host;
DEBUG(0, "ide_release(0x%p)\n", link);
- if (info->ndev) {
+ if (info->ndev)
/* FIXME: if this fails we need to queue the cleanup somehow
-- need to investigate the required PCMCIA magic */
- ide_unregister(hwif);
- }
+ ide_host_remove(host);
+
info->ndev = 0;
pcmcia_disable_device(link);
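ide-cs now keeps a struct ide_host in its per-socket state and retries probing through the host's first port, since a freshly powered card may still be spinning up. The retry, condensed from idecs_register() above:

	hwif = host->ports[0];
	if (hwif->present)
		return host;

	for (i = 0; i < 10; i++) {
		msleep(100);		/* give the card time to spin up */
		ide_port_scan(hwif);	/* re-probe just this port */
		if (hwif->present)
			return host;
	}
	return host;	/* port stays registered; media may appear later */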
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c
index a249562b34b..051b4ab0f35 100644
--- a/drivers/ide/legacy/ide_platform.c
+++ b/drivers/ide/legacy/ide_platform.c
@@ -52,12 +52,10 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
{
struct resource *res_base, *res_alt, *res_irq;
void __iomem *base, *alt_base;
- ide_hwif_t *hwif;
struct pata_platform_info *pdata;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
- int ret = 0;
- int mmio = 0;
- hw_regs_t hw;
+ struct ide_host *host;
+ int ret = 0, mmio = 0;
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
struct ide_port_info d = platform_ide_port_info;
pdata = pdev->dev.platform_data;
@@ -94,28 +92,18 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
res_alt->start, res_alt->end - res_alt->start + 1);
}
- hwif = ide_find_port();
- if (!hwif) {
- ret = -ENODEV;
- goto out;
- }
-
memset(&hw, 0, sizeof(hw));
plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
hw.dev = &pdev->dev;
- ide_init_port_hw(hwif, &hw);
-
- if (mmio) {
+ if (mmio)
d.host_flags |= IDE_HFLAG_MMIO;
- default_hwif_mmiops(hwif);
- }
- idx[0] = hwif->index;
-
- ide_device_add(idx, &d);
+ ret = ide_host_add(&d, hws, &host);
+ if (ret)
+ goto out;
- platform_set_drvdata(pdev, hwif);
+ platform_set_drvdata(pdev, host);
return 0;
@@ -125,9 +113,9 @@ out:
static int __devexit plat_ide_remove(struct platform_device *pdev)
{
- ide_hwif_t *hwif = pdev->dev.driver_data;
+ struct ide_host *host = pdev->dev.driver_data;
- ide_unregister(hwif);
+ ide_host_remove(host);
return 0;
}
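For hot-unpluggable hosts the driver-data handle is the point of the conversion: ide_platform stores the ide_host so remove() can tear down all ports in one call, as in the hunk above:

	static int __devexit plat_ide_remove(struct platform_device *pdev)
	{
		struct ide_host *host = pdev->dev.driver_data;

		ide_host_remove(host);	/* unregisters every port, frees the host */
		return 0;
	}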
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
index 0a6195bcfed..a0bb167980e 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/legacy/macide.c
@@ -91,11 +91,10 @@ static const char *mac_ide_name[] =
static int __init macide_init(void)
{
- ide_hwif_t *hwif;
ide_ack_intr_t *ack_intr;
unsigned long base;
int irq;
- hw_regs_t hw;
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
if (!MACH_IS_MAC)
return -ENODEV;
@@ -125,17 +124,7 @@ static int __init macide_init(void)
macide_setup_ports(&hw, base, irq, ack_intr);
- hwif = ide_find_port();
- if (hwif) {
- u8 index = hwif->index;
- u8 idx[4] = { index, 0xff, 0xff, 0xff };
-
- ide_init_port_hw(hwif, &hw);
-
- ide_device_add(idx, NULL);
- }
-
- return 0;
+ return ide_host_add(NULL, hws, NULL);
}
module_init(macide_init);
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
index 9c2b9d078f6..4abd8fc7819 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/legacy/q40ide.c
@@ -96,6 +96,27 @@ static void q40ide_output_data(ide_drive_t *drive, struct request *rq,
outsw_swapw(data_addr, buf, (len + 1) / 2);
}
+/* Q40 has a byte-swapped IDE interface */
+static const struct ide_tp_ops q40ide_tp_ops = {
+ .exec_command = ide_exec_command,
+ .read_status = ide_read_status,
+ .read_altstatus = ide_read_altstatus,
+ .read_sff_dma_status = ide_read_sff_dma_status,
+
+ .set_irq = ide_set_irq,
+
+ .tf_load = ide_tf_load,
+ .tf_read = ide_tf_read,
+
+ .input_data = q40ide_input_data,
+ .output_data = q40ide_output_data,
+};
+
+static const struct ide_port_info q40ide_port_info = {
+ .tp_ops = &q40ide_tp_ops,
+ .host_flags = IDE_HFLAG_NO_DMA,
+};
+
/*
* the static array is needed to have the name reported in /proc/ioports,
* hwif->name unfortunately isn't available yet
@@ -111,9 +132,7 @@ static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={
static int __init q40ide_init(void)
{
int i;
- ide_hwif_t *hwif;
- const char *name;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ hw_regs_t hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
if (!MACH_IS_Q40)
return -ENODEV;
@@ -121,9 +140,8 @@ static int __init q40ide_init(void)
printk(KERN_INFO "ide: Q40 IDE controller\n");
for (i = 0; i < Q40IDE_NUM_HWIFS; i++) {
- hw_regs_t hw;
+ const char *name = q40_ide_names[i];
- name = q40_ide_names[i];
if (!request_region(pcide_bases[i], 8, name)) {
printk("could not reserve ports %lx-%lx for %s\n",
pcide_bases[i],pcide_bases[i]+8,name);
@@ -135,26 +153,13 @@ static int __init q40ide_init(void)
release_region(pcide_bases[i], 8);
continue;
}
- q40_ide_setup_ports(&hw, pcide_bases[i],
- NULL,
-// m68kide_iops,
+ q40_ide_setup_ports(&hw[i], pcide_bases[i], NULL,
q40ide_default_irq(pcide_bases[i]));
- hwif = ide_find_port();
- if (hwif) {
- ide_init_port_hw(hwif, &hw);
-
- /* Q40 has a byte-swapped IDE interface */
- hwif->input_data = q40ide_input_data;
- hwif->output_data = q40ide_output_data;
-
- idx[i] = hwif->index;
- }
+ hws[i] = &hw[i];
}
- ide_device_add(idx, NULL);
-
- return 0;
+ return ide_host_add(&q40ide_port_info, hws, NULL);
}
module_init(q40ide_init);
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index 48d57cae63c..11b7f61aae4 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -519,6 +519,23 @@ static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
*ata_regs = ahwif->regbase + (14 << IDE_REG_SHIFT);
}
+#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
+static const struct ide_tp_ops au1xxx_tp_ops = {
+ .exec_command = ide_exec_command,
+ .read_status = ide_read_status,
+ .read_altstatus = ide_read_altstatus,
+ .read_sff_dma_status = ide_read_sff_dma_status,
+
+ .set_irq = ide_set_irq,
+
+ .tf_load = ide_tf_load,
+ .tf_read = ide_tf_read,
+
+ .input_data = au1xxx_input_data,
+ .output_data = au1xxx_output_data,
+};
+#endif
+
static const struct ide_port_ops au1xxx_port_ops = {
.set_pio_mode = au1xxx_set_pio_mode,
.set_dma_mode = auide_set_dma_mode,
@@ -526,6 +543,9 @@ static const struct ide_port_ops au1xxx_port_ops = {
static const struct ide_port_info au1xxx_port_info = {
.init_dma = auide_ddma_init,
+#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
+ .tp_ops = &au1xxx_tp_ops,
+#endif
.port_ops = &au1xxx_port_ops,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
.dma_ops = &au1xxx_dma_ops,
@@ -543,11 +563,10 @@ static int au_ide_probe(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
_auide_hwif *ahwif = &auide_hwif;
- ide_hwif_t *hwif;
struct resource *res;
+ struct ide_host *host;
int ret = 0;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
- hw_regs_t hw;
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
char *mode = "MWDMA2";
@@ -584,36 +603,19 @@ static int au_ide_probe(struct device *dev)
goto out;
}
- hwif = ide_find_port();
- if (hwif == NULL) {
- ret = -ENOENT;
- goto out;
- }
-
memset(&hw, 0, sizeof(hw));
auide_setup_ports(&hw, ahwif);
hw.irq = ahwif->irq;
hw.dev = dev;
hw.chipset = ide_au1xxx;
- ide_init_port_hw(hwif, &hw);
-
- /* If the user has selected DDMA assisted copies,
- then set up a few local I/O function entry points
- */
-
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
- hwif->input_data = au1xxx_input_data;
- hwif->output_data = au1xxx_output_data;
-#endif
-
- auide_hwif.hwif = hwif;
-
- idx[0] = hwif->index;
+ ret = ide_host_add(&au1xxx_port_info, hws, &host);
+ if (ret)
+ goto out;
- ide_device_add(idx, &au1xxx_port_info);
+ auide_hwif.hwif = host->ports[0];
- dev_set_drvdata(dev, hwif);
+ dev_set_drvdata(dev, host);
printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode );
@@ -625,10 +627,10 @@ static int au_ide_remove(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct resource *res;
- ide_hwif_t *hwif = dev_get_drvdata(dev);
+ struct ide_host *host = dev_get_drvdata(dev);
_auide_hwif *ahwif = &auide_hwif;
- ide_unregister(hwif);
+ ide_host_remove(host);
iounmap((void *)ahwif->regbase);
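au1xxx-ide shows the build-time flavor of the tp_ops change: the DBDMA-assisted PIO copies are selected through the port_info under CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA instead of being patched into hwif->input_data/output_data after probe. Consolidated (the remaining fields are as in the hunk above):

	static const struct ide_port_info au1xxx_port_info = {
		.init_dma	= auide_ddma_init,
	#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
		.tp_ops		= &au1xxx_tp_ops,	/* DBDMA-assisted PIO */
	#endif
		.port_ops	= &au1xxx_port_ops,
		/* dma_ops, host_flags, pio_mask as in the hunk above */
	};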
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
index 9f1212cc4ae..badf79fc9e3 100644
--- a/drivers/ide/mips/swarm.c
+++ b/drivers/ide/mips/swarm.c
@@ -72,12 +72,11 @@ static const struct ide_port_info swarm_port_info = {
*/
static int __devinit swarm_ide_probe(struct device *dev)
{
- ide_hwif_t *hwif;
u8 __iomem *base;
+ struct ide_host *host;
phys_t offset, size;
- hw_regs_t hw;
- int i;
- u8 idx[] = { 0xff, 0xff, 0xff, 0xff };
+ int i, rc;
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
if (!SIBYTE_HAVE_IDE)
return -ENODEV;
@@ -116,26 +115,17 @@ static int __devinit swarm_ide_probe(struct device *dev)
hw.irq = K_INT_GB_IDE;
hw.chipset = ide_generic;
- hwif = ide_find_port_slot(&swarm_port_info);
- if (hwif == NULL)
+ rc = ide_host_add(&swarm_port_info, hws, &host);
+ if (rc)
goto err;
- ide_init_port_hw(hwif, &hw);
-
- /* Setup MMIO ops. */
- default_hwif_mmiops(hwif);
-
- idx[0] = hwif->index;
-
- ide_device_add(idx, &swarm_port_info);
-
- dev_set_drvdata(dev, hwif);
+ dev_set_drvdata(dev, host);
return 0;
err:
release_resource(&swarm_ide_resource);
iounmap(base);
- return -ENOMEM;
+ return rc;
}
static struct device_driver swarm_ide_driver = {
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index ae7a4329a58..fbc43e121e6 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -195,7 +195,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
.host_flags = IDE_HFLAG_SERIALIZE |
IDE_HFLAG_NO_ATAPI_DMA |
IDE_HFLAG_NO_DSC |
- IDE_HFLAG_ABUSE_SET_DMA_MODE |
IDE_HFLAG_OFF_BOARD,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -205,7 +204,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
.init_chipset = init_chipset_aec62xx,
.port_ops = &atp86x_port_ops,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA |
- IDE_HFLAG_ABUSE_SET_DMA_MODE |
IDE_HFLAG_OFF_BOARD,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -216,7 +214,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
.enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
.port_ops = &atp86x_port_ops,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA |
- IDE_HFLAG_ABUSE_SET_DMA_MODE |
IDE_HFLAG_NON_BOOTABLE,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -226,7 +223,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
.init_chipset = init_chipset_aec62xx,
.port_ops = &atp86x_port_ops,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA |
- IDE_HFLAG_ABUSE_SET_DMA_MODE |
IDE_HFLAG_OFF_BOARD,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -237,7 +233,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
.enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
.port_ops = &atp86x_port_ops,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA |
- IDE_HFLAG_ABUSE_SET_DMA_MODE |
IDE_HFLAG_OFF_BOARD,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index 80d19c0eb78..5ef7817ac64 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -471,7 +471,15 @@ static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long base = ide_pci_dma_base(hwif, d);
- if (base == 0 || ide_pci_set_master(dev, d->name) < 0)
+ if (base == 0)
+ return -1;
+
+ hwif->dma_base = base;
+
+ if (ide_pci_check_simplex(hwif, d) < 0)
+ return -1;
+
+ if (ide_pci_set_master(dev, d->name) < 0)
return -1;
if (!hwif->channel)
@@ -483,7 +491,7 @@ static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
if (ide_allocate_dma_engine(hwif))
return -1;
- ide_setup_dma(hwif, base);
+ hwif->dma_ops = &sff_dma_ops;
return 0;
}
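The custom init_dma hook that alim15x3 (and hpt366 further down) now implement replaces ide_setup_dma(): record dma_base first so the simplex check can see it, then claim PCI bus mastering and fall back to the generic SFF operations. As one piece, with a hypothetical function name:

	static int __devinit my_init_dma(ide_hwif_t *hwif,
					 const struct ide_port_info *d)
	{
		unsigned long base = ide_pci_dma_base(hwif, d);

		if (base == 0)
			return -1;
		hwif->dma_base = base;		/* the simplex check needs this */
		if (ide_pci_check_simplex(hwif, d) < 0)
			return -1;
		if (ide_pci_set_master(to_pci_dev(hwif->dev), d->name) < 0)
			return -1;
		if (ide_allocate_dma_engine(hwif))
			return -1;
		hwif->dma_ops = &sff_dma_ops;	/* generic SFF bus-master ops */
		return 0;
	}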
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 0bfcdd0e77b..ef7d971031e 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -218,7 +218,6 @@ static const struct ide_port_ops amd_port_ops = {
#define IDE_HFLAGS_AMD \
(IDE_HFLAG_PIO_NO_BLACKLIST | \
- IDE_HFLAG_ABUSE_SET_DMA_MODE | \
IDE_HFLAG_POST_SET_MODE | \
IDE_HFLAG_IO_32BIT | \
IDE_HFLAG_UNMASK_IRQS)
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c
index 1ad1e23e310..e6c62006ca1 100644
--- a/drivers/ide/pci/cmd640.c
+++ b/drivers/ide/pci/cmd640.c
@@ -181,11 +181,6 @@ static u8 recovery_counts[4] = {16, 16, 16, 16}; /* Recovery count (encoded) */
static DEFINE_SPINLOCK(cmd640_lock);
/*
- * These are initialized to point at the devices we control
- */
-static ide_hwif_t *cmd_hwif0, *cmd_hwif1;
-
-/*
* Interface to access cmd640x registers
*/
static unsigned int cmd640_key;
@@ -717,8 +712,7 @@ static int __init cmd640x_init(void)
int second_port_cmd640 = 0, rc;
const char *bus_type, *port2;
u8 b, cfr;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
- hw_regs_t hw[2];
+ hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
if (cmd640_vlb && probe_for_cmd640_vlb()) {
bus_type = "VLB";
@@ -781,15 +775,10 @@ static int __init cmd640x_init(void)
printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x"
"\n", 'a' + cmd640_chip_version - 1, bus_type, cfr);
- cmd_hwif0 = ide_find_port();
-
/*
* Initialize data for primary port
*/
- if (cmd_hwif0) {
- ide_init_port_hw(cmd_hwif0, &hw[0]);
- idx[0] = cmd_hwif0->index;
- }
+ hws[0] = &hw[0];
/*
* Ensure compatibility by always using the slowest timings
@@ -829,13 +818,9 @@ static int __init cmd640x_init(void)
/*
* Initialize data for secondary cmd640 port, if enabled
*/
- if (second_port_cmd640) {
- cmd_hwif1 = ide_find_port();
- if (cmd_hwif1) {
- ide_init_port_hw(cmd_hwif1, &hw[1]);
- idx[1] = cmd_hwif1->index;
- }
- }
+ if (second_port_cmd640)
+ hws[1] = &hw[1];
+
printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n",
second_port_cmd640 ? "" : "not ", port2);
@@ -843,9 +828,7 @@ static int __init cmd640x_init(void)
cmd640_dump_regs();
#endif
- ide_device_add(idx, &cmd640_port_info);
-
- return 1;
+ return ide_host_add(&cmd640_port_info, hws, NULL);
}
module_param_named(probe_vlb, cmd640_vlb, bool, 0);
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index cfa784bacf4..ce58bfcdb3c 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -262,7 +262,7 @@ static int cmd648_dma_test_irq(ide_drive_t *drive)
unsigned long base = hwif->dma_base - (hwif->channel * 8);
u8 irq_mask = hwif->channel ? MRDMODE_INTR_CH1 :
MRDMODE_INTR_CH0;
- u8 dma_stat = inb(hwif->dma_status);
+ u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
u8 mrdmode = inb(base + 1);
#ifdef DEBUG
@@ -286,7 +286,7 @@ static int cmd64x_dma_test_irq(ide_drive_t *drive)
int irq_reg = hwif->channel ? ARTTIM23 : CFR;
u8 irq_mask = hwif->channel ? ARTTIM23_INTR_CH1 :
CFR_INTR_CH0;
- u8 dma_stat = inb(hwif->dma_status);
+ u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
u8 irq_stat = 0;
(void) pci_read_config_byte(dev, irq_reg, &irq_stat);
@@ -317,13 +317,13 @@ static int cmd646_1_dma_end(ide_drive_t *drive)
drive->waiting_for_dma = 0;
/* get DMA status */
- dma_stat = inb(hwif->dma_status);
+ dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
/* read DMA command state */
- dma_cmd = inb(hwif->dma_command);
+ dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
/* stop DMA */
- outb(dma_cmd & ~1, hwif->dma_command);
+ outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
/* clear the INTR & ERROR bits */
- outb(dma_stat | 6, hwif->dma_status);
+ outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
/* and free any DMA resources */
ide_destroy_dmatable(drive);
/* verify good DMA status */
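cmd64x, hpt366, ns87415, pdc202xx_old, piix and scc_pata all lose the cached hwif->dma_command/dma_status fields in favor of fixed offsets from hwif->dma_base. The standard SFF bus-master access pattern is now:

	u8 dma_cmd  = inb(hwif->dma_base + ATA_DMA_CMD);	/* offset 0 */
	u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);	/* offset 2 */

	outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);	/* stop DMA */
	outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);	/* ack INTR+ERROR */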
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
index 992b1cf8db6..b03d8ae947e 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/pci/cs5520.c
@@ -62,8 +62,6 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio)
struct pci_dev *pdev = to_pci_dev(hwif->dev);
int controller = drive->dn > 1 ? 1 : 0;
- /* FIXME: if DMA = 1 do we need to set the DMA bit here ? */
-
/* 8bit CAT/CRT - 8bit command timing for channel */
pci_write_config_byte(pdev, 0x62 + controller,
(cs5520_pio_clocks[pio].recovery << 4) |
@@ -89,46 +87,17 @@ static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed)
cs5520_set_pio_mode(drive, 0);
}
-/*
- * We wrap the DMA activate to set the vdma flag. This is needed
- * so that the IDE DMA layer issues PIO not DMA commands over the
- * DMA channel
- *
- * ATAPI is harder so disable it for now using IDE_HFLAG_NO_ATAPI_DMA
- */
-
-static void cs5520_dma_host_set(ide_drive_t *drive, int on)
-{
- drive->vdma = on;
- ide_dma_host_set(drive, on);
-}
-
static const struct ide_port_ops cs5520_port_ops = {
.set_pio_mode = cs5520_set_pio_mode,
.set_dma_mode = cs5520_set_dma_mode,
};
-static const struct ide_dma_ops cs5520_dma_ops = {
- .dma_host_set = cs5520_dma_host_set,
- .dma_setup = ide_dma_setup,
- .dma_exec_cmd = ide_dma_exec_cmd,
- .dma_start = ide_dma_start,
- .dma_end = __ide_dma_end,
- .dma_test_irq = ide_dma_test_irq,
- .dma_lost_irq = ide_dma_lost_irq,
- .dma_timeout = ide_dma_timeout,
-};
-
-/* FIXME: VDMA is disabled because it caused system hangs */
#define DECLARE_CS_DEV(name_str) \
{ \
.name = name_str, \
.port_ops = &cs5520_port_ops, \
- .dma_ops = &cs5520_dma_ops, \
.host_flags = IDE_HFLAG_ISA_PORTS | \
- IDE_HFLAG_CS5520 | \
- IDE_HFLAG_NO_ATAPI_DMA | \
- IDE_HFLAG_ABUSE_SET_DMA_MODE, \
+ IDE_HFLAG_CS5520, \
.pio_mask = ATA_PIO4, \
}
@@ -146,7 +115,7 @@ static const struct ide_port_info cyrix_chipsets[] __devinitdata = {
static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
const struct ide_port_info *d = &cyrix_chipsets[id->driver_data];
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
ide_setup_pci_noise(dev, d);
@@ -168,11 +137,9 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
* do all the device setup for us
*/
- ide_pci_setup_ports(dev, d, 14, &idx[0]);
-
- ide_device_add(idx, d);
+ ide_pci_setup_ports(dev, d, 14, &hw[0], &hws[0]);
- return 0;
+ return ide_host_add(d, hws, NULL);
}
static const struct pci_device_id cs5520_pci_tbl[] = {
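cs5520_init_one shows the PCI-side variant of the conversion: ide_pci_setup_ports() now fills caller-provided hw_regs_t structures and pointer slots rather than an index array, and the host comes up through ide_host_add(). Sketch:

	hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };

	/* fills hw[] from the PCI BARs (IRQ 14 forced for this chip) and
	 * points the hws[] slots at the ports that are actually enabled */
	ide_pci_setup_ports(dev, d, 14, &hw[0], &hws[0]);

	return ide_host_add(d, hws, NULL);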
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
index dc97c48623f..5404fe4f701 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/pci/cs5535.c
@@ -171,8 +171,7 @@ static const struct ide_port_ops cs5535_port_ops = {
static const struct ide_port_info cs5535_chipset __devinitdata = {
.name = "CS5535",
.port_ops = &cs5535_port_ops,
- .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE |
- IDE_HFLAG_ABUSE_SET_DMA_MODE,
+ .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
index 0106e2a2df7..f84bfb4f600 100644
--- a/drivers/ide/pci/delkin_cb.c
+++ b/drivers/ide/pci/delkin_cb.c
@@ -56,11 +56,10 @@ static const struct ide_port_info delkin_cb_port_info = {
static int __devinit
delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
{
+ struct ide_host *host;
unsigned long base;
- hw_regs_t hw;
- ide_hwif_t *hwif = NULL;
int i, rc;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
rc = pci_enable_device(dev);
if (rc) {
@@ -87,34 +86,26 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
hw.dev = &dev->dev;
hw.chipset = ide_pci; /* this enables IRQ sharing */
- hwif = ide_find_port();
- if (hwif == NULL)
+ rc = ide_host_add(&delkin_cb_port_info, hws, &host);
+ if (rc)
goto out_disable;
- i = hwif->index;
-
- ide_init_port_hw(hwif, &hw);
-
- idx[0] = i;
-
- ide_device_add(idx, &delkin_cb_port_info);
-
- pci_set_drvdata(dev, hwif);
+ pci_set_drvdata(dev, host);
return 0;
out_disable:
pci_release_regions(dev);
pci_disable_device(dev);
- return -ENODEV;
+ return rc;
}
static void
delkin_cb_remove (struct pci_dev *dev)
{
- ide_hwif_t *hwif = pci_get_drvdata(dev);
+ struct ide_host *host = pci_get_drvdata(dev);
- ide_unregister(hwif);
+ ide_host_remove(host);
pci_release_regions(dev);
pci_disable_device(dev);
diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c
index 84c36c11719..9e1d1c4741d 100644
--- a/drivers/ide/pci/hpt34x.c
+++ b/drivers/ide/pci/hpt34x.c
@@ -123,7 +123,6 @@ static const struct ide_port_ops hpt34x_port_ops = {
#define IDE_HFLAGS_HPT34X \
(IDE_HFLAG_NO_ATAPI_DMA | \
IDE_HFLAG_NO_DSC | \
- IDE_HFLAG_ABUSE_SET_DMA_MODE | \
IDE_HFLAG_NO_AUTODMA)
static const struct ide_port_info hpt34x_chipsets[] __devinitdata = {
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index 397c6cbe953..1f1135ce7cd 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -801,9 +801,9 @@ static void hpt370_irq_timeout(ide_drive_t *drive)
printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo & 0x1ff);
/* get DMA command mode */
- dma_cmd = inb(hwif->dma_command);
+ dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
/* stop DMA */
- outb(dma_cmd & ~0x1, hwif->dma_command);
+ outb(dma_cmd & ~0x1, hwif->dma_base + ATA_DMA_CMD);
hpt370_clear_engine(drive);
}
@@ -818,12 +818,12 @@ static void hpt370_dma_start(ide_drive_t *drive)
static int hpt370_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
- u8 dma_stat = inb(hwif->dma_status);
+ u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
if (dma_stat & 0x01) {
/* wait a little */
udelay(20);
- dma_stat = inb(hwif->dma_status);
+ dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
if (dma_stat & 0x01)
hpt370_irq_timeout(drive);
}
@@ -850,7 +850,7 @@ static int hpt374_dma_test_irq(ide_drive_t *drive)
return 0;
}
- dma_stat = inb(hwif->dma_status);
+ dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
/* return 1 if INTR asserted */
if (dma_stat & 4)
return 1;
@@ -1320,7 +1320,15 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
unsigned long flags, base = ide_pci_dma_base(hwif, d);
u8 dma_old, dma_new, masterdma = 0, slavedma = 0;
- if (base == 0 || ide_pci_set_master(dev, d->name) < 0)
+ if (base == 0)
+ return -1;
+
+ hwif->dma_base = base;
+
+ if (ide_pci_check_simplex(hwif, d) < 0)
+ return -1;
+
+ if (ide_pci_set_master(dev, d->name) < 0)
return -1;
dma_old = inb(base + 2);
@@ -1346,7 +1354,7 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
if (ide_allocate_dma_engine(hwif))
return -1;
- ide_setup_dma(hwif, base);
+ hwif->dma_ops = &sff_dma_ops;
return 0;
}
@@ -1401,7 +1409,6 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
#define IDE_HFLAGS_HPT3XX \
(IDE_HFLAG_NO_ATAPI_DMA | \
- IDE_HFLAG_ABUSE_SET_DMA_MODE | \
IDE_HFLAG_OFF_BOARD)
static const struct ide_port_ops hpt3xx_port_ops = {
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/pci/ns87415.c
index 45ba71a7182..5cd2b32ff0e 100644
--- a/drivers/ide/pci/ns87415.c
+++ b/drivers/ide/pci/ns87415.c
@@ -28,10 +28,6 @@
*/
#include <asm/superio.h>
-static unsigned long superio_ide_status[2];
-static unsigned long superio_ide_select[2];
-static unsigned long superio_ide_dma_status[2];
-
#define SUPERIO_IDE_MAX_RETRIES 25
/* Because of a defect in Super I/O, all reads of the PCI DMA status
@@ -40,27 +36,28 @@ static unsigned long superio_ide_dma_status[2];
*/
static u8 superio_ide_inb (unsigned long port)
{
- if (port == superio_ide_status[0] ||
- port == superio_ide_status[1] ||
- port == superio_ide_select[0] ||
- port == superio_ide_select[1] ||
- port == superio_ide_dma_status[0] ||
- port == superio_ide_dma_status[1]) {
- u8 tmp;
- int retries = SUPERIO_IDE_MAX_RETRIES;
+ u8 tmp;
+ int retries = SUPERIO_IDE_MAX_RETRIES;
- /* printk(" [ reading port 0x%x with retry ] ", port); */
+ /* printk(" [ reading port 0x%x with retry ] ", port); */
- do {
- tmp = inb(port);
- if (tmp == 0)
- udelay(50);
- } while (tmp == 0 && retries-- > 0);
+ do {
+ tmp = inb(port);
+ if (tmp == 0)
+ udelay(50);
+ } while (tmp == 0 && retries-- > 0);
- return tmp;
- }
+ return tmp;
+}
- return inb(port);
+static u8 superio_read_status(ide_hwif_t *hwif)
+{
+ return superio_ide_inb(hwif->io_ports.status_addr);
+}
+
+static u8 superio_read_sff_dma_status(ide_hwif_t *hwif)
+{
+ return superio_ide_inb(hwif->dma_base + ATA_DMA_STATUS);
}
static void superio_tf_read(ide_drive_t *drive, ide_task_t *task)
@@ -78,6 +75,8 @@ static void superio_tf_read(ide_drive_t *drive, ide_task_t *task)
/* be sure we're looking at the low order bits */
outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
+ tf->feature = inb(io_ports->feature_addr);
if (task->tf_flags & IDE_TFLAG_IN_NSECT)
tf->nsect = inb(io_ports->nsect_addr);
if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -105,36 +104,32 @@ static void superio_tf_read(ide_drive_t *drive, ide_task_t *task)
}
}
-static void __devinit superio_ide_init_iops (struct hwif_s *hwif)
-{
- struct pci_dev *pdev = to_pci_dev(hwif->dev);
- u32 base, dmabase;
- u8 port = hwif->channel, tmp;
+static const struct ide_tp_ops superio_tp_ops = {
+ .exec_command = ide_exec_command,
+ .read_status = superio_read_status,
+ .read_altstatus = ide_read_altstatus,
+ .read_sff_dma_status = superio_read_sff_dma_status,
- base = pci_resource_start(pdev, port * 2) & ~3;
- dmabase = pci_resource_start(pdev, 4) & ~3;
-
- superio_ide_status[port] = base + 7;
- superio_ide_select[port] = base + 6;
- superio_ide_dma_status[port] = dmabase + (!port ? 2 : 0xa);
-
- /* Clear error/interrupt, enable dma */
- tmp = superio_ide_inb(superio_ide_dma_status[port]);
- outb(tmp | 0x66, superio_ide_dma_status[port]);
+ .set_irq = ide_set_irq,
- hwif->tf_read = superio_tf_read;
+ .tf_load = ide_tf_load,
+ .tf_read = superio_tf_read,
- /* We need to override inb to workaround a SuperIO errata */
- hwif->INB = superio_ide_inb;
-}
+ .input_data = ide_input_data,
+ .output_data = ide_output_data,
+};
-static void __devinit init_iops_ns87415(ide_hwif_t *hwif)
+static void __devinit superio_init_iops(struct hwif_s *hwif)
{
- struct pci_dev *dev = to_pci_dev(hwif->dev);
+ struct pci_dev *pdev = to_pci_dev(hwif->dev);
+ u32 dma_stat;
+ u8 port = hwif->channel, tmp;
- if (PCI_SLOT(dev->devfn) == 0xE)
- /* Built-in - assume it's under superio. */
- superio_ide_init_iops(hwif);
+ dma_stat = (pci_resource_start(pdev, 4) & ~3) + (!port ? 2 : 0xa);
+
+ /* Clear error/interrupt, enable dma */
+ tmp = superio_ide_inb(dma_stat);
+ outb(tmp | 0x66, dma_stat);
}
#endif
@@ -200,14 +195,14 @@ static int ns87415_dma_end(ide_drive_t *drive)
u8 dma_stat = 0, dma_cmd = 0;
drive->waiting_for_dma = 0;
- dma_stat = hwif->INB(hwif->dma_status);
- /* get dma command mode */
- dma_cmd = hwif->INB(hwif->dma_command);
+ dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
+ /* get DMA command mode */
+ dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
/* stop DMA */
- outb(dma_cmd & ~1, hwif->dma_command);
+ outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
/* from ERRATA: clear the INTR & ERROR bits */
- dma_cmd = hwif->INB(hwif->dma_command);
- outb(dma_cmd | 6, hwif->dma_command);
+ dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
+ outb(dma_cmd | 6, hwif->dma_base + ATA_DMA_CMD);
/* and free any DMA resources */
ide_destroy_dmatable(drive);
/* verify good DMA status */
@@ -276,7 +271,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
outb(8, hwif->io_ports.ctl_addr);
do {
udelay(50);
- stat = hwif->INB(hwif->io_ports.status_addr);
+ stat = hwif->tp_ops->read_status(hwif);
if (stat == 0xff)
break;
} while ((stat & BUSY_STAT) && --timeout);
@@ -291,7 +286,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
if (!hwif->dma_base)
return;
- outb(0x60, hwif->dma_status);
+ outb(0x60, hwif->dma_base + ATA_DMA_STATUS);
}
static const struct ide_port_ops ns87415_port_ops = {
@@ -311,9 +306,6 @@ static const struct ide_dma_ops ns87415_dma_ops = {
static const struct ide_port_info ns87415_chipset __devinitdata = {
.name = "NS87415",
-#ifdef CONFIG_SUPERIO
- .init_iops = init_iops_ns87415,
-#endif
.init_hwif = init_hwif_ns87415,
.port_ops = &ns87415_port_ops,
.dma_ops = &ns87415_dma_ops,
@@ -323,7 +315,16 @@ static const struct ide_port_info ns87415_chipset __devinitdata = {
static int __devinit ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
- return ide_setup_pci_device(dev, &ns87415_chipset);
+ struct ide_port_info d = ns87415_chipset;
+
+#ifdef CONFIG_SUPERIO
+ if (PCI_SLOT(dev->devfn) == 0xE) {
+ /* Built-in - assume it's under superio. */
+ d.init_iops = superio_init_iops;
+ d.tp_ops = &superio_tp_ops;
+ }
+#endif
+ return ide_setup_pci_device(dev, &d);
}
static const struct pci_device_id ns87415_pci_tbl[] = {
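ns87415 retires the Super I/O special-casing of hwif->INB by copying the chipset description at probe time and swapping in the retrying superio_tp_ops only for the built-in instance; the whole decision now lives in one place, restated here:

	struct ide_port_info d = ns87415_chipset;	/* copy, then specialize */

	#ifdef CONFIG_SUPERIO
	if (PCI_SLOT(dev->devfn) == 0xE) {		/* built-in instance */
		d.init_iops = superio_init_iops;
		d.tp_ops = &superio_tp_ops;	/* retrying register reads */
	}
	#endif
	return ide_setup_pci_device(dev, &d);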
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index fca89eda5c0..e54dc653b8c 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -206,7 +206,7 @@ static int pdc202xx_dma_test_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
unsigned long high_16 = hwif->extra_base - 16;
- u8 dma_stat = inb(hwif->dma_status);
+ u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
u8 sc1d = inb(high_16 + 0x001d);
if (hwif->channel) {
@@ -312,7 +312,6 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
#define IDE_HFLAGS_PDC202XX \
(IDE_HFLAG_ERROR_STOPS_FIFO | \
- IDE_HFLAG_ABUSE_SET_DMA_MODE | \
IDE_HFLAG_OFF_BOARD)
static const struct ide_port_ops pdc20246_port_ops = {
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index f04738d14a6..0ce41b4ddda 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -227,9 +227,9 @@ static void piix_dma_clear_irq(ide_drive_t *drive)
u8 dma_stat;
/* clear the INTR & ERROR bits */
- dma_stat = inb(hwif->dma_status);
+ dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
/* Should we force the bit as well ? */
- outb(dma_stat, hwif->dma_status);
+ outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
}
struct ich_laptop {
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index 789c66dfbde..94a7ab86423 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -65,7 +65,7 @@
static struct scc_ports {
unsigned long ctl, dma;
- ide_hwif_t *hwif; /* for removing port from system */
+ struct ide_host *host; /* for removing port from system */
} scc_ports[MAX_HWIFS];
/* PIO transfer mode table */
@@ -126,6 +126,46 @@ static u8 scc_ide_inb(unsigned long port)
return (u8)data;
}
+static void scc_exec_command(ide_hwif_t *hwif, u8 cmd)
+{
+ out_be32((void *)hwif->io_ports.command_addr, cmd);
+ eieio();
+ in_be32((void *)(hwif->dma_base + 0x01c));
+ eieio();
+}
+
+static u8 scc_read_status(ide_hwif_t *hwif)
+{
+ return (u8)in_be32((void *)hwif->io_ports.status_addr);
+}
+
+static u8 scc_read_altstatus(ide_hwif_t *hwif)
+{
+ return (u8)in_be32((void *)hwif->io_ports.ctl_addr);
+}
+
+static u8 scc_read_sff_dma_status(ide_hwif_t *hwif)
+{
+ return (u8)in_be32((void *)(hwif->dma_base + 4));
+}
+
+static void scc_set_irq(ide_hwif_t *hwif, int on)
+{
+ u8 ctl = ATA_DEVCTL_OBS;
+
+ if (on == 4) { /* hack for SRST */
+ ctl |= 4;
+ on &= ~4;
+ }
+
+ ctl |= on ? 0 : 2;
+
+ out_be32((void *)hwif->io_ports.ctl_addr, ctl);
+ eieio();
+ in_be32((void *)(hwif->dma_base + 0x01c));
+ eieio();
+}
+
static void scc_ide_insw(unsigned long port, void *addr, u32 count)
{
u16 *ptr = (u16 *)addr;
@@ -148,14 +188,6 @@ static void scc_ide_outb(u8 addr, unsigned long port)
out_be32((void*)port, addr);
}
-static void scc_ide_outbsync(ide_hwif_t *hwif, u8 addr, unsigned long port)
-{
- out_be32((void*)port, addr);
- eieio();
- in_be32((void*)(hwif->dma_base + 0x01c));
- eieio();
-}
-
static void
scc_ide_outsw(unsigned long port, void *addr, u32 count)
{
@@ -261,14 +293,14 @@ static void scc_dma_host_set(ide_drive_t *drive, int on)
{
ide_hwif_t *hwif = drive->hwif;
u8 unit = (drive->select.b.unit & 0x01);
- u8 dma_stat = scc_ide_inb(hwif->dma_status);
+ u8 dma_stat = scc_ide_inb(hwif->dma_base + 4);
if (on)
dma_stat |= (1 << (5 + unit));
else
dma_stat &= ~(1 << (5 + unit));
- scc_ide_outb(dma_stat, hwif->dma_status);
+ scc_ide_outb(dma_stat, hwif->dma_base + 4);
}
/**
@@ -304,13 +336,13 @@ static int scc_dma_setup(ide_drive_t *drive)
out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);
/* specify r/w */
- out_be32((void __iomem *)hwif->dma_command, reading);
+ out_be32((void __iomem *)hwif->dma_base, reading);
- /* read dma_status for INTR & ERROR flags */
- dma_stat = in_be32((void __iomem *)hwif->dma_status);
+ /* read DMA status for INTR & ERROR flags */
+ dma_stat = in_be32((void __iomem *)(hwif->dma_base + 4));
/* clear INTR & ERROR flags */
- out_be32((void __iomem *)hwif->dma_status, dma_stat|6);
+ out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6);
drive->waiting_for_dma = 1;
return 0;
}
@@ -318,10 +350,10 @@ static int scc_dma_setup(ide_drive_t *drive)
static void scc_dma_start(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
- u8 dma_cmd = scc_ide_inb(hwif->dma_command);
+ u8 dma_cmd = scc_ide_inb(hwif->dma_base);
/* start DMA */
- scc_ide_outb(dma_cmd | 1, hwif->dma_command);
+ scc_ide_outb(dma_cmd | 1, hwif->dma_base);
hwif->dma = 1;
wmb();
}
@@ -333,13 +365,13 @@ static int __scc_dma_end(ide_drive_t *drive)
drive->waiting_for_dma = 0;
/* get DMA command mode */
- dma_cmd = scc_ide_inb(hwif->dma_command);
+ dma_cmd = scc_ide_inb(hwif->dma_base);
/* stop DMA */
- scc_ide_outb(dma_cmd & ~1, hwif->dma_command);
+ scc_ide_outb(dma_cmd & ~1, hwif->dma_base);
/* get DMA status */
- dma_stat = scc_ide_inb(hwif->dma_status);
+ dma_stat = scc_ide_inb(hwif->dma_base + 4);
/* clear the INTR & ERROR bits */
- scc_ide_outb(dma_stat | 6, hwif->dma_status);
+ scc_ide_outb(dma_stat | 6, hwif->dma_base + 4);
/* purge DMA mappings */
ide_destroy_dmatable(drive);
/* verify good DMA status */
@@ -359,6 +391,7 @@ static int __scc_dma_end(ide_drive_t *drive)
static int scc_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
+ void __iomem *dma_base = (void __iomem *)hwif->dma_base;
unsigned long intsts_port = hwif->dma_base + 0x014;
u32 reg;
int dma_stat, data_loss = 0;
@@ -397,7 +430,7 @@ static int scc_dma_end(ide_drive_t *drive)
printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);
- out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS);
+ out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
continue;
}
@@ -412,7 +445,7 @@ static int scc_dma_end(ide_drive_t *drive)
out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);
- out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS);
+ out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
continue;
}
@@ -420,12 +453,12 @@ static int scc_dma_end(ide_drive_t *drive)
printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);
- out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS);
+ out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
continue;
}
if (reg & INTSTS_ICERR) {
- out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS);
+ out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
@@ -553,14 +586,9 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
const struct ide_port_info *d)
{
struct scc_ports *ports = pci_get_drvdata(dev);
- ide_hwif_t *hwif = NULL;
- hw_regs_t hw;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
- int i;
-
- hwif = ide_find_port_slot(d);
- if (hwif == NULL)
- return -ENOMEM;
+ struct ide_host *host;
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ int i, rc;
memset(&hw, 0, sizeof(hw));
for (i = 0; i <= 8; i++)
@@ -568,11 +596,12 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
hw.irq = dev->irq;
hw.dev = &dev->dev;
hw.chipset = ide_pci;
- ide_init_port_hw(hwif, &hw);
- idx[0] = hwif->index;
+ rc = ide_host_add(d, hws, &host);
+ if (rc)
+ return rc;
- ide_device_add(idx, d);
+ ports->host = host;
return 0;
}
@@ -701,6 +730,8 @@ static void scc_tf_read(ide_drive_t *drive, ide_task_t *task)
/* be sure we're looking at the low order bits */
scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
+ tf->feature = scc_ide_inb(io_ports->feature_addr);
if (task->tf_flags & IDE_TFLAG_IN_NSECT)
tf->nsect = scc_ide_inb(io_ports->nsect_addr);
if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -774,16 +805,6 @@ static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
ide_set_hwifdata(hwif, ports);
- hwif->tf_load = scc_tf_load;
- hwif->tf_read = scc_tf_read;
-
- hwif->input_data = scc_input_data;
- hwif->output_data = scc_output_data;
-
- hwif->INB = scc_ide_inb;
- hwif->OUTB = scc_ide_outb;
- hwif->OUTBSYNC = scc_ide_outbsync;
-
hwif->dma_base = dma_base;
hwif->config_data = ports->ctl;
}
@@ -824,11 +845,6 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
{
struct scc_ports *ports = ide_get_hwifdata(hwif);
- ports->hwif = hwif;
-
- hwif->dma_command = hwif->dma_base;
- hwif->dma_status = hwif->dma_base + 0x04;
-
/* PTERADD */
out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
@@ -838,6 +854,21 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
}
+static const struct ide_tp_ops scc_tp_ops = {
+ .exec_command = scc_exec_command,
+ .read_status = scc_read_status,
+ .read_altstatus = scc_read_altstatus,
+ .read_sff_dma_status = scc_read_sff_dma_status,
+
+ .set_irq = scc_set_irq,
+
+ .tf_load = scc_tf_load,
+ .tf_read = scc_tf_read,
+
+ .input_data = scc_input_data,
+ .output_data = scc_output_data,
+};
+
static const struct ide_port_ops scc_port_ops = {
.set_pio_mode = scc_set_pio_mode,
.set_dma_mode = scc_set_dma_mode,
@@ -861,6 +892,7 @@ static const struct ide_dma_ops scc_dma_ops = {
.name = name_str, \
.init_iops = init_iops_scc, \
.init_hwif = init_hwif_scc, \
+ .tp_ops = &scc_tp_ops, \
.port_ops = &scc_port_ops, \
.dma_ops = &scc_dma_ops, \
.host_flags = IDE_HFLAG_SINGLE, \
@@ -895,7 +927,8 @@ static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_i
static void __devexit scc_remove(struct pci_dev *dev)
{
struct scc_ports *ports = pci_get_drvdata(dev);
- ide_hwif_t *hwif = ports->hwif;
+ struct ide_host *host = ports->host;
+ ide_hwif_t *hwif = host->ports[0];
if (hwif->dmatable_cpu) {
pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
@@ -903,7 +936,7 @@ static void __devexit scc_remove(struct pci_dev *dev)
hwif->dmatable_cpu = NULL;
}
- ide_unregister(hwif);
+ ide_host_remove(host);
iounmap((void*)ports->dma);
iounmap((void*)ports->ctl);
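The scc_tp_ops table added above gathers the old per-hwif INB/OUTB/OUTBSYNC function pointers into one typed ide_tp_ops vtable. A minimal sketch of the assumed call sites in the core, matching the hwif->tp_ops dereference visible in the setup-pci.c hunk further down (helper names are invented for illustration):

static inline void example_exec_command(ide_drive_t *drive, u8 cmd)
{
	ide_hwif_t *hwif = drive->hwif;

	/* was hwif->OUTBSYNC(hwif, cmd, hwif->io_ports.command_addr) */
	hwif->tp_ops->exec_command(hwif, cmd);
}

static inline u8 example_read_status(ide_hwif_t *hwif)
{
	/* was hwif->INB(hwif->io_ports.status_addr) */
	return hwif->tp_ops->read_status(hwif);
}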
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index a1fb20826a5..127ccb45e26 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -349,9 +349,7 @@ static const struct ide_port_ops svwks_port_ops = {
.cable_detect = svwks_cable_detect,
};
-#define IDE_HFLAGS_SVWKS \
- (IDE_HFLAG_LEGACY_IRQS | \
- IDE_HFLAG_ABUSE_SET_DMA_MODE)
+#define IDE_HFLAGS_SVWKS IDE_HFLAG_LEGACY_IRQS
static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
{ /* 0 */
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index c79ff5b4108..42eef19a18f 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -127,7 +127,7 @@ sgiioc4_checkirq(ide_hwif_t * hwif)
return 0;
}
-static u8 sgiioc4_INB(unsigned long);
+static u8 sgiioc4_read_status(ide_hwif_t *);
static int
sgiioc4_clearirq(ide_drive_t * drive)
@@ -141,18 +141,19 @@ sgiioc4_clearirq(ide_drive_t * drive)
intr_reg = readl((void __iomem *)other_ir);
if (intr_reg & 0x03) { /* Valid IOC4-IDE interrupt */
/*
- * Using sgiioc4_INB to read the Status register has a side
- * effect of clearing the interrupt. The first read should
+ * Using sgiioc4_read_status to read the Status register has a
+ * side effect of clearing the interrupt. The first read should
* clear it if it is set. The second read should return
* a "clear" status if it got cleared. If not, then spin
* for a bit trying to clear it.
*/
- u8 stat = sgiioc4_INB(io_ports->status_addr);
+ u8 stat = sgiioc4_read_status(hwif);
int count = 0;
- stat = sgiioc4_INB(io_ports->status_addr);
+
+ stat = sgiioc4_read_status(hwif);
while ((stat & 0x80) && (count++ < 100)) {
udelay(1);
- stat = sgiioc4_INB(io_ports->status_addr);
+ stat = sgiioc4_read_status(hwif);
}
if (intr_reg & 0x02) {
@@ -304,9 +305,9 @@ sgiioc4_dma_lost_irq(ide_drive_t * drive)
ide_dma_lost_irq(drive);
}
-static u8
-sgiioc4_INB(unsigned long port)
+static u8 sgiioc4_read_status(ide_hwif_t *hwif)
{
+ unsigned long port = hwif->io_ports.status_addr;
u8 reg = (u8) readb((void __iomem *) port);
if ((port & 0xFFF) == 0x11C) { /* Status register of IOC4 */
@@ -549,6 +550,21 @@ static int sgiioc4_dma_setup(ide_drive_t *drive)
return 0;
}
+static const struct ide_tp_ops sgiioc4_tp_ops = {
+ .exec_command = ide_exec_command,
+ .read_status = sgiioc4_read_status,
+ .read_altstatus = ide_read_altstatus,
+ .read_sff_dma_status = ide_read_sff_dma_status,
+
+ .set_irq = ide_set_irq,
+
+ .tf_load = ide_tf_load,
+ .tf_read = ide_tf_read,
+
+ .input_data = ide_input_data,
+ .output_data = ide_output_data,
+};
+
static const struct ide_port_ops sgiioc4_port_ops = {
.set_dma_mode = sgiioc4_set_dma_mode,
/* reset DMA engine, clear IRQs */
@@ -571,6 +587,7 @@ static const struct ide_port_info sgiioc4_port_info __devinitdata = {
.name = DRV_NAME,
.chipset = ide_pci,
.init_dma = ide_dma_sgiioc4,
+ .tp_ops = &sgiioc4_tp_ops,
.port_ops = &sgiioc4_port_ops,
.dma_ops = &sgiioc4_dma_ops,
.host_flags = IDE_HFLAG_MMIO,
@@ -583,10 +600,10 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
unsigned long cmd_base, irqport;
unsigned long bar0, cmd_phys_base, ctl;
void __iomem *virt_base;
- ide_hwif_t *hwif;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
- hw_regs_t hw;
+ struct ide_host *host;
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
struct ide_port_info d = sgiioc4_port_info;
+ int rc;
/* Get the CmdBlk and CtrlBlk Base Registers */
bar0 = pci_resource_start(dev, 0);
@@ -618,30 +635,26 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
hw.chipset = ide_pci;
hw.dev = &dev->dev;
- hwif = ide_find_port_slot(&d);
- if (hwif == NULL)
- goto err;
-
- ide_init_port_hw(hwif, &hw);
-
- /* The IOC4 uses MMIO rather than Port IO. */
- default_hwif_mmiops(hwif);
-
/* Initializing chipset IRQ Registers */
writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));
- hwif->INB = &sgiioc4_INB;
-
- idx[0] = hwif->index;
+ host = ide_host_alloc(&d, hws);
+ if (host == NULL) {
+ rc = -ENOMEM;
+ goto err;
+ }
- if (ide_device_add(idx, &d))
- return -EIO;
+ rc = ide_host_register(host, &d, hws);
+ if (rc)
+ goto err_free;
return 0;
+err_free:
+ ide_host_free(host);
err:
release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE);
iounmap(virt_base);
- return -ENOMEM;
+ return rc;
}
static unsigned int __devinit
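sgiioc4 uses the two-step ide_host_alloc()/ide_host_register() form so the chipset IRQ registers are programmed before registration probes the port, where other drivers in this patch call ide_host_add(). A sketch of the assumed relationship between the two paths (the wrapper shape is inferred, not shown in this patch):

static int example_host_add(const struct ide_port_info *d, hw_regs_t **hws,
			    struct ide_host **hostp)
{
	struct ide_host *host;
	int rc;

	host = ide_host_alloc(d, hws);
	if (host == NULL)
		return -ENOMEM;

	rc = ide_host_register(host, d, hws);
	if (rc) {
		ide_host_free(host);
		return rc;
	}

	if (hostp)
		*hostp = host;
	return 0;
}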
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index 6e9d7655d89..5965a35d94a 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -334,7 +334,7 @@ static int siimage_io_dma_test_irq(ide_drive_t *drive)
unsigned long addr = siimage_selreg(hwif, 1);
/* return 1 if INTR asserted */
- if (hwif->INB(hwif->dma_status) & 4)
+ if (inb(hwif->dma_base + ATA_DMA_STATUS) & 4)
return 1;
/* return 1 if Device INTR asserted */
@@ -382,7 +382,7 @@ static int siimage_mmio_dma_test_irq(ide_drive_t *drive)
}
/* return 1 if INTR asserted */
- if (readb((void __iomem *)hwif->dma_status) & 0x04)
+ if (readb((void __iomem *)(hwif->dma_base + ATA_DMA_STATUS)) & 4)
return 1;
/* return 1 if Device INTR asserted */
@@ -601,7 +601,7 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
* Fill in the basic hwif bits
*/
hwif->host_flags |= IDE_HFLAG_MMIO;
- default_hwif_mmiops(hwif);
+
hwif->hwif_data = addr;
/*
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c
index 6efbde29717..f82a6502c1b 100644
--- a/drivers/ide/pci/sl82c105.c
+++ b/drivers/ide/pci/sl82c105.c
@@ -157,9 +157,9 @@ static void sl82c105_dma_lost_irq(ide_drive_t *drive)
* Was DMA enabled? If so, disable it - we're resetting the
* host. The IDE layer will be handling the drive for us.
*/
- dma_cmd = inb(hwif->dma_command);
+ dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
if (dma_cmd & 1) {
- outb(dma_cmd & ~1, hwif->dma_command);
+ outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
printk("sl82c105: DMA was enabled\n");
}
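The removed hwif->dma_command and hwif->dma_status fields were fixed offsets from hwif->dma_base; the conversions here and in the surrounding drivers open-code them via ATA_DMA_CMD/ATA_DMA_STATUS. A sketch of the assumed offsets, matching the <linux/ata.h> of this era (scc_pata's MMIO block appears to space the same registers 32 bits apart, which would explain its +0/+4/+8 arithmetic):

/* SFF-8038i bus-master register block, per channel: */
#define EXAMPLE_DMA_CMD		0	/* was hwif->dma_command */
#define EXAMPLE_DMA_STATUS	2	/* was hwif->dma_status  */
#define EXAMPLE_DMA_TABLE_OFS	4	/* PRD table pointer     */

static u8 example_bmdma_status(ide_hwif_t *hwif)
{
	return inb(hwif->dma_base + EXAMPLE_DMA_STATUS);
}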
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
index 9b4b27a4c71..477e1979010 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/pci/tc86c001.c
@@ -63,7 +63,7 @@ static int tc86c001_timer_expiry(ide_drive_t *drive)
ide_hwif_t *hwif = HWIF(drive);
ide_expiry_t *expiry = ide_get_hwifdata(hwif);
ide_hwgroup_t *hwgroup = HWGROUP(drive);
- u8 dma_stat = inb(hwif->dma_status);
+ u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
/* Restore a higher level driver's expiry handler first. */
hwgroup->expiry = expiry;
@@ -71,21 +71,24 @@ static int tc86c001_timer_expiry(ide_drive_t *drive)
if ((dma_stat & 5) == 1) { /* DMA active and no interrupt */
unsigned long sc_base = hwif->config_data;
unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
- u8 dma_cmd = inb(hwif->dma_command);
+ u8 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
printk(KERN_WARNING "%s: DMA interrupt possibly stuck, "
"attempting recovery...\n", drive->name);
/* Stop DMA */
- outb(dma_cmd & ~0x01, hwif->dma_command);
+ outb(dma_cmd & ~0x01, hwif->dma_base + ATA_DMA_CMD);
/* Setup the dummy DMA transfer */
outw(0, sc_base + 0x0a); /* Sector Count */
outw(0, twcr_port); /* Transfer Word Count 1 or 2 */
/* Start the dummy DMA transfer */
- outb(0x00, hwif->dma_command); /* clear R_OR_WCTR for write */
- outb(0x01, hwif->dma_command); /* set START_STOPBM */
+
+ /* clear R_OR_WCTR for write */
+ outb(0x00, hwif->dma_base + ATA_DMA_CMD);
+ /* set START_STOPBM */
+ outb(0x01, hwif->dma_base + ATA_DMA_CMD);
/*
* If an interrupt was pending, it should come through shortly.
@@ -203,8 +206,7 @@ static const struct ide_port_info tc86c001_chipset __devinitdata = {
.init_hwif = init_hwif_tc86c001,
.port_ops = &tc86c001_port_ops,
.dma_ops = &tc86c001_dma_ops,
- .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD |
- IDE_HFLAG_ABUSE_SET_DMA_MODE,
+ .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA4,
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index e47384c70c4..09dc4803ef9 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -425,7 +425,6 @@ static const struct ide_port_info via82cxxx_chipset __devinitdata = {
.enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
.port_ops = &via_port_ops,
.host_flags = IDE_HFLAG_PIO_NO_BLACKLIST |
- IDE_HFLAG_ABUSE_SET_DMA_MODE |
IDE_HFLAG_POST_SET_MODE |
IDE_HFLAG_IO_32BIT,
.pio_mask = ATA_PIO5,
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 93fb9067c04..c521bf6e1bf 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -48,6 +48,8 @@
#include <asm/mediabay.h>
#endif
+#define DRV_NAME "ide-pmac"
+
#undef IDE_PMAC_DEBUG
#define DMA_WAIT_TIMEOUT 50
@@ -424,7 +426,9 @@ static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
static void
pmac_ide_selectproc(ide_drive_t *drive)
{
- pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
+ ide_hwif_t *hwif = drive->hwif;
+ pmac_ide_hwif_t *pmif =
+ (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
if (pmif == NULL)
return;
@@ -444,7 +448,9 @@ pmac_ide_selectproc(ide_drive_t *drive)
static void
pmac_ide_kauai_selectproc(ide_drive_t *drive)
{
- pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
+ ide_hwif_t *hwif = drive->hwif;
+ pmac_ide_hwif_t *pmif =
+ (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
if (pmif == NULL)
return;
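The same lookup recurs in every hook below: pmif now lives in the parent device's drvdata, set by the dev_set_drvdata()/pci_set_drvdata() calls in the attach paths further down, instead of the removed hwif->hwif_data. A sketch of the pattern (helper name invented):

static inline pmac_ide_hwif_t *example_drive_to_pmif(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;

	/* gendev.parent is the macio or PCI device that owns the port */
	return (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
}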
@@ -465,7 +471,9 @@ pmac_ide_kauai_selectproc(ide_drive_t *drive)
static void
pmac_ide_do_update_timings(ide_drive_t *drive)
{
- pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
+ ide_hwif_t *hwif = drive->hwif;
+ pmac_ide_hwif_t *pmif =
+ (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
if (pmif == NULL)
return;
@@ -478,12 +486,26 @@ pmac_ide_do_update_timings(ide_drive_t *drive)
pmac_ide_selectproc(drive);
}
-static void pmac_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port)
+static void pmac_exec_command(ide_hwif_t *hwif, u8 cmd)
{
- u32 tmp;
-
- writeb(value, (void __iomem *) port);
- tmp = readl((void __iomem *)(hwif->io_ports.data_addr
+ writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
+ (void)readl((void __iomem *)(hwif->io_ports.data_addr
+ + IDE_TIMING_CONFIG));
+}
+
+static void pmac_set_irq(ide_hwif_t *hwif, int on)
+{
+ u8 ctl = ATA_DEVCTL_OBS;
+
+ if (on == 4) { /* hack for SRST */
+ ctl |= 4;
+ on &= ~4;
+ }
+
+ ctl |= on ? 0 : 2;
+
+ writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
+ (void)readl((void __iomem *)(hwif->io_ports.data_addr
+ IDE_TIMING_CONFIG));
}
@@ -493,11 +515,13 @@ static void pmac_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port)
static void
pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
+ ide_hwif_t *hwif = drive->hwif;
+ pmac_ide_hwif_t *pmif =
+ (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio);
u32 *timings, t;
unsigned accessTicks, recTicks;
unsigned accessTime, recTime;
- pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
unsigned int cycle_time;
if (pmif == NULL)
@@ -778,9 +802,11 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
+ ide_hwif_t *hwif = drive->hwif;
+ pmac_ide_hwif_t *pmif =
+ (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
int unit = (drive->select.b.unit & 0x01);
int ret = 0;
- pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
u32 *timings, *timings2, tl[2];
timings = &pmif->timings[unit];
@@ -852,11 +878,8 @@ sanitize_timings(pmac_ide_hwif_t *pmif)
/* Suspend call back, should be called after the child devices
* have actually been suspended
*/
-static int
-pmac_ide_do_suspend(ide_hwif_t *hwif)
+static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif)
{
- pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
-
/* We clear the timings */
pmif->timings[0] = 0;
pmif->timings[1] = 0;
@@ -884,11 +907,8 @@ pmac_ide_do_suspend(ide_hwif_t *hwif)
/* Resume call back, should be called before the child devices
* are resumed
*/
-static int
-pmac_ide_do_resume(ide_hwif_t *hwif)
+static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif)
{
- pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
-
/* Hard reset & re-enable controller (do we really need to reset ? -BenH) */
if (!pmif->mediabay) {
ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1);
@@ -916,7 +936,8 @@ pmac_ide_do_resume(ide_hwif_t *hwif)
static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
{
- pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)ide_get_hwifdata(hwif);
+ pmac_ide_hwif_t *pmif =
+ (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
struct device_node *np = pmif->node;
const char *cable = of_get_property(np, "cable-type", NULL);
@@ -936,7 +957,40 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
return ATA_CBL_PATA40;
}
+static void pmac_ide_init_dev(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ pmac_ide_hwif_t *pmif =
+ (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+
+ if (pmif->mediabay) {
+#ifdef CONFIG_PMAC_MEDIABAY
+ if (check_media_bay_by_base(pmif->regbase, MB_CD) == 0) {
+ drive->noprobe = 0;
+ return;
+ }
+#endif
+ drive->noprobe = 1;
+ }
+}
+
+static const struct ide_tp_ops pmac_tp_ops = {
+ .exec_command = pmac_exec_command,
+ .read_status = ide_read_status,
+ .read_altstatus = ide_read_altstatus,
+ .read_sff_dma_status = ide_read_sff_dma_status,
+
+ .set_irq = pmac_set_irq,
+
+ .tf_load = ide_tf_load,
+ .tf_read = ide_tf_read,
+
+ .input_data = ide_input_data,
+ .output_data = ide_output_data,
+};
+
static const struct ide_port_ops pmac_ide_ata6_port_ops = {
+ .init_dev = pmac_ide_init_dev,
.set_pio_mode = pmac_ide_set_pio_mode,
.set_dma_mode = pmac_ide_set_dma_mode,
.selectproc = pmac_ide_kauai_selectproc,
@@ -944,6 +998,7 @@ static const struct ide_port_ops pmac_ide_ata6_port_ops = {
};
static const struct ide_port_ops pmac_ide_ata4_port_ops = {
+ .init_dev = pmac_ide_init_dev,
.set_pio_mode = pmac_ide_set_pio_mode,
.set_dma_mode = pmac_ide_set_dma_mode,
.selectproc = pmac_ide_selectproc,
@@ -951,6 +1006,7 @@ static const struct ide_port_ops pmac_ide_ata4_port_ops = {
};
static const struct ide_port_ops pmac_ide_port_ops = {
+ .init_dev = pmac_ide_init_dev,
.set_pio_mode = pmac_ide_set_pio_mode,
.set_dma_mode = pmac_ide_set_dma_mode,
.selectproc = pmac_ide_selectproc,
@@ -959,12 +1015,14 @@ static const struct ide_port_ops pmac_ide_port_ops = {
static const struct ide_dma_ops pmac_dma_ops;
static const struct ide_port_info pmac_port_info = {
+ .name = DRV_NAME,
.init_dma = pmac_ide_init_dma,
.chipset = ide_pmac,
+ .tp_ops = &pmac_tp_ops,
+ .port_ops = &pmac_ide_port_ops,
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
.dma_ops = &pmac_dma_ops,
#endif
- .port_ops = &pmac_ide_port_ops,
.host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
IDE_HFLAG_POST_SET_MODE |
IDE_HFLAG_MMIO |
@@ -977,13 +1035,15 @@ static const struct ide_port_info pmac_port_info = {
* Setup, register & probe an IDE channel driven by this driver, this is
* called by one of the 2 probe functions (macio or PCI).
*/
-static int __devinit
-pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
+static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw)
{
struct device_node *np = pmif->node;
const int *bidp;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ struct ide_host *host;
+ ide_hwif_t *hwif;
+ hw_regs_t *hws[] = { hw, NULL, NULL, NULL };
struct ide_port_info d = pmac_port_info;
+ int rc;
pmif->broken_dma = pmif->broken_dma_warn = 0;
if (of_device_is_compatible(np, "shasta-ata")) {
@@ -1054,31 +1114,16 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
}
- /* Setup MMIO ops */
- default_hwif_mmiops(hwif);
- hwif->OUTBSYNC = pmac_outbsync;
+ printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), "
+ "bus ID %d%s, irq %d\n", model_name[pmif->kind],
+ pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id,
+ pmif->mediabay ? " (mediabay)" : "", hw->irq);
- hwif->hwif_data = pmif;
- ide_init_port_hw(hwif, hw);
+ rc = ide_host_add(&d, hws, &host);
+ if (rc)
+ return rc;
- printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s, irq %d\n",
- hwif->index, model_name[pmif->kind], pmif->aapl_bus_id,
- pmif->mediabay ? " (mediabay)" : "", hwif->irq);
-
- if (pmif->mediabay) {
-#ifdef CONFIG_PMAC_MEDIABAY
- if (check_media_bay_by_base(pmif->regbase, MB_CD)) {
-#else
- if (1) {
-#endif
- hwif->drives[0].noprobe = 1;
- hwif->drives[1].noprobe = 1;
- }
- }
-
- idx[0] = hwif->index;
-
- ide_device_add(idx, &d);
+ hwif = host->ports[0];
return 0;
}
@@ -1101,7 +1146,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
{
void __iomem *base;
unsigned long regbase;
- ide_hwif_t *hwif;
pmac_ide_hwif_t *pmif;
int irq, rc;
hw_regs_t hw;
@@ -1110,14 +1154,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
if (pmif == NULL)
return -ENOMEM;
- hwif = ide_find_port();
- if (hwif == NULL) {
- printk(KERN_ERR "ide-pmac: MacIO interface attach with no slot\n");
- printk(KERN_ERR " %s\n", mdev->ofdev.node->full_name);
- rc = -ENODEV;
- goto out_free_pmif;
- }
-
if (macio_resource_count(mdev) == 0) {
printk(KERN_WARNING "ide-pmac: no address for %s\n",
mdev->ofdev.node->full_name);
@@ -1164,7 +1200,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
} else
pmif->dma_regs = NULL;
#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
- dev_set_drvdata(&mdev->ofdev.dev, hwif);
+ dev_set_drvdata(&mdev->ofdev.dev, pmif);
memset(&hw, 0, sizeof(hw));
pmac_ide_init_ports(&hw, pmif->regbase);
@@ -1172,7 +1208,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
hw.dev = &mdev->bus->pdev->dev;
hw.parent = &mdev->ofdev.dev;
- rc = pmac_ide_setup_device(pmif, hwif, &hw);
+ rc = pmac_ide_setup_device(pmif, &hw);
if (rc != 0) {
/* The interface is released to the common IDE layer */
dev_set_drvdata(&mdev->ofdev.dev, NULL);
@@ -1195,12 +1231,13 @@ out_free_pmif:
static int
pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
{
- ide_hwif_t *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
- int rc = 0;
+ pmac_ide_hwif_t *pmif =
+ (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
+ int rc = 0;
if (mesg.event != mdev->ofdev.dev.power.power_state.event
&& (mesg.event & PM_EVENT_SLEEP)) {
- rc = pmac_ide_do_suspend(hwif);
+ rc = pmac_ide_do_suspend(pmif);
if (rc == 0)
mdev->ofdev.dev.power.power_state = mesg;
}
@@ -1211,11 +1248,12 @@ pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
static int
pmac_ide_macio_resume(struct macio_dev *mdev)
{
- ide_hwif_t *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
- int rc = 0;
-
+ pmac_ide_hwif_t *pmif =
+ (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
+ int rc = 0;
+
if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) {
- rc = pmac_ide_do_resume(hwif);
+ rc = pmac_ide_do_resume(pmif);
if (rc == 0)
mdev->ofdev.dev.power.power_state = PMSG_ON;
}
@@ -1229,7 +1267,6 @@ pmac_ide_macio_resume(struct macio_dev *mdev)
static int __devinit
pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
{
- ide_hwif_t *hwif;
struct device_node *np;
pmac_ide_hwif_t *pmif;
void __iomem *base;
@@ -1247,14 +1284,6 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
if (pmif == NULL)
return -ENOMEM;
- hwif = ide_find_port();
- if (hwif == NULL) {
- printk(KERN_ERR "ide-pmac: PCI interface attach with no slot\n");
- printk(KERN_ERR " %s\n", np->full_name);
- rc = -ENODEV;
- goto out_free_pmif;
- }
-
if (pci_enable_device(pdev)) {
printk(KERN_WARNING "ide-pmac: Can't enable PCI device for "
"%s\n", np->full_name);
@@ -1284,14 +1313,14 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
pmif->kauai_fcr = base;
pmif->irq = pdev->irq;
- pci_set_drvdata(pdev, hwif);
+ pci_set_drvdata(pdev, pmif);
memset(&hw, 0, sizeof(hw));
pmac_ide_init_ports(&hw, pmif->regbase);
hw.irq = pdev->irq;
hw.dev = &pdev->dev;
- rc = pmac_ide_setup_device(pmif, hwif, &hw);
+ rc = pmac_ide_setup_device(pmif, &hw);
if (rc != 0) {
/* The interface is released to the common IDE layer */
pci_set_drvdata(pdev, NULL);
@@ -1310,12 +1339,12 @@ out_free_pmif:
static int
pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
- ide_hwif_t *hwif = (ide_hwif_t *)pci_get_drvdata(pdev);
- int rc = 0;
-
+ pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)pci_get_drvdata(pdev);
+ int rc = 0;
+
if (mesg.event != pdev->dev.power.power_state.event
&& (mesg.event & PM_EVENT_SLEEP)) {
- rc = pmac_ide_do_suspend(hwif);
+ rc = pmac_ide_do_suspend(pmif);
if (rc == 0)
pdev->dev.power.power_state = mesg;
}
@@ -1326,11 +1355,11 @@ pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
static int
pmac_ide_pci_resume(struct pci_dev *pdev)
{
- ide_hwif_t *hwif = (ide_hwif_t *)pci_get_drvdata(pdev);
- int rc = 0;
-
+ pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)pci_get_drvdata(pdev);
+ int rc = 0;
+
if (pdev->dev.power.power_state.event != PM_EVENT_ON) {
- rc = pmac_ide_do_resume(hwif);
+ rc = pmac_ide_do_resume(pmif);
if (rc == 0)
pdev->dev.power.power_state = PMSG_ON;
}
@@ -1421,10 +1450,11 @@ out:
static int
pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
{
+ ide_hwif_t *hwif = drive->hwif;
+ pmac_ide_hwif_t *pmif =
+ (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
struct dbdma_cmd *table;
int i, count = 0;
- ide_hwif_t *hwif = HWIF(drive);
- pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
struct scatterlist *sg;
int wr = (rq_data_dir(rq) == WRITE);
@@ -1520,7 +1550,8 @@ static int
pmac_ide_dma_setup(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
- pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
+ pmac_ide_hwif_t *pmif =
+ (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
struct request *rq = HWGROUP(drive)->rq;
u8 unit = (drive->select.b.unit & 0x01);
u8 ata4;
@@ -1560,7 +1591,9 @@ pmac_ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
static void
pmac_ide_dma_start(ide_drive_t *drive)
{
- pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
+ ide_hwif_t *hwif = drive->hwif;
+ pmac_ide_hwif_t *pmif =
+ (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
volatile struct dbdma_regs __iomem *dma;
dma = pmif->dma_regs;
@@ -1576,7 +1609,9 @@ pmac_ide_dma_start(ide_drive_t *drive)
static int
pmac_ide_dma_end (ide_drive_t *drive)
{
- pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
+ ide_hwif_t *hwif = drive->hwif;
+ pmac_ide_hwif_t *pmif =
+ (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
volatile struct dbdma_regs __iomem *dma;
u32 dstat;
@@ -1604,7 +1639,9 @@ pmac_ide_dma_end (ide_drive_t *drive)
static int
pmac_ide_dma_test_irq (ide_drive_t *drive)
{
- pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
+ ide_hwif_t *hwif = drive->hwif;
+ pmac_ide_hwif_t *pmif =
+ (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
volatile struct dbdma_regs __iomem *dma;
unsigned long status, timeout;
@@ -1664,7 +1701,9 @@ static void pmac_ide_dma_host_set(ide_drive_t *drive, int on)
static void
pmac_ide_dma_lost_irq (ide_drive_t *drive)
{
- pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
+ ide_hwif_t *hwif = drive->hwif;
+ pmac_ide_hwif_t *pmif =
+ (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
volatile struct dbdma_regs __iomem *dma;
unsigned long status;
@@ -1694,7 +1733,8 @@ static const struct ide_dma_ops pmac_dma_ops = {
static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
const struct ide_port_info *d)
{
- pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
+ pmac_ide_hwif_t *pmif =
+ (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
struct pci_dev *dev = to_pci_dev(hwif->dev);
/* We won't need pci_dev if we switch to generic consistent
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 65fc08b6b6d..b15cad58dc8 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -73,15 +73,12 @@ static void ide_pci_clear_simplex(unsigned long dma_base, const char *name)
* @d: IDE port info
*
* Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space.
- * Where a device has a partner that is already in DMA mode we check
- * and enforce IDE simplex rules.
*/
unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long dma_base = 0;
- u8 dma_stat = 0;
if (hwif->host_flags & IDE_HFLAG_MMIO)
return hwif->dma_base;
@@ -102,11 +99,19 @@ unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
if (hwif->channel)
dma_base += 8;
- if (d->host_flags & IDE_HFLAG_CS5520)
+ return dma_base;
+}
+EXPORT_SYMBOL_GPL(ide_pci_dma_base);
+
+int ide_pci_check_simplex(ide_hwif_t *hwif, const struct ide_port_info *d)
+{
+ u8 dma_stat;
+
+ if (d->host_flags & (IDE_HFLAG_MMIO | IDE_HFLAG_CS5520))
goto out;
if (d->host_flags & IDE_HFLAG_CLEAR_SIMPLEX) {
- ide_pci_clear_simplex(dma_base, d->name);
+ ide_pci_clear_simplex(hwif->dma_base, d->name);
goto out;
}
@@ -120,15 +125,15 @@ unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
* we tune the drive then try to grab DMA ownership if we want to be
* the DMA end. This has to be become dynamic to handle hot-plug.
*/
- dma_stat = hwif->INB(dma_base + 2);
+ dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
if ((dma_stat & 0x80) && hwif->mate && hwif->mate->dma_base) {
printk(KERN_INFO "%s: simplex device: DMA disabled\n", d->name);
- dma_base = 0;
+ return -1;
}
out:
- return dma_base;
+ return 0;
}
-EXPORT_SYMBOL_GPL(ide_pci_dma_base);
+EXPORT_SYMBOL_GPL(ide_pci_check_simplex);
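The simplex test split out above keys off bit 7 of the bus-master status register: on simplex-only chips both channels share one DMA engine, so DMA is refused on the second channel when the mate already owns a dma_base. The check now needs a valid hwif->dma_base, which is why ide_hwif_setup_dma() below assigns it before calling ide_pci_check_simplex(). A sketch of the bit in isolation (the mask value is taken from the 0x80 test above):

#define EXAMPLE_DMA_ST_SIMPLEX	0x80	/* one DMA engine for both channels */

static int example_is_simplex(ide_hwif_t *hwif)
{
	/* routed through tp_ops so MMIO hosts can override the access */
	return (hwif->tp_ops->read_sff_dma_status(hwif) &
		EXAMPLE_DMA_ST_SIMPLEX) != 0;
}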
/*
* Set up BM-DMA capability (PnP BIOS should have done this)
@@ -284,33 +289,31 @@ static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *
}
/**
- * ide_hwif_configure - configure an IDE interface
+ * ide_hw_configure - configure a hw_regs_t instance
* @dev: PCI device holding interface
* @d: IDE port info
* @port: port number
* @irq: PCI IRQ
+ * @hw: hw_regs_t instance corresponding to this port
*
* Perform the initial set up for the hardware interface structure. This
* is done per interface port rather than per PCI device. There may be
* more than one port per device.
*
- * Returns the new hardware interface structure, or NULL on a failure
+ * Returns zero on success or an error code.
*/
-static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
- const struct ide_port_info *d,
- unsigned int port, int irq)
+static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
+ unsigned int port, int irq, hw_regs_t *hw)
{
unsigned long ctl = 0, base = 0;
- ide_hwif_t *hwif;
- struct hw_regs_s hw;
if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
if (ide_pci_check_iomem(dev, d, 2 * port) ||
ide_pci_check_iomem(dev, d, 2 * port + 1)) {
printk(KERN_ERR "%s: I/O baseregs (BIOS) are reported "
"as MEM for port %d!\n", d->name, port);
- return NULL;
+ return -EINVAL;
}
ctl = pci_resource_start(dev, 2*port+1);
@@ -324,22 +327,16 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
if (!base || !ctl) {
printk(KERN_ERR "%s: bad PCI BARs for port %d, skipping\n",
d->name, port);
- return NULL;
+ return -EINVAL;
}
- hwif = ide_find_port_slot(d);
- if (hwif == NULL)
- return NULL;
-
- memset(&hw, 0, sizeof(hw));
- hw.irq = irq;
- hw.dev = &dev->dev;
- hw.chipset = d->chipset ? d->chipset : ide_pci;
- ide_std_init_ports(&hw, base, ctl | 2);
-
- ide_init_port_hw(hwif, &hw);
+ memset(hw, 0, sizeof(*hw));
+ hw->irq = irq;
+ hw->dev = &dev->dev;
+ hw->chipset = d->chipset ? d->chipset : ide_pci;
+ ide_std_init_ports(hw, base, ctl | 2);
- return hwif;
+ return 0;
}
#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
@@ -362,7 +359,15 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
(dev->class & 0x80))) {
unsigned long base = ide_pci_dma_base(hwif, d);
- if (base == 0 || ide_pci_set_master(dev, d->name) < 0)
+ if (base == 0)
+ return -1;
+
+ hwif->dma_base = base;
+
+ if (ide_pci_check_simplex(hwif, d) < 0)
+ return -1;
+
+ if (ide_pci_set_master(dev, d->name) < 0)
return -1;
if (hwif->host_flags & IDE_HFLAG_MMIO)
@@ -376,7 +381,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
if (ide_allocate_dma_engine(hwif))
return -1;
- ide_setup_dma(hwif, base);
+ hwif->dma_ops = &sff_dma_ops;
}
return 0;
@@ -429,7 +434,8 @@ out:
* @dev: PCI device
* @d: IDE port info
* @pciirq: IRQ line
- * @idx: ATA index table to update
+ * @hw: hw_regs_t instances corresponding to this PCI IDE device
+ * @hws: hw_regs_t pointers table to update
*
* Scan the interfaces attached to this device and do any
* necessary per port setup. Attach the devices and ask the
@@ -440,10 +446,10 @@ out:
* where the chipset setup is not the default PCI IDE one.
*/
-void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int pciirq, u8 *idx)
+void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d,
+ int pciirq, hw_regs_t *hw, hw_regs_t **hws)
{
int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port;
- ide_hwif_t *hwif;
u8 tmp;
/*
@@ -459,11 +465,10 @@ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int
continue; /* port not enabled */
}
- hwif = ide_hwif_configure(dev, d, port, pciirq);
- if (hwif == NULL)
+ if (ide_hw_configure(dev, d, port, pciirq, hw + port))
continue;
- *(idx + port) = hwif->index;
+ *(hws + port) = hw + port;
}
}
EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
@@ -480,7 +485,7 @@ EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
*/
static int do_ide_setup_pci_device(struct pci_dev *dev,
const struct ide_port_info *d,
- u8 *idx, u8 noisy)
+ u8 noisy)
{
int tried_config = 0;
int pciirq, ret;
@@ -529,22 +534,24 @@ static int do_ide_setup_pci_device(struct pci_dev *dev,
d->name, pciirq);
}
- /* FIXME: silent failure can happen */
-
- ide_pci_setup_ports(dev, d, pciirq, idx);
+ ret = pciirq;
out:
return ret;
}
int ide_setup_pci_device(struct pci_dev *dev, const struct ide_port_info *d)
{
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
int ret;
- ret = do_ide_setup_pci_device(dev, d, &idx[0], 1);
+ ret = do_ide_setup_pci_device(dev, d, 1);
+
+ if (ret >= 0) {
+ /* FIXME: silent failure can happen */
+ ide_pci_setup_ports(dev, d, ret, &hw[0], &hws[0]);
- if (ret >= 0)
- ide_device_add(idx, d);
+ ret = ide_host_add(d, hws, NULL);
+ }
return ret;
}
@@ -555,19 +562,23 @@ int ide_setup_pci_devices(struct pci_dev *dev1, struct pci_dev *dev2,
{
struct pci_dev *pdev[] = { dev1, dev2 };
int ret, i;
- u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
for (i = 0; i < 2; i++) {
- ret = do_ide_setup_pci_device(pdev[i], d, &idx[i*2], !i);
+ ret = do_ide_setup_pci_device(pdev[i], d, !i);
+
/*
* FIXME: Mom, mom, they stole the helper function I need to undo
* do_ide_setup_pci_device() on the first device!
*/
if (ret < 0)
goto out;
+
+ /* FIXME: silent failure can happen */
+ ide_pci_setup_ports(pdev[i], d, ret, &hw[i*2], &hws[i*2]);
}
- ide_device_add(idx, d);
+ ret = ide_host_add(d, hws, NULL);
out:
return ret;
}
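Taken together, the setup-pci.c changes replace the old hwif-index array with a hw_regs_t pointer table: do_ide_setup_pci_device() now only resolves the PCI IRQ, ide_pci_setup_ports() fills one hw_regs_t per enabled port, and ide_host_add() does the registration. Condensed into one sketch (using the functions as converted above):

static int example_pci_setup(struct pci_dev *dev, const struct ide_port_info *d)
{
	hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
	int pciirq;

	pciirq = do_ide_setup_pci_device(dev, d, 1);	/* IRQ or -errno */
	if (pciirq < 0)
		return pciirq;

	/* leaves hws[port] NULL for disabled or misconfigured ports */
	ide_pci_setup_ports(dev, d, pciirq, &hw[0], &hws[0]);

	return ide_host_add(d, hws, NULL);
}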
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 0792d930c48..7a64aa9b51b 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -646,8 +646,8 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
spin_lock_irqsave(&pool->last_cpu_lock, flags);
- cpu = next_cpu(pool->last_cpu, cpu_online_map);
- if (cpu == NR_CPUS)
+ cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
+ if (cpu >= nr_cpu_ids)
cpu = first_cpu(cpu_online_map);
pool->last_cpu = cpu;
spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
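The conversion replaces the NR_CPUS end-marker test with nr_cpu_ids, the runtime count of valid CPU ids; next_cpu_nr() is assumed to return a value >= nr_cpu_ids once the scan runs past the last set bit. Sketch of the wrap-around idiom (helper name invented):

static int example_next_online_cpu(int last)
{
	int cpu = next_cpu_nr(last, cpu_online_map);

	if (cpu >= nr_cpu_ids)		/* ran off the end of the mask */
		cpu = first_cpu(cpu_online_map);

	return cpu;
}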
diff --git a/drivers/input/keyboard/tosakbd.c b/drivers/input/keyboard/tosakbd.c
index 94e444b4ee1..b12b7ee4b6a 100644
--- a/drivers/input/keyboard/tosakbd.c
+++ b/drivers/input/keyboard/tosakbd.c
@@ -215,8 +215,6 @@ static int tosakbd_suspend(struct platform_device *dev, pm_message_t state)
unsigned long flags;
spin_lock_irqsave(&tosakbd->lock, flags);
- PGSR1 = (PGSR1 & ~TOSA_GPIO_LOW_STROBE_BIT);
- PGSR2 = (PGSR2 & ~TOSA_GPIO_HIGH_STROBE_BIT);
tosakbd->suspended = 1;
spin_unlock_irqrestore(&tosakbd->lock, flags);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 260bade0a5e..9f93c29fed3 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -5,6 +5,10 @@
menu "Multifunction device drivers"
depends on HAS_IOMEM
+config MFD_CORE
+ tristate
+ default n
+
config MFD_SM501
tristate "Support for Silicon Motion SM501"
---help---
@@ -38,6 +42,13 @@ config HTC_PASIC3
HTC Magician devices, respectively. Actual functionality is
handled by the leds-pasic3 and ds1wm drivers.
+config MFD_TC6393XB
+ bool "Support Toshiba TC6393XB"
+ depends on HAVE_GPIO_LIB
+ select MFD_CORE
+ help
+ Support for Toshiba Mobile IO Controller TC6393XB
+
endmenu
menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index eef4e26807d..33daa2f45dd 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -8,6 +8,10 @@ obj-$(CONFIG_MFD_ASIC3) += asic3.o
obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
+obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o
+
+obj-$(CONFIG_MFD_CORE) += mfd-core.o
+
obj-$(CONFIG_MCP) += mcp-core.o
obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o
obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
new file mode 100644
index 00000000000..d7d88ce053a
--- /dev/null
+++ b/drivers/mfd/mfd-core.c
@@ -0,0 +1,114 @@
+/*
+ * drivers/mfd/mfd-core.c
+ *
+ * core MFD support
+ * Copyright (c) 2006 Ian Molton
+ * Copyright (c) 2007,2008 Dmitry Baryshkov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+
+static int mfd_add_device(struct platform_device *parent,
+ const struct mfd_cell *cell,
+ struct resource *mem_base,
+ int irq_base)
+{
+ struct resource res[cell->num_resources];
+ struct platform_device *pdev;
+ int ret = -ENOMEM;
+ int r;
+
+ pdev = platform_device_alloc(cell->name, parent->id);
+ if (!pdev)
+ goto fail_alloc;
+
+ pdev->dev.parent = &parent->dev;
+
+ ret = platform_device_add_data(pdev,
+ cell, sizeof(struct mfd_cell));
+ if (ret)
+ goto fail_device;
+
+ memset(res, 0, sizeof(res)); /* memzero() is ARM-only */
+ for (r = 0; r < cell->num_resources; r++) {
+ res[r].name = cell->resources[r].name;
+ res[r].flags = cell->resources[r].flags;
+
+ /* Find out base to use */
+ if (cell->resources[r].flags & IORESOURCE_MEM) {
+ res[r].parent = mem_base;
+ res[r].start = mem_base->start +
+ cell->resources[r].start;
+ res[r].end = mem_base->start +
+ cell->resources[r].end;
+ } else if (cell->resources[r].flags & IORESOURCE_IRQ) {
+ res[r].start = irq_base +
+ cell->resources[r].start;
+ res[r].end = irq_base +
+ cell->resources[r].end;
+ } else {
+ res[r].parent = cell->resources[r].parent;
+ res[r].start = cell->resources[r].start;
+ res[r].end = cell->resources[r].end;
+ }
+ }
+
+ platform_device_add_resources(pdev, res, cell->num_resources);
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto fail_device;
+
+ return 0;
+
+/* platform_device_del(pdev); */
+fail_device:
+ platform_device_put(pdev);
+fail_alloc:
+ return ret;
+}
+
+int mfd_add_devices(struct platform_device *parent,
+ const struct mfd_cell *cells, int n_devs,
+ struct resource *mem_base, int irq_base)
+{
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < n_devs; i++) {
+ ret = mfd_add_device(parent, cells + i, mem_base, irq_base);
+ if (ret)
+ break;
+ }
+
+ if (ret)
+ mfd_remove_devices(parent);
+
+ return ret;
+}
+EXPORT_SYMBOL(mfd_add_devices);
+
+static int mfd_remove_devices_fn(struct device *dev, void *unused)
+{
+ platform_device_unregister(
+ container_of(dev, struct platform_device, dev));
+ return 0;
+}
+
+void mfd_remove_devices(struct platform_device *parent)
+{
+ device_for_each_child(&parent->dev, NULL, mfd_remove_devices_fn);
+}
+EXPORT_SYMBOL(mfd_remove_devices);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");
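A hypothetical consumer of the new core (every name below is invented for illustration): the parent driver lists its sub-devices as mfd_cells with resources expressed as offsets, and mfd_add_device() above rebases IORESOURCE_MEM entries on mem_base->start and IORESOURCE_IRQ entries on irq_base.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>

static struct resource example_uart_resources[] = {
	{
		.start	= 0x100,	/* offset into the parent iomem */
		.end	= 0x1ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 0,		/* offset from the parent irq_base */
		.end	= 0,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct mfd_cell example_cells[] = {
	{
		.name		= "example-uart",
		.num_resources	= ARRAY_SIZE(example_uart_resources),
		.resources	= example_uart_resources,
	},
};

static int example_parent_probe(struct platform_device *pdev)
{
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!iomem)
		return -EINVAL;

	/* rebases MEM resources on iomem->start, IRQ resources on 16 */
	return mfd_add_devices(pdev, example_cells, ARRAY_SIZE(example_cells),
			       iomem, 16);
}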
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
new file mode 100644
index 00000000000..2d87501b6fd
--- /dev/null
+++ b/drivers/mfd/tc6393xb.c
@@ -0,0 +1,600 @@
+/*
+ * Toshiba TC6393XB SoC support
+ *
+ * Copyright(c) 2005-2006 Chris Humbert
+ * Copyright(c) 2005 Dirk Opfer
+ * Copyright(c) 2005 Ian Molton <spyro@f2s.com>
+ * Copyright(c) 2007 Dmitry Baryshkov
+ *
+ * Based on code written by Sharp/Lineo for 2.4 kernels
+ * Based on locomo.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/fb.h>
+#include <linux/clk.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tmio.h>
+#include <linux/mfd/tc6393xb.h>
+#include <linux/gpio.h>
+
+#define SCR_REVID 0x08 /* b Revision ID */
+#define SCR_ISR 0x50 /* b Interrupt Status */
+#define SCR_IMR 0x52 /* b Interrupt Mask */
+#define SCR_IRR 0x54 /* b Interrupt Routing */
+#define SCR_GPER 0x60 /* w GP Enable */
+#define SCR_GPI_SR(i) (0x64 + (i)) /* b3 GPI Status */
+#define SCR_GPI_IMR(i) (0x68 + (i)) /* b3 GPI INT Mask */
+#define SCR_GPI_EDER(i) (0x6c + (i)) /* b3 GPI Edge Detect Enable */
+#define SCR_GPI_LIR(i) (0x70 + (i)) /* b3 GPI Level Invert */
+#define SCR_GPO_DSR(i) (0x78 + (i)) /* b3 GPO Data Set */
+#define SCR_GPO_DOECR(i) (0x7c + (i)) /* b3 GPO Data OE Control */
+#define SCR_GP_IARCR(i) (0x80 + (i)) /* b3 GP Internal Active Register Control */
+#define SCR_GP_IARLCR(i) (0x84 + (i)) /* b3 GP INTERNAL Active Register Level Control */
+#define SCR_GPI_BCR(i) (0x88 + (i)) /* b3 GPI Buffer Control */
+#define SCR_GPA_IARCR 0x8c /* w GPa Internal Active Register Control */
+#define SCR_GPA_IARLCR 0x90 /* w GPa Internal Active Register Level Control */
+#define SCR_GPA_BCR 0x94 /* w GPa Buffer Control */
+#define SCR_CCR 0x98 /* w Clock Control */
+#define SCR_PLL2CR 0x9a /* w PLL2 Control */
+#define SCR_PLL1CR 0x9c /* l PLL1 Control */
+#define SCR_DIARCR 0xa0 /* b Device Internal Active Register Control */
+#define SCR_DBOCR 0xa1 /* b Device Buffer Off Control */
+#define SCR_FER 0xe0 /* b Function Enable */
+#define SCR_MCR 0xe4 /* w Mode Control */
+#define SCR_CONFIG 0xfc /* b Configuration Control */
+#define SCR_DEBUG 0xff /* b Debug */
+
+#define SCR_CCR_CK32K BIT(0)
+#define SCR_CCR_USBCK BIT(1)
+#define SCR_CCR_UNK1 BIT(4)
+#define SCR_CCR_MCLK_MASK (7 << 8)
+#define SCR_CCR_MCLK_OFF (0 << 8)
+#define SCR_CCR_MCLK_12 (1 << 8)
+#define SCR_CCR_MCLK_24 (2 << 8)
+#define SCR_CCR_MCLK_48 (3 << 8)
+#define SCR_CCR_HCLK_MASK (3 << 12)
+#define SCR_CCR_HCLK_24 (0 << 12)
+#define SCR_CCR_HCLK_48 (1 << 12)
+
+#define SCR_FER_USBEN BIT(0) /* USB host enable */
+#define SCR_FER_LCDCVEN BIT(1) /* polysilicon TFT enable */
+#define SCR_FER_SLCDEN BIT(2) /* SLCD enable */
+
+#define SCR_MCR_RDY_MASK (3 << 0)
+#define SCR_MCR_RDY_OPENDRAIN (0 << 0)
+#define SCR_MCR_RDY_TRISTATE (1 << 0)
+#define SCR_MCR_RDY_PUSHPULL (2 << 0)
+#define SCR_MCR_RDY_UNK BIT(2)
+#define SCR_MCR_RDY_EN BIT(3)
+#define SCR_MCR_INT_MASK (3 << 4)
+#define SCR_MCR_INT_OPENDRAIN (0 << 4)
+#define SCR_MCR_INT_TRISTATE (1 << 4)
+#define SCR_MCR_INT_PUSHPULL (2 << 4)
+#define SCR_MCR_INT_UNK BIT(6)
+#define SCR_MCR_INT_EN BIT(7)
+/* bits 8 - 16 are unknown */
+
+#define TC_GPIO_BIT(i) (1 << (i & 0x7))
+
+/*--------------------------------------------------------------------------*/
+
+struct tc6393xb {
+ void __iomem *scr;
+
+ struct gpio_chip gpio;
+
+ struct clk *clk; /* 3.6 MHz */
+
+ spinlock_t lock; /* protects RMW cycles */
+
+ struct {
+ u8 fer;
+ u16 ccr;
+ u8 gpi_bcr[3];
+ u8 gpo_dsr[3];
+ u8 gpo_doecr[3];
+ } suspend_state;
+
+ struct resource rscr;
+ struct resource *iomem;
+ int irq;
+ int irq_base;
+};
+
+enum {
+ TC6393XB_CELL_NAND,
+};
+
+/*--------------------------------------------------------------------------*/
+
+static int tc6393xb_nand_enable(struct platform_device *nand)
+{
+ struct platform_device *dev = to_platform_device(nand->dev.parent);
+ struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tc6393xb->lock, flags);
+
+ /* SMD buffer on */
+ dev_dbg(&dev->dev, "SMD buffer on\n");
+ iowrite8(0xff, tc6393xb->scr + SCR_GPI_BCR(1));
+
+ spin_unlock_irqrestore(&tc6393xb->lock, flags);
+
+ return 0;
+}
+
+static struct resource __devinitdata tc6393xb_nand_resources[] = {
+ {
+ .name = TMIO_NAND_CONFIG,
+ .start = 0x0100,
+ .end = 0x01ff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = TMIO_NAND_CONTROL,
+ .start = 0x1000,
+ .end = 0x1007,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = TMIO_NAND_IRQ,
+ .start = IRQ_TC6393_NAND,
+ .end = IRQ_TC6393_NAND,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell __devinitdata tc6393xb_cells[] = {
+ [TC6393XB_CELL_NAND] = {
+ .name = "tmio-nand",
+ .enable = tc6393xb_nand_enable,
+ .num_resources = ARRAY_SIZE(tc6393xb_nand_resources),
+ .resources = tc6393xb_nand_resources,
+ },
+};
+
+/*--------------------------------------------------------------------------*/
+
+static int tc6393xb_gpio_get(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
+
+ /* XXX: does dsr also represent inputs? */
+ return ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8))
+ & TC_GPIO_BIT(offset);
+}
+
+static void __tc6393xb_gpio_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
+ u8 dsr;
+
+ dsr = ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8));
+ if (value)
+ dsr |= TC_GPIO_BIT(offset);
+ else
+ dsr &= ~TC_GPIO_BIT(offset);
+
+ iowrite8(dsr, tc6393xb->scr + SCR_GPO_DSR(offset / 8));
+}
+
+static void tc6393xb_gpio_set(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tc6393xb->lock, flags);
+
+ __tc6393xb_gpio_set(chip, offset, value);
+
+ spin_unlock_irqrestore(&tc6393xb->lock, flags);
+}
+
+static int tc6393xb_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
+ unsigned long flags;
+ u8 doecr;
+
+ spin_lock_irqsave(&tc6393xb->lock, flags);
+
+ doecr = ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
+ doecr &= ~TC_GPIO_BIT(offset);
+ iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
+
+ spin_unlock_irqrestore(&tc6393xb->lock, flags);
+
+ return 0;
+}
+
+static int tc6393xb_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
+ unsigned long flags;
+ u8 doecr;
+
+ spin_lock_irqsave(&tc6393xb->lock, flags);
+
+ __tc6393xb_gpio_set(chip, offset, value);
+
+ doecr = ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
+ doecr |= TC_GPIO_BIT(offset);
+ iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
+
+ spin_unlock_irqrestore(&tc6393xb->lock, flags);
+
+ return 0;
+}
+
+static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base)
+{
+ tc6393xb->gpio.label = "tc6393xb";
+ tc6393xb->gpio.base = gpio_base;
+ tc6393xb->gpio.ngpio = 16;
+ tc6393xb->gpio.set = tc6393xb_gpio_set;
+ tc6393xb->gpio.get = tc6393xb_gpio_get;
+ tc6393xb->gpio.direction_input = tc6393xb_gpio_direction_input;
+ tc6393xb->gpio.direction_output = tc6393xb_gpio_direction_output;
+
+ return gpiochip_add(&tc6393xb->gpio);
+}
+
+/*--------------------------------------------------------------------------*/
+
+static void
+tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct tc6393xb *tc6393xb = get_irq_data(irq);
+ unsigned int isr;
+ unsigned int i, irq_base;
+
+ irq_base = tc6393xb->irq_base;
+
+ while ((isr = ioread8(tc6393xb->scr + SCR_ISR) &
+ ~ioread8(tc6393xb->scr + SCR_IMR)))
+ for (i = 0; i < TC6393XB_NR_IRQS; i++) {
+ if (isr & (1 << i))
+ generic_handle_irq(irq_base + i);
+ }
+}
+
+static void tc6393xb_irq_ack(unsigned int irq)
+{
+}
+
+static void tc6393xb_irq_mask(unsigned int irq)
+{
+ struct tc6393xb *tc6393xb = get_irq_chip_data(irq);
+ unsigned long flags;
+ u8 imr;
+
+ spin_lock_irqsave(&tc6393xb->lock, flags);
+ imr = ioread8(tc6393xb->scr + SCR_IMR);
+ imr |= 1 << (irq - tc6393xb->irq_base);
+ iowrite8(imr, tc6393xb->scr + SCR_IMR);
+ spin_unlock_irqrestore(&tc6393xb->lock, flags);
+}
+
+static void tc6393xb_irq_unmask(unsigned int irq)
+{
+ struct tc6393xb *tc6393xb = get_irq_chip_data(irq);
+ unsigned long flags;
+ u8 imr;
+
+ spin_lock_irqsave(&tc6393xb->lock, flags);
+ imr = ioread8(tc6393xb->scr + SCR_IMR);
+ imr &= ~(1 << (irq - tc6393xb->irq_base));
+ iowrite8(imr, tc6393xb->scr + SCR_IMR);
+ spin_unlock_irqrestore(&tc6393xb->lock, flags);
+}
+
+static struct irq_chip tc6393xb_chip = {
+ .name = "tc6393xb",
+ .ack = tc6393xb_irq_ack,
+ .mask = tc6393xb_irq_mask,
+ .unmask = tc6393xb_irq_unmask,
+};
+
+static void tc6393xb_attach_irq(struct platform_device *dev)
+{
+ struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ unsigned int irq, irq_base;
+
+ irq_base = tc6393xb->irq_base;
+
+ for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
+ set_irq_chip(irq, &tc6393xb_chip);
+ set_irq_chip_data(irq, tc6393xb);
+ set_irq_handler(irq, handle_edge_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+
+ set_irq_type(tc6393xb->irq, IRQT_FALLING);
+ set_irq_data(tc6393xb->irq, tc6393xb);
+ set_irq_chained_handler(tc6393xb->irq, tc6393xb_irq);
+}
+
+static void tc6393xb_detach_irq(struct platform_device *dev)
+{
+ struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ unsigned int irq, irq_base;
+
+ set_irq_chained_handler(tc6393xb->irq, NULL);
+ set_irq_data(tc6393xb->irq, NULL);
+
+ irq_base = tc6393xb->irq_base;
+
+ for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
+ set_irq_flags(irq, 0);
+ set_irq_chip(irq, NULL);
+ set_irq_chip_data(irq, NULL);
+ }
+}
+
+/*--------------------------------------------------------------------------*/
+
+static int tc6393xb_hw_init(struct platform_device *dev)
+{
+ struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
+ struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ int i;
+
+ iowrite8(tc6393xb->suspend_state.fer, tc6393xb->scr + SCR_FER);
+ iowrite16(tcpd->scr_pll2cr, tc6393xb->scr + SCR_PLL2CR);
+ iowrite16(tc6393xb->suspend_state.ccr, tc6393xb->scr + SCR_CCR);
+ iowrite16(SCR_MCR_RDY_OPENDRAIN | SCR_MCR_RDY_UNK | SCR_MCR_RDY_EN |
+ SCR_MCR_INT_OPENDRAIN | SCR_MCR_INT_UNK | SCR_MCR_INT_EN |
+ BIT(15), tc6393xb->scr + SCR_MCR);
+ iowrite16(tcpd->scr_gper, tc6393xb->scr + SCR_GPER);
+ iowrite8(0, tc6393xb->scr + SCR_IRR);
+ iowrite8(0xbf, tc6393xb->scr + SCR_IMR);
+
+ for (i = 0; i < 3; i++) {
+ iowrite8(tc6393xb->suspend_state.gpo_dsr[i],
+ tc6393xb->scr + SCR_GPO_DSR(i));
+ iowrite8(tc6393xb->suspend_state.gpo_doecr[i],
+ tc6393xb->scr + SCR_GPO_DOECR(i));
+ iowrite8(tc6393xb->suspend_state.gpi_bcr[i],
+ tc6393xb->scr + SCR_GPI_BCR(i));
+ }
+
+ return 0;
+}
+
+static int __devinit tc6393xb_probe(struct platform_device *dev)
+{
+ struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
+ struct tc6393xb *tc6393xb;
+ struct resource *iomem;
+ struct resource *rscr;
+ int retval, temp;
+ int i;
+
+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!iomem)
+ return -EINVAL;
+
+ tc6393xb = kzalloc(sizeof *tc6393xb, GFP_KERNEL);
+ if (!tc6393xb) {
+ retval = -ENOMEM;
+ goto err_kzalloc;
+ }
+
+ spin_lock_init(&tc6393xb->lock);
+
+ platform_set_drvdata(dev, tc6393xb);
+ tc6393xb->iomem = iomem;
+ tc6393xb->irq = platform_get_irq(dev, 0);
+ tc6393xb->irq_base = tcpd->irq_base;
+
+ tc6393xb->clk = clk_get(&dev->dev, "GPIO27_CLK" /* "CK3P6MI" */);
+ if (IS_ERR(tc6393xb->clk)) {
+ retval = PTR_ERR(tc6393xb->clk);
+ goto err_clk_get;
+ }
+
+ rscr = &tc6393xb->rscr;
+ rscr->name = "tc6393xb-core";
+ rscr->start = iomem->start;
+ rscr->end = iomem->start + 0xff;
+ rscr->flags = IORESOURCE_MEM;
+
+ retval = request_resource(iomem, rscr);
+ if (retval)
+ goto err_request_scr;
+
+ tc6393xb->scr = ioremap(rscr->start, rscr->end - rscr->start + 1);
+ if (!tc6393xb->scr) {
+ retval = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ retval = clk_enable(tc6393xb->clk);
+ if (retval)
+ goto err_clk_enable;
+
+ retval = tcpd->enable(dev);
+ if (retval)
+ goto err_enable;
+
+ tc6393xb->suspend_state.fer = 0;
+ for (i = 0; i < 3; i++) {
+ tc6393xb->suspend_state.gpo_dsr[i] =
+ (tcpd->scr_gpo_dsr >> (8 * i)) & 0xff;
+ tc6393xb->suspend_state.gpo_doecr[i] =
+ (tcpd->scr_gpo_doecr >> (8 * i)) & 0xff;
+ }
+ /*
+ * It may be necessary to change this back to
+ * platform-dependent code
+ */
+ tc6393xb->suspend_state.ccr = SCR_CCR_UNK1 |
+ SCR_CCR_HCLK_48;
+
+ retval = tc6393xb_hw_init(dev);
+ if (retval)
+ goto err_hw_init;
+
+ printk(KERN_INFO "Toshiba tc6393xb revision %d at 0x%08lx, irq %d\n",
+ ioread8(tc6393xb->scr + SCR_REVID),
+ (unsigned long) iomem->start, tc6393xb->irq);
+
+ tc6393xb->gpio.base = -1;
+
+ if (tcpd->gpio_base >= 0) {
+ retval = tc6393xb_register_gpio(tc6393xb, tcpd->gpio_base);
+ if (retval)
+ goto err_gpio_add;
+ }
+
+ if (tc6393xb->irq)
+ tc6393xb_attach_irq(dev);
+
+ tc6393xb_cells[TC6393XB_CELL_NAND].driver_data = tcpd->nand_data;
+
+ retval = mfd_add_devices(dev,
+ tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells),
+ iomem, tcpd->irq_base);
+
+ if (!retval)
+ return 0;
+
+ /* mfd_add_devices() failed: unwind everything set up above */
+ if (tc6393xb->irq)
+ tc6393xb_detach_irq(dev);
+
+err_gpio_add:
+ if (tc6393xb->gpio.base != -1)
+ temp = gpiochip_remove(&tc6393xb->gpio);
+err_hw_init:
+ tcpd->disable(dev);
+err_clk_enable:
+ clk_disable(tc6393xb->clk);
+err_enable:
+ iounmap(tc6393xb->scr);
+err_ioremap:
+ release_resource(&tc6393xb->rscr);
+err_request_scr:
+ clk_put(tc6393xb->clk);
+err_clk_get:
+ kfree(tc6393xb);
+err_kzalloc:
+ return retval;
+}
+
+static int __devexit tc6393xb_remove(struct platform_device *dev)
+{
+ struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
+ struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ int ret;
+
+ mfd_remove_devices(dev);
+
+ if (tc6393xb->irq)
+ tc6393xb_detach_irq(dev);
+
+ if (tc6393xb->gpio.base != -1) {
+ ret = gpiochip_remove(&tc6393xb->gpio);
+ if (ret) {
+ dev_err(&dev->dev, "Can't remove gpio chip: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = tcpd->disable(dev);
+
+ clk_disable(tc6393xb->clk);
+
+ iounmap(tc6393xb->scr);
+
+ release_resource(&tc6393xb->rscr);
+
+ platform_set_drvdata(dev, NULL);
+
+ clk_put(tc6393xb->clk);
+
+ kfree(tc6393xb);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int tc6393xb_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
+ struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+ int i;
+
+ tc6393xb->suspend_state.ccr = ioread16(tc6393xb->scr + SCR_CCR);
+ tc6393xb->suspend_state.fer = ioread8(tc6393xb->scr + SCR_FER);
+
+ for (i = 0; i < 3; i++) {
+ tc6393xb->suspend_state.gpo_dsr[i] =
+ ioread8(tc6393xb->scr + SCR_GPO_DSR(i));
+ tc6393xb->suspend_state.gpo_doecr[i] =
+ ioread8(tc6393xb->scr + SCR_GPO_DOECR(i));
+ tc6393xb->suspend_state.gpi_bcr[i] =
+ ioread8(tc6393xb->scr + SCR_GPI_BCR(i));
+ }
+
+ return tcpd->suspend(dev);
+}
+
+static int tc6393xb_resume(struct platform_device *dev)
+{
+ struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
+ int ret = tcpd->resume(dev);
+
+ if (ret)
+ return ret;
+
+ return tc6393xb_hw_init(dev);
+}
+#else
+#define tc6393xb_suspend NULL
+#define tc6393xb_resume NULL
+#endif
+
+static struct platform_driver tc6393xb_driver = {
+ .probe = tc6393xb_probe,
+ .remove = __devexit_p(tc6393xb_remove),
+ .suspend = tc6393xb_suspend,
+ .resume = tc6393xb_resume,
+
+ .driver = {
+ .name = "tc6393xb",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tc6393xb_init(void)
+{
+ return platform_driver_register(&tc6393xb_driver);
+}
+
+static void __exit tc6393xb_exit(void)
+{
+ platform_driver_unregister(&tc6393xb_driver);
+}
+
+subsys_initcall(tc6393xb_init);
+module_exit(tc6393xb_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov and Dirk Opfer");
+MODULE_DESCRIPTION("tc6393xb Toshiba Mobile IO Controller");
+MODULE_ALIAS("platform:tc6393xb");
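
The tc6393xb probe above is a textbook instance of the kernel's goto-unwind error handling: each acquired resource gets a label, a failure jumps to the label that releases everything obtained so far, and the labels execute in reverse acquisition order. The invariant is that every goto err_X lands just past the cleanup for the step that failed. A minimal standalone sketch of the pattern, with a hypothetical driver and resources (not part of the patch):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct example {
		struct clk *clk;
	};

	static int example_probe(struct platform_device *dev)
	{
		struct example *ex;
		int ret;

		ex = kzalloc(sizeof(*ex), GFP_KERNEL);
		if (!ex)
			return -ENOMEM;

		ex->clk = clk_get(&dev->dev, NULL);
		if (IS_ERR(ex->clk)) {
			ret = PTR_ERR(ex->clk);
			goto err_kzalloc;
		}

		ret = clk_enable(ex->clk);
		if (ret)
			goto err_clk_get;

		platform_set_drvdata(dev, ex);
		return 0;

	err_clk_get:
		clk_put(ex->clk);
	err_kzalloc:
		kfree(ex);
		return ret;
	}
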
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 08256ed0d9a..579b01ff82d 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -229,10 +229,11 @@ xpc_hb_checker(void *ignore)
int last_IRQ_count = 0;
int new_IRQ_count;
int force_IRQ = 0;
+ cpumask_of_cpu_ptr(cpumask, XPC_HB_CHECK_CPU);
/* this thread was marked active by xpc_hb_init() */
- set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU));
+ set_cpus_allowed_ptr(current, cpumask);
/* set our heartbeating to other partitions into motion */
xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index d6b9b486417..a067fe43630 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -21,13 +21,17 @@
#define RESULT_UNSUP_HOST 2
#define RESULT_UNSUP_CARD 3
-#define BUFFER_SIZE (PAGE_SIZE * 4)
+#define BUFFER_ORDER 2
+#define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
struct mmc_test_card {
struct mmc_card *card;
u8 scratch[BUFFER_SIZE];
u8 *buffer;
+#ifdef CONFIG_HIGHMEM
+ struct page *highmem;
+#endif
};
/*******************************************************************/
@@ -384,14 +388,16 @@ static int mmc_test_transfer(struct mmc_test_card *test,
int ret, i;
unsigned long flags;
+ BUG_ON(blocks * blksz > BUFFER_SIZE);
+
if (write) {
for (i = 0;i < blocks * blksz;i++)
test->scratch[i] = i;
} else {
- memset(test->scratch, 0, BUFFER_SIZE);
+ memset(test->scratch, 0, blocks * blksz);
}
local_irq_save(flags);
- sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
+ sg_copy_from_buffer(sg, sg_len, test->scratch, blocks * blksz);
local_irq_restore(flags);
ret = mmc_test_set_blksize(test, blksz);
@@ -438,7 +444,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
}
} else {
local_irq_save(flags);
- sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
+ sg_copy_to_buffer(sg, sg_len, test->scratch, blocks * blksz);
local_irq_restore(flags);
for (i = 0;i < blocks * blksz;i++) {
if (test->scratch[i] != (u8)i)
@@ -799,6 +805,157 @@ static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
return 0;
}
+static int mmc_test_bigsg_write(struct mmc_test_card *test)
+{
+ int ret;
+ unsigned int size;
+ struct scatterlist sg;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ size = PAGE_SIZE * 2;
+ size = min(size, test->card->host->max_req_size);
+ size = min(size, test->card->host->max_seg_size);
+ size = min(size, test->card->host->max_blk_count * 512);
+
+	if (size < 1024)
+		return RESULT_UNSUP_HOST;
+
+	memset(test->buffer, 0, BUFFER_SIZE);
+
+ sg_init_table(&sg, 1);
+ sg_init_one(&sg, test->buffer, BUFFER_SIZE);
+
+ ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mmc_test_bigsg_read(struct mmc_test_card *test)
+{
+ int ret, i;
+ unsigned int size;
+ struct scatterlist sg;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ size = PAGE_SIZE * 2;
+ size = min(size, test->card->host->max_req_size);
+ size = min(size, test->card->host->max_seg_size);
+ size = min(size, test->card->host->max_blk_count * 512);
+
+ if (size < 1024)
+ return RESULT_UNSUP_HOST;
+
+ memset(test->buffer, 0xCD, BUFFER_SIZE);
+
+ sg_init_table(&sg, 1);
+ sg_init_one(&sg, test->buffer, BUFFER_SIZE);
+ ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+ if (ret)
+ return ret;
+
+ /* mmc_test_transfer() doesn't check for read overflows */
+ for (i = size;i < BUFFER_SIZE;i++) {
+ if (test->buffer[i] != 0xCD)
+ return RESULT_FAIL;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_HIGHMEM
+
+static int mmc_test_write_high(struct mmc_test_card *test)
+{
+ int ret;
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, test->highmem, 512, 0);
+
+ ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mmc_test_read_high(struct mmc_test_card *test)
+{
+ int ret;
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, test->highmem, 512, 0);
+
+ ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mmc_test_multi_write_high(struct mmc_test_card *test)
+{
+ int ret;
+ unsigned int size;
+ struct scatterlist sg;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ size = PAGE_SIZE * 2;
+ size = min(size, test->card->host->max_req_size);
+ size = min(size, test->card->host->max_seg_size);
+ size = min(size, test->card->host->max_blk_count * 512);
+
+ if (size < 1024)
+ return RESULT_UNSUP_HOST;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, test->highmem, size, 0);
+
+ ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mmc_test_multi_read_high(struct mmc_test_card *test)
+{
+ int ret;
+ unsigned int size;
+ struct scatterlist sg;
+
+ if (test->card->host->max_blk_count == 1)
+ return RESULT_UNSUP_HOST;
+
+ size = PAGE_SIZE * 2;
+ size = min(size, test->card->host->max_req_size);
+ size = min(size, test->card->host->max_seg_size);
+ size = min(size, test->card->host->max_blk_count * 512);
+
+ if (size < 1024)
+ return RESULT_UNSUP_HOST;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, test->highmem, size, 0);
+
+ ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+#endif /* CONFIG_HIGHMEM */
+
static const struct mmc_test_case mmc_test_cases[] = {
{
.name = "Basic write (no data verification)",
@@ -913,6 +1070,53 @@ static const struct mmc_test_case mmc_test_cases[] = {
.name = "Correct xfer_size at read (midway failure)",
.run = mmc_test_multi_xfersize_read,
},
+
+ {
+ .name = "Over-sized SG list write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_bigsg_write,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Over-sized SG list read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_bigsg_read,
+ .cleanup = mmc_test_cleanup,
+ },
+
+#ifdef CONFIG_HIGHMEM
+
+ {
+ .name = "Highmem write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_write_high,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Highmem read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_read_high,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Multi-block highmem write",
+ .prepare = mmc_test_prepare_write,
+ .run = mmc_test_multi_write_high,
+ .cleanup = mmc_test_cleanup,
+ },
+
+ {
+ .name = "Multi-block highmem read",
+ .prepare = mmc_test_prepare_read,
+ .run = mmc_test_multi_read_high,
+ .cleanup = mmc_test_cleanup,
+ },
+
+#endif /* CONFIG_HIGHMEM */
+
};
static struct mutex mmc_test_lock;
@@ -1014,12 +1218,23 @@ static ssize_t mmc_test_store(struct device *dev,
test->card = card;
test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
+#ifdef CONFIG_HIGHMEM
+ test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
+#endif
+
+#ifdef CONFIG_HIGHMEM
+ if (test->buffer && test->highmem) {
+#else
if (test->buffer) {
+#endif
mutex_lock(&mmc_test_lock);
mmc_test_run(test, testcase);
mutex_unlock(&mmc_test_lock);
}
+#ifdef CONFIG_HIGHMEM
+	if (test->highmem)
+		__free_pages(test->highmem, BUFFER_ORDER);
+#endif
kfree(test->buffer);
kfree(test);
@@ -1041,6 +1256,8 @@ static int mmc_test_probe(struct mmc_card *card)
if (ret)
return ret;
+ dev_info(&card->dev, "Card claimed for testing.\n");
+
return 0;
}
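
All four "over-sized SG" and multi-block highmem tests added above compute their transfer size the same way before deciding whether the host can run them at all. A hedged helper showing that shared clamp (hypothetical; the patch deliberately open-codes it in each test):

	#include <linux/kernel.h>
	#include <linux/mmc/host.h>

	/* Start at two pages, honour every host limit, and treat anything
	 * below two 512-byte blocks as unsupported. */
	static unsigned int mmc_test_capped_size(struct mmc_host *host)
	{
		unsigned int size = PAGE_SIZE * 2;

		size = min(size, host->max_req_size);
		size = min(size, host->max_seg_size);
		size = min(size, host->max_blk_count * 512);

		return size < 1024 ? 0 : size;
	}

Two pages is small enough to stay inside the four-page BUFFER_SIZE while still forcing a multi-block, multi-sector request.
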
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 7731ddefdc1..3dee97e7d16 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -148,7 +148,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
printk(KERN_WARNING "%s: unable to allocate "
"bounce buffer\n", mmc_card_name(card));
} else {
- blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
+ blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
blk_queue_max_sectors(mq->queue, bouncesz / 512);
blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
@@ -290,55 +290,15 @@ void mmc_queue_resume(struct mmc_queue *mq)
}
}
-static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
- struct scatterlist *src, unsigned int src_len)
-{
- unsigned int chunk;
- char *dst_buf, *src_buf;
- unsigned int dst_size, src_size;
-
- dst_buf = NULL;
- src_buf = NULL;
- dst_size = 0;
- src_size = 0;
-
- while (src_len) {
- BUG_ON(dst_len == 0);
-
- if (dst_size == 0) {
- dst_buf = sg_virt(dst);
- dst_size = dst->length;
- }
-
- if (src_size == 0) {
- src_buf = sg_virt(src);
- src_size = src->length;
- }
-
- chunk = min(dst_size, src_size);
-
- memcpy(dst_buf, src_buf, chunk);
-
- dst_buf += chunk;
- src_buf += chunk;
- dst_size -= chunk;
- src_size -= chunk;
-
- if (dst_size == 0) {
- dst++;
- dst_len--;
- }
-
- if (src_size == 0) {
- src++;
- src_len--;
- }
- }
-}
-
+/*
+ * Prepare the sg list(s) to be handed off to the host driver
+ */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
unsigned int sg_len;
+ size_t buflen;
+ struct scatterlist *sg;
+ int i;
if (!mq->bounce_buf)
return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
@@ -349,47 +309,52 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
mq->bounce_sg_len = sg_len;
- /*
- * Shortcut in the event we only get a single entry.
- */
- if (sg_len == 1) {
- memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
- return 1;
- }
+ buflen = 0;
+ for_each_sg(mq->bounce_sg, sg, sg_len, i)
+ buflen += sg->length;
- sg_init_one(mq->sg, mq->bounce_buf, 0);
-
- while (sg_len) {
- mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
- sg_len--;
- }
+ sg_init_one(mq->sg, mq->bounce_buf, buflen);
return 1;
}
+/*
+ * If writing, bounce the data to the buffer before the request
+ * is sent to the host driver
+ */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
+ unsigned long flags;
+
if (!mq->bounce_buf)
return;
- if (mq->bounce_sg_len == 1)
- return;
if (rq_data_dir(mq->req) != WRITE)
return;
- copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
+ local_irq_save(flags);
+ sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
+ mq->bounce_buf, mq->sg[0].length);
+ local_irq_restore(flags);
}
+/*
+ * If reading, bounce the data from the buffer after the request
+ * has been handled by the host driver
+ */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
+ unsigned long flags;
+
if (!mq->bounce_buf)
return;
- if (mq->bounce_sg_len == 1)
- return;
if (rq_data_dir(mq->req) != READ)
return;
- copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
+ local_irq_save(flags);
+ sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
+ mq->bounce_buf, mq->sg[0].length);
+ local_irq_restore(flags);
}
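
The rewritten bounce path drops the hand-rolled copy_sg() in favour of the lib/scatterlist.c helpers. sg_copy_to_buffer() and sg_copy_from_buffer() map each list entry with kmap_atomic() internally, which is why the callers bracket them with local_irq_save(). A condensed sketch of the write-side bounce under those assumptions (names hypothetical):

	#include <linux/kernel.h>
	#include <linux/scatterlist.h>

	/* Flatten an sg list into a contiguous bounce buffer before a write;
	 * buflen must equal the total bytes described by the list. */
	static void bounce_write(struct scatterlist *sg, unsigned int sg_len,
				 void *bounce_buf, size_t buflen)
	{
		unsigned long flags;

		local_irq_save(flags);		/* sg_copy_* uses kmap_atomic() */
		sg_copy_to_buffer(sg, sg_len, bounce_buf, buflen);
		local_irq_restore(flags);
	}
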
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 3f15eb20489..99b20917cc0 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1043,7 +1043,7 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
goto out6;
}
- platform_set_drvdata(pdev, mmc);
+ platform_set_drvdata(pdev, host);
printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X"
" (mode=%s)\n", pdev->id, host->iobase,
@@ -1087,13 +1087,10 @@ out0:
static int __devexit au1xmmc_remove(struct platform_device *pdev)
{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
- struct au1xmmc_host *host;
-
- if (mmc) {
- host = mmc_priv(mmc);
+ struct au1xmmc_host *host = platform_get_drvdata(pdev);
- mmc_remove_host(mmc);
+ if (host) {
+ mmc_remove_host(host->mmc);
#ifdef CONFIG_LEDS_CLASS
if (host->platdata && host->platdata->led)
@@ -1101,8 +1098,8 @@ static int __devexit au1xmmc_remove(struct platform_device *pdev)
#endif
if (host->platdata && host->platdata->cd_setup &&
- !(mmc->caps & MMC_CAP_NEEDS_POLL))
- host->platdata->cd_setup(mmc, 0);
+ !(host->mmc->caps & MMC_CAP_NEEDS_POLL))
+ host->platdata->cd_setup(host->mmc, 0);
au_writel(0, HOST_ENABLE(host));
au_writel(0, HOST_CONFIG(host));
@@ -1122,16 +1119,49 @@ static int __devexit au1xmmc_remove(struct platform_device *pdev)
release_resource(host->ioarea);
kfree(host->ioarea);
- mmc_free_host(mmc);
+ mmc_free_host(host->mmc);
+ platform_set_drvdata(pdev, NULL);
}
return 0;
}
+#ifdef CONFIG_PM
+static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct au1xmmc_host *host = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = mmc_suspend_host(host->mmc, state);
+ if (ret)
+ return ret;
+
+ au_writel(0, HOST_CONFIG2(host));
+ au_writel(0, HOST_CONFIG(host));
+ au_writel(0xffffffff, HOST_STATUS(host));
+ au_writel(0, HOST_ENABLE(host));
+ au_sync();
+
+ return 0;
+}
+
+static int au1xmmc_resume(struct platform_device *pdev)
+{
+ struct au1xmmc_host *host = platform_get_drvdata(pdev);
+
+ au1xmmc_reset_controller(host);
+
+ return mmc_resume_host(host->mmc);
+}
+#else
+#define au1xmmc_suspend NULL
+#define au1xmmc_resume NULL
+#endif
+
static struct platform_driver au1xmmc_driver = {
.probe = au1xmmc_probe,
.remove = au1xmmc_remove,
- .suspend = NULL,
- .resume = NULL,
+ .suspend = au1xmmc_suspend,
+ .resume = au1xmmc_resume,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index d39f5973886..a8e18fe5307 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -177,7 +177,7 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
if (dalgn)
DALGN |= (1 << host->dma);
else
- DALGN &= (1 << host->dma);
+ DALGN &= ~(1 << host->dma);
DDADR(host->dma) = host->sg_dma;
DCSR(host->dma) = DCSR_RUN;
}
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 6a1e4994b72..be550c26da6 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1331,21 +1331,30 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
return ret;
}
+static void s3cmci_shutdown(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct s3cmci_host *host = mmc_priv(mmc);
+
+ if (host->irq_cd >= 0)
+ free_irq(host->irq_cd, host);
+
+ mmc_remove_host(mmc);
+ clk_disable(host->clk);
+}
+
static int __devexit s3cmci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct s3cmci_host *host = mmc_priv(mmc);
- mmc_remove_host(mmc);
+ s3cmci_shutdown(pdev);
- clk_disable(host->clk);
clk_put(host->clk);
tasklet_disable(&host->pio_tasklet);
s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client);
- if (host->irq_cd >= 0)
- free_irq(host->irq_cd, host);
free_irq(host->irq, host);
iounmap(host->base);
@@ -1355,17 +1364,17 @@ static int __devexit s3cmci_remove(struct platform_device *pdev)
return 0;
}
-static int __devinit s3cmci_probe_2410(struct platform_device *dev)
+static int __devinit s3cmci_2410_probe(struct platform_device *dev)
{
return s3cmci_probe(dev, 0);
}
-static int __devinit s3cmci_probe_2412(struct platform_device *dev)
+static int __devinit s3cmci_2412_probe(struct platform_device *dev)
{
return s3cmci_probe(dev, 1);
}
-static int __devinit s3cmci_probe_2440(struct platform_device *dev)
+static int __devinit s3cmci_2440_probe(struct platform_device *dev)
{
return s3cmci_probe(dev, 1);
}
@@ -1392,29 +1401,32 @@ static int s3cmci_resume(struct platform_device *dev)
#endif /* CONFIG_PM */
-static struct platform_driver s3cmci_driver_2410 = {
+static struct platform_driver s3cmci_2410_driver = {
.driver.name = "s3c2410-sdi",
.driver.owner = THIS_MODULE,
- .probe = s3cmci_probe_2410,
+ .probe = s3cmci_2410_probe,
.remove = __devexit_p(s3cmci_remove),
+ .shutdown = s3cmci_shutdown,
.suspend = s3cmci_suspend,
.resume = s3cmci_resume,
};
-static struct platform_driver s3cmci_driver_2412 = {
+static struct platform_driver s3cmci_2412_driver = {
.driver.name = "s3c2412-sdi",
.driver.owner = THIS_MODULE,
- .probe = s3cmci_probe_2412,
+ .probe = s3cmci_2412_probe,
.remove = __devexit_p(s3cmci_remove),
+ .shutdown = s3cmci_shutdown,
.suspend = s3cmci_suspend,
.resume = s3cmci_resume,
};
-static struct platform_driver s3cmci_driver_2440 = {
+static struct platform_driver s3cmci_2440_driver = {
.driver.name = "s3c2440-sdi",
.driver.owner = THIS_MODULE,
- .probe = s3cmci_probe_2440,
+ .probe = s3cmci_2440_probe,
.remove = __devexit_p(s3cmci_remove),
+ .shutdown = s3cmci_shutdown,
.suspend = s3cmci_suspend,
.resume = s3cmci_resume,
};
@@ -1422,17 +1434,17 @@ static struct platform_driver s3cmci_driver_2440 = {
static int __init s3cmci_init(void)
{
- platform_driver_register(&s3cmci_driver_2410);
- platform_driver_register(&s3cmci_driver_2412);
- platform_driver_register(&s3cmci_driver_2440);
+ platform_driver_register(&s3cmci_2410_driver);
+ platform_driver_register(&s3cmci_2412_driver);
+ platform_driver_register(&s3cmci_2440_driver);
return 0;
}
static void __exit s3cmci_exit(void)
{
- platform_driver_unregister(&s3cmci_driver_2410);
- platform_driver_unregister(&s3cmci_driver_2412);
- platform_driver_unregister(&s3cmci_driver_2440);
+ platform_driver_unregister(&s3cmci_2410_driver);
+ platform_driver_unregister(&s3cmci_2412_driver);
+ platform_driver_unregister(&s3cmci_2440_driver);
}
module_init(s3cmci_init);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 17701c3da73..c3a5db72ddd 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -173,119 +173,95 @@ static void sdhci_led_control(struct led_classdev *led,
* *
\*****************************************************************************/
-static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
-{
- return sg_virt(host->cur_sg);
-}
-
-static inline int sdhci_next_sg(struct sdhci_host* host)
-{
- /*
- * Skip to next SG entry.
- */
- host->cur_sg++;
- host->num_sg--;
-
- /*
- * Any entries left?
- */
- if (host->num_sg > 0) {
- host->offset = 0;
- host->remain = host->cur_sg->length;
- }
-
- return host->num_sg;
-}
-
static void sdhci_read_block_pio(struct sdhci_host *host)
{
- int blksize, chunk_remain;
- u32 data;
- char *buffer;
- int size;
+ unsigned long flags;
+ size_t blksize, len, chunk;
+ u32 scratch;
+ u8 *buf;
DBG("PIO reading\n");
blksize = host->data->blksz;
- chunk_remain = 0;
- data = 0;
+ chunk = 0;
- buffer = sdhci_sg_to_buffer(host) + host->offset;
+ local_irq_save(flags);
while (blksize) {
- if (chunk_remain == 0) {
- data = readl(host->ioaddr + SDHCI_BUFFER);
- chunk_remain = min(blksize, 4);
- }
+ if (!sg_miter_next(&host->sg_miter))
+ BUG();
- size = min(host->remain, chunk_remain);
+ len = min(host->sg_miter.length, blksize);
- chunk_remain -= size;
- blksize -= size;
- host->offset += size;
- host->remain -= size;
+ blksize -= len;
+ host->sg_miter.consumed = len;
- while (size) {
- *buffer = data & 0xFF;
- buffer++;
- data >>= 8;
- size--;
- }
+ buf = host->sg_miter.addr;
- if (host->remain == 0) {
- if (sdhci_next_sg(host) == 0) {
- BUG_ON(blksize != 0);
- return;
+ while (len) {
+ if (chunk == 0) {
+ scratch = readl(host->ioaddr + SDHCI_BUFFER);
+ chunk = 4;
}
- buffer = sdhci_sg_to_buffer(host);
+
+ *buf = scratch & 0xFF;
+
+ buf++;
+ scratch >>= 8;
+ chunk--;
+ len--;
}
}
+
+ sg_miter_stop(&host->sg_miter);
+
+ local_irq_restore(flags);
}
static void sdhci_write_block_pio(struct sdhci_host *host)
{
- int blksize, chunk_remain;
- u32 data;
- char *buffer;
- int bytes, size;
+ unsigned long flags;
+ size_t blksize, len, chunk;
+ u32 scratch;
+ u8 *buf;
DBG("PIO writing\n");
blksize = host->data->blksz;
- chunk_remain = 4;
- data = 0;
+ chunk = 0;
+ scratch = 0;
- bytes = 0;
- buffer = sdhci_sg_to_buffer(host) + host->offset;
+ local_irq_save(flags);
while (blksize) {
- size = min(host->remain, chunk_remain);
-
- chunk_remain -= size;
- blksize -= size;
- host->offset += size;
- host->remain -= size;
-
- while (size) {
- data >>= 8;
- data |= (u32)*buffer << 24;
- buffer++;
- size--;
- }
+ if (!sg_miter_next(&host->sg_miter))
+ BUG();
- if (chunk_remain == 0) {
- writel(data, host->ioaddr + SDHCI_BUFFER);
- chunk_remain = min(blksize, 4);
- }
+ len = min(host->sg_miter.length, blksize);
+
+ blksize -= len;
+ host->sg_miter.consumed = len;
+
+ buf = host->sg_miter.addr;
- if (host->remain == 0) {
- if (sdhci_next_sg(host) == 0) {
- BUG_ON(blksize != 0);
- return;
+ while (len) {
+ scratch |= (u32)*buf << (chunk * 8);
+
+ buf++;
+ chunk++;
+ len--;
+
+ if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
+ writel(scratch, host->ioaddr + SDHCI_BUFFER);
+ chunk = 0;
+ scratch = 0;
}
- buffer = sdhci_sg_to_buffer(host);
}
}
+
+ sg_miter_stop(&host->sg_miter);
+
+ local_irq_restore(flags);
}
static void sdhci_transfer_pio(struct sdhci_host *host)
@@ -294,7 +270,7 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
BUG_ON(!host->data);
- if (host->num_sg == 0)
+ if (host->blocks == 0)
return;
if (host->data->flags & MMC_DATA_READ)
@@ -308,7 +284,8 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
else
sdhci_write_block_pio(host);
- if (host->num_sg == 0)
+ host->blocks--;
+ if (host->blocks == 0)
break;
}
@@ -389,6 +366,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
if (offset) {
if (data->flags & MMC_DATA_WRITE) {
buffer = sdhci_kmap_atomic(sg, &flags);
+				WARN_ON(((long)buffer & (PAGE_SIZE - 1)) > (PAGE_SIZE - 3));
memcpy(align, buffer, offset);
sdhci_kunmap_atomic(buffer, &flags);
}
@@ -510,6 +488,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
size = 4 - (sg_dma_address(sg) & 0x3);
buffer = sdhci_kmap_atomic(sg, &flags);
+			WARN_ON(((long)buffer & (PAGE_SIZE - 1)) > (PAGE_SIZE - 3));
memcpy(buffer, align, size);
sdhci_kunmap_atomic(buffer, &flags);
@@ -687,7 +666,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
WARN_ON(1);
host->flags &= ~SDHCI_USE_DMA;
} else {
- WARN_ON(count != 1);
+ WARN_ON(sg_cnt != 1);
writel(sg_dma_address(data->sg),
host->ioaddr + SDHCI_DMA_ADDRESS);
}
@@ -711,11 +690,9 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
}
if (!(host->flags & SDHCI_REQ_USE_DMA)) {
- host->cur_sg = data->sg;
- host->num_sg = data->sg_len;
-
- host->offset = 0;
- host->remain = host->cur_sg->length;
+ sg_miter_start(&host->sg_miter,
+ data->sg, data->sg_len, SG_MITER_ATOMIC);
+ host->blocks = data->blocks;
}
/* We do not handle DMA boundaries, so set it to max (512 KiB) */
@@ -1581,9 +1558,15 @@ int sdhci_add_host(struct sdhci_host *host)
}
}
- /* XXX: Hack to get MMC layer to avoid highmem */
- if (!(host->flags & SDHCI_USE_DMA))
- mmc_dev(host->mmc)->dma_mask = NULL;
+ /*
+ * If we use DMA, then it's up to the caller to set the DMA
+ * mask, but PIO does not need the hw shim so we set a new
+ * mask here in that case.
+ */
+ if (!(host->flags & SDHCI_USE_DMA)) {
+ host->dma_mask = DMA_BIT_MASK(64);
+ mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
+ }
host->max_clk =
(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 5bb35528176..a06bf8b8934 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -212,6 +212,7 @@ struct sdhci_host {
/* Internal data */
struct mmc_host *mmc; /* MMC structure */
+ u64 dma_mask; /* custom DMA mask */
#ifdef CONFIG_LEDS_CLASS
struct led_classdev led; /* LED control */
@@ -238,10 +239,8 @@ struct sdhci_host {
struct mmc_data *data; /* Current data request */
unsigned int data_early:1; /* Data finished before cmd */
- struct scatterlist *cur_sg; /* We're working on this */
- int num_sg; /* Entries left */
- int offset; /* Offset into current sg */
- int remain; /* Bytes left in current */
+ struct sg_mapping_iter sg_miter; /* SG state for PIO */
+ unsigned int blocks; /* remaining PIO blocks */
int sg_count; /* Mapped sg entries */
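
The PIO rewrite replaces four pieces of hand-maintained scatterlist state (cur_sg, num_sg, offset, remain) with a single struct sg_mapping_iter. The contract: call sg_miter_start() once, sg_miter_next() to map the next chunk (its address and size appear in .addr and .length), optionally set .consumed to the bytes actually used, and sg_miter_stop() to unmap and flush. A minimal consumer of the same API, assuming SG_MITER_ATOMIC as sdhci uses it (so interrupts are disabled around the walk):

	#include <linux/kernel.h>
	#include <linux/scatterlist.h>
	#include <linux/string.h>

	/* Zero every byte covered by an sg list via the mapping iterator. */
	static void sg_zero_all(struct scatterlist *sgl, unsigned int nents)
	{
		struct sg_mapping_iter miter;
		unsigned long flags;

		sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);

		local_irq_save(flags);
		while (sg_miter_next(&miter)) {
			memset(miter.addr, 0, miter.length);
			miter.consumed = miter.length;
		}
		sg_miter_stop(&miter);
		local_irq_restore(flags);
	}
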
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index cb663ef245d..fc8529bedfd 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -20,9 +20,11 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/gpio.h>
#include <asm/io.h>
#include <asm/irq.h>
+#include <asm/mach-types.h>
#include <asm/arch/hardware.h>
#include <asm/arch/pxa-regs.h>
@@ -30,20 +32,6 @@
#define GPIO_NAND_CS (11)
#define GPIO_NAND_RB (89)
-/* This macro needed to ensure in-order operation of GPIO and local
- * bus. Without both asm command and dummy uncached read there're
- * states when NAND access is broken. I've looked for such macro(s) in
- * include/asm-arm but found nothing approptiate.
- * dmac_clean_range is close, but is makes cache invalidation
- * unnecessary here and it cannot be used in module
- */
-#define DRAIN_WB() \
- do { \
- unsigned char dummy; \
- asm volatile ("mcr p15, 0, r0, c7, c10, 4":::"r0"); \
- dummy=*((unsigned char*)UNCACHED_ADDR); \
- } while(0)
-
/* MTD structure for CM-X270 board */
static struct mtd_info *cmx270_nand_mtd;
@@ -103,14 +91,14 @@ static int cmx270_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
static inline void nand_cs_on(void)
{
- GPCR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS);
+ gpio_set_value(GPIO_NAND_CS, 0);
}
static void nand_cs_off(void)
{
- DRAIN_WB();
+ dsb();
- GPSR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS);
+ gpio_set_value(GPIO_NAND_CS, 1);
}
/*
@@ -122,7 +110,7 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
struct nand_chip* this = mtd->priv;
unsigned int nandaddr = (unsigned int)this->IO_ADDR_W;
- DRAIN_WB();
+ dsb();
if (ctrl & NAND_CTRL_CHANGE) {
if ( ctrl & NAND_ALE )
@@ -139,12 +127,12 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
nand_cs_off();
}
- DRAIN_WB();
+ dsb();
this->IO_ADDR_W = (void __iomem*)nandaddr;
if (dat != NAND_CMD_NONE)
writel((dat << 16), this->IO_ADDR_W);
- DRAIN_WB();
+ dsb();
}
/*
@@ -152,9 +140,9 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
*/
static int cmx270_device_ready(struct mtd_info *mtd)
{
- DRAIN_WB();
+ dsb();
- return (GPLR(GPIO_NAND_RB) & GPIO_bit(GPIO_NAND_RB));
+ return (gpio_get_value(GPIO_NAND_RB));
}
/*
@@ -168,20 +156,40 @@ static int cmx270_init(void)
int mtd_parts_nb = 0;
int ret;
+ if (!machine_is_armcore())
+ return -ENODEV;
+
+ ret = gpio_request(GPIO_NAND_CS, "NAND CS");
+ if (ret) {
+ pr_warning("CM-X270: failed to request NAND CS gpio\n");
+ return ret;
+ }
+
+ gpio_direction_output(GPIO_NAND_CS, 1);
+
+ ret = gpio_request(GPIO_NAND_RB, "NAND R/B");
+ if (ret) {
+ pr_warning("CM-X270: failed to request NAND R/B gpio\n");
+ goto err_gpio_request;
+ }
+
+ gpio_direction_input(GPIO_NAND_RB);
+
/* Allocate memory for MTD device structure and private data */
cmx270_nand_mtd = kzalloc(sizeof(struct mtd_info) +
sizeof(struct nand_chip),
GFP_KERNEL);
if (!cmx270_nand_mtd) {
- printk("Unable to allocate CM-X270 NAND MTD device structure.\n");
- return -ENOMEM;
+ pr_debug("Unable to allocate CM-X270 NAND MTD device structure.\n");
+ ret = -ENOMEM;
+ goto err_kzalloc;
}
cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12);
if (!cmx270_nand_io) {
- printk("Unable to ioremap NAND device\n");
+ pr_debug("Unable to ioremap NAND device\n");
ret = -EINVAL;
- goto err1;
+ goto err_ioremap;
}
/* Get pointer to private data */
@@ -209,9 +217,9 @@ static int cmx270_init(void)
/* Scan to find existence of the device */
if (nand_scan (cmx270_nand_mtd, 1)) {
- printk(KERN_NOTICE "No NAND device\n");
+ pr_notice("No NAND device\n");
ret = -ENXIO;
- goto err2;
+ goto err_scan;
}
#ifdef CONFIG_MTD_CMDLINE_PARTS
@@ -229,18 +237,22 @@ static int cmx270_init(void)
}
/* Register the partitions */
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
+ pr_notice("Using %s partition definition\n", part_type);
ret = add_mtd_partitions(cmx270_nand_mtd, mtd_parts, mtd_parts_nb);
if (ret)
- goto err2;
+ goto err_scan;
/* Return happy */
return 0;
-err2:
+err_scan:
iounmap(cmx270_nand_io);
-err1:
+err_ioremap:
kfree(cmx270_nand_mtd);
+err_kzalloc:
+ gpio_free(GPIO_NAND_RB);
+err_gpio_request:
+ gpio_free(GPIO_NAND_CS);
return ret;
@@ -255,6 +267,9 @@ static void cmx270_cleanup(void)
/* Release resources, unregister device */
nand_release(cmx270_nand_mtd);
+ gpio_free(GPIO_NAND_RB);
+ gpio_free(GPIO_NAND_CS);
+
iounmap(cmx270_nand_io);
/* Free the MTD device structure */
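
The cmx270 conversion swaps the home-grown DRAIN_WB() write-buffer hack for dsb(), the ARM data synchronization barrier, which is what actually guarantees that the GPIO chip-select change and the local-bus NAND access are observed in order. A sketch of that ordering requirement (function name hypothetical, GPIO_NAND_CS as defined in the driver):

	#include <linux/gpio.h>
	#include <linux/io.h>
	#include <asm/system.h>			/* dsb() on ARM */

	#define GPIO_NAND_CS	11		/* value from the driver above */

	static void nand_cmd_ordered(void __iomem *addr, u32 cmd)
	{
		gpio_set_value(GPIO_NAND_CS, 0);	/* assert CS (active low) */
		dsb();					/* CS visible before the access */
		writel(cmd << 16, addr);		/* command on D[31:16] */
		dsb();					/* access done before CS change */
		gpio_set_value(GPIO_NAND_CS, 1);	/* deassert CS */
	}
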
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index f2051b209da..2040965d772 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -308,7 +308,7 @@ static void smc_reset(struct net_device *dev)
* can't handle it then there will be no recovery except for
* a hard reset or power cycle
*/
- if (nowait)
+ if (lp->cfg.flags & SMC91X_NOWAIT)
cfg |= CONFIG_NO_WAIT;
/*
@@ -1939,8 +1939,11 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
if (retval)
goto err_out;
-#ifdef SMC_USE_PXA_DMA
- {
+#ifdef CONFIG_ARCH_PXA
+# ifdef SMC_USE_PXA_DMA
+ lp->cfg.flags |= SMC91X_USE_DMA;
+# endif
+ if (lp->cfg.flags & SMC91X_USE_DMA) {
int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW,
smc_pxa_dma_irq, NULL);
if (dma >= 0)
@@ -1980,7 +1983,7 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
}
err_out:
-#ifdef SMC_USE_PXA_DMA
+#ifdef CONFIG_ARCH_PXA
if (retval && dev->dma != (unsigned char)-1)
pxa_free_dma(dev->dma);
#endif
@@ -2050,9 +2053,11 @@ static int smc_enable_device(struct platform_device *pdev)
return 0;
}
-static int smc_request_attrib(struct platform_device *pdev)
+static int smc_request_attrib(struct platform_device *pdev,
+ struct net_device *ndev)
{
struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
+ struct smc_local *lp = netdev_priv(ndev);
if (!res)
return 0;
@@ -2063,9 +2068,11 @@ static int smc_request_attrib(struct platform_device *pdev)
return 0;
}
-static void smc_release_attrib(struct platform_device *pdev)
+static void smc_release_attrib(struct platform_device *pdev,
+ struct net_device *ndev)
{
struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
+ struct smc_local *lp = netdev_priv(ndev);
if (res)
release_mem_region(res->start, ATTRIB_SIZE);
@@ -2123,27 +2130,14 @@ static int smc_drv_probe(struct platform_device *pdev)
struct net_device *ndev;
struct resource *res, *ires;
unsigned int __iomem *addr;
+ unsigned long irq_flags = SMC_IRQ_FLAGS;
int ret;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
- if (!res)
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- goto out;
- }
-
-
- if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) {
- ret = -EBUSY;
- goto out;
- }
-
ndev = alloc_etherdev(sizeof(struct smc_local));
if (!ndev) {
printk("%s: could not allocate device.\n", CARDNAME);
ret = -ENOMEM;
- goto out_release_io;
+ goto out;
}
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -2152,37 +2146,47 @@ static int smc_drv_probe(struct platform_device *pdev)
*/
lp = netdev_priv(ndev);
- lp->cfg.irq_flags = SMC_IRQ_FLAGS;
-#ifdef SMC_DYNAMIC_BUS_CONFIG
- if (pd)
+ if (pd) {
memcpy(&lp->cfg, pd, sizeof(lp->cfg));
- else {
- lp->cfg.flags = SMC91X_USE_8BIT;
- lp->cfg.flags |= SMC91X_USE_16BIT;
- lp->cfg.flags |= SMC91X_USE_32BIT;
+ lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
+ } else {
+ lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0;
+ lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0;
+ lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0;
+ lp->cfg.flags |= (nowait) ? SMC91X_NOWAIT : 0;
}
- lp->cfg.flags &= ~(SMC_CAN_USE_8BIT ? 0 : SMC91X_USE_8BIT);
- lp->cfg.flags &= ~(SMC_CAN_USE_16BIT ? 0 : SMC91X_USE_16BIT);
- lp->cfg.flags &= ~(SMC_CAN_USE_32BIT ? 0 : SMC91X_USE_32BIT);
-#endif
-
ndev->dma = (unsigned char)-1;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto out_free_netdev;
+ }
+
+
+ if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) {
+ ret = -EBUSY;
+ goto out_free_netdev;
+ }
+
ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!ires) {
ret = -ENODEV;
- goto out_free_netdev;
+ goto out_release_io;
}
ndev->irq = ires->start;
- if (SMC_IRQ_FLAGS == -1)
- lp->cfg.irq_flags = ires->flags & IRQF_TRIGGER_MASK;
- ret = smc_request_attrib(pdev);
+ if (ires->flags & IRQF_TRIGGER_MASK)
+ irq_flags = ires->flags & IRQF_TRIGGER_MASK;
+
+ ret = smc_request_attrib(pdev, ndev);
if (ret)
- goto out_free_netdev;
+ goto out_release_io;
#if defined(CONFIG_SA1100_ASSABET)
NCR_0 |= NCR_ENET_OSC_EN;
#endif
@@ -2197,7 +2201,7 @@ static int smc_drv_probe(struct platform_device *pdev)
goto out_release_attrib;
}
-#ifdef SMC_USE_PXA_DMA
+#ifdef CONFIG_ARCH_PXA
{
struct smc_local *lp = netdev_priv(ndev);
lp->device = &pdev->dev;
@@ -2205,7 +2209,7 @@ static int smc_drv_probe(struct platform_device *pdev)
}
#endif
- ret = smc_probe(ndev, addr, lp->cfg.irq_flags);
+ ret = smc_probe(ndev, addr, irq_flags);
if (ret != 0)
goto out_iounmap;
@@ -2217,11 +2221,11 @@ static int smc_drv_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
iounmap(addr);
out_release_attrib:
- smc_release_attrib(pdev);
- out_free_netdev:
- free_netdev(ndev);
+ smc_release_attrib(pdev, ndev);
out_release_io:
release_mem_region(res->start, SMC_IO_EXTENT);
+ out_free_netdev:
+ free_netdev(ndev);
out:
printk("%s: not found (%d).\n", CARDNAME, ret);
@@ -2240,14 +2244,14 @@ static int smc_drv_remove(struct platform_device *pdev)
free_irq(ndev->irq, ndev);
-#ifdef SMC_USE_PXA_DMA
+#ifdef CONFIG_ARCH_PXA
if (ndev->dma != (unsigned char)-1)
pxa_free_dma(ndev->dma);
#endif
iounmap(lp->base);
smc_release_datacs(pdev,ndev);
- smc_release_attrib(pdev);
+ smc_release_attrib(pdev,ndev);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
if (!res)
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 8606818653f..22209b6f140 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -40,23 +40,46 @@
* Define your architecture specific bus configuration parameters here.
*/
-#if defined(CONFIG_ARCH_LUBBOCK)
+#if defined(CONFIG_ARCH_LUBBOCK) ||\
+ defined(CONFIG_MACH_MAINSTONE) ||\
+ defined(CONFIG_MACH_ZYLONITE) ||\
+ defined(CONFIG_MACH_LITTLETON)
-/* We can only do 16-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT 0
+#include <asm/mach-types.h>
+
+/* Now that the bus width is specified in the platform data,
+ * pretend here to support all I/O access types.
+ */
+#define SMC_CAN_USE_8BIT 1
#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
+#define SMC_CAN_USE_32BIT 1
#define SMC_NOWAIT 1
-/* The first two address lines aren't connected... */
-#define SMC_IO_SHIFT 2
+#define SMC_IO_SHIFT (lp->io_shift)
+#define SMC_inb(a, r) readb((a) + (r))
#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_inl(a, r) readl((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outl(v, a, r) writel(v, (a) + (r))
#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
#define SMC_IRQ_FLAGS (-1) /* from resource */
+/* We actually can't write halfwords properly if not word aligned */
+static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+{
+ if (machine_is_mainstone() && reg & 2) {
+ unsigned int v = val << 16;
+ v |= readl(ioaddr + (reg & ~2)) & 0xffff;
+ writel(v, ioaddr + (reg & ~2));
+ } else {
+ writew(val, ioaddr + reg);
+ }
+}
+
#elif defined(CONFIG_BLACKFIN)
#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
@@ -195,7 +218,6 @@
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
#elif defined(CONFIG_ARCH_INNOKOM) || \
- defined(CONFIG_MACH_MAINSTONE) || \
defined(CONFIG_ARCH_PXA_IDP) || \
defined(CONFIG_ARCH_RAMSES) || \
defined(CONFIG_ARCH_PCM027)
@@ -229,22 +251,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
}
}
-#elif defined(CONFIG_MACH_ZYLONITE)
-
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_IO_SHIFT 0
-#define SMC_NOWAIT 1
-#define SMC_USE_PXA_DMA 1
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_insw(a, r, p, l) insw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) outsw((a) + (r), p, l)
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_IRQ_FLAGS (-1) /* from resource */
-
#elif defined(CONFIG_ARCH_OMAP)
/* We can only do 16-bit reads and writes in the static memory space. */
@@ -454,7 +460,6 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
#define RPC_LSA_DEFAULT RPC_LED_100_10
#define RPC_LSB_DEFAULT RPC_LED_TX_RX
-#define SMC_DYNAMIC_BUS_CONFIG
#endif
@@ -493,7 +498,7 @@ struct smc_local {
spinlock_t lock;
-#ifdef SMC_USE_PXA_DMA
+#ifdef CONFIG_ARCH_PXA
/* DMA needs the physical address of the chip */
u_long physaddr;
struct device *device;
@@ -501,20 +506,17 @@ struct smc_local {
void __iomem *base;
void __iomem *datacs;
+ /* the low address lines on some platforms aren't connected... */
+ int io_shift;
+
struct smc91x_platdata cfg;
};
-#ifdef SMC_DYNAMIC_BUS_CONFIG
-#define SMC_8BIT(p) (((p)->cfg.flags & SMC91X_USE_8BIT) && SMC_CAN_USE_8BIT)
-#define SMC_16BIT(p) (((p)->cfg.flags & SMC91X_USE_16BIT) && SMC_CAN_USE_16BIT)
-#define SMC_32BIT(p) (((p)->cfg.flags & SMC91X_USE_32BIT) && SMC_CAN_USE_32BIT)
-#else
-#define SMC_8BIT(p) SMC_CAN_USE_8BIT
-#define SMC_16BIT(p) SMC_CAN_USE_16BIT
-#define SMC_32BIT(p) SMC_CAN_USE_32BIT
-#endif
+#define SMC_8BIT(p) ((p)->cfg.flags & SMC91X_USE_8BIT)
+#define SMC_16BIT(p) ((p)->cfg.flags & SMC91X_USE_16BIT)
+#define SMC_32BIT(p) ((p)->cfg.flags & SMC91X_USE_32BIT)
-#ifdef SMC_USE_PXA_DMA
+#ifdef CONFIG_ARCH_PXA
/*
* Let's use the DMA engine on the XScale PXA2xx for RX packets. This is
* always happening in irq context so no need to worry about races. TX is
@@ -608,7 +610,7 @@ smc_pxa_dma_irq(int dma, void *dummy)
{
DCSR(dma) = 0;
}
-#endif /* SMC_USE_PXA_DMA */
+#endif /* CONFIG_ARCH_PXA */
/*
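
The new SMC_outw() for the PXA boards emulates a 16-bit write to an odd halfword with a 32-bit read-modify-write, because Mainstone cannot write the upper halfword of the bus directly. Worked example with hypothetical values:

	/*
	 * writew(0xBEEF, base + 0x0a): reg & 2 is set, and the old word at
	 * base + 0x08 is assumed to be 0x1234CAFE.
	 *
	 *	v  = 0xBEEF << 16;			v = 0xBEEF0000
	 *	v |= readl(base + 0x08) & 0xffff;	v = 0xBEEFCAFE
	 *	writel(v, base + 0x08);
	 *
	 * On the little-endian PXA bus the upper halfword of that 32-bit
	 * word is the odd-offset register, so 0xBEEF lands at 0x0a while
	 * 0xCAFE at 0x08 is preserved.
	 */
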
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index e45402adac3..e0f884034c9 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -219,7 +219,8 @@ config PCMCIA_SA1111
config PCMCIA_PXA2XX
tristate "PXA2xx support"
depends on ARM && ARCH_PXA && PCMCIA
- depends on ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL || MACH_ARMCORE
+ depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
+ || MACH_ARMCORE || ARCH_PXA_PALM)
help
Say Y here to include support for the PXA2xx PCMCIA controller
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 85c6cc931f9..269a9e913ba 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -72,4 +72,5 @@ pxa2xx_cs-$(CONFIG_ARCH_LUBBOCK) += pxa2xx_lubbock.o sa1111_generic.o
pxa2xx_cs-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o
pxa2xx_cs-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o
pxa2xx_cs-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x270.o
+pxa2xx_cs-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c
index f123fce65f2..bb95db7d2b7 100644
--- a/drivers/pcmcia/pxa2xx_cm_x270.c
+++ b/drivers/pcmcia/pxa2xx_cm_x270.c
@@ -5,83 +5,60 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * Compulab Ltd., 2003, 2007
+ * Compulab Ltd., 2003, 2007, 2008
* Mike Rapoport <mike@compulab.co.il>
*
*/
-#include <linux/kernel.h>
-#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/delay.h>
+#include <linux/gpio.h>
-#include <pcmcia/ss.h>
-#include <asm/hardware.h>
#include <asm/mach-types.h>
-
#include <asm/arch/pxa-regs.h>
-#include <asm/arch/pxa2xx-gpio.h>
-#include <asm/arch/cm-x270.h>
#include "soc_common.h"
+#define GPIO_PCMCIA_S0_CD_VALID (84)
+#define GPIO_PCMCIA_S0_RDYINT (82)
+#define GPIO_PCMCIA_RESET (53)
+
+#define PCMCIA_S0_CD_VALID IRQ_GPIO(GPIO_PCMCIA_S0_CD_VALID)
+#define PCMCIA_S0_RDYINT IRQ_GPIO(GPIO_PCMCIA_S0_RDYINT)
+
+
static struct pcmcia_irqs irqs[] = {
{ 0, PCMCIA_S0_CD_VALID, "PCMCIA0 CD" },
- { 1, PCMCIA_S1_CD_VALID, "PCMCIA1 CD" },
};
static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
- GPSR(GPIO48_nPOE) = GPIO_bit(GPIO48_nPOE) |
- GPIO_bit(GPIO49_nPWE) |
- GPIO_bit(GPIO50_nPIOR) |
- GPIO_bit(GPIO51_nPIOW) |
- GPIO_bit(GPIO85_nPCE_1) |
- GPIO_bit(GPIO54_nPCE_2);
-
- pxa_gpio_mode(GPIO48_nPOE_MD);
- pxa_gpio_mode(GPIO49_nPWE_MD);
- pxa_gpio_mode(GPIO50_nPIOR_MD);
- pxa_gpio_mode(GPIO51_nPIOW_MD);
- pxa_gpio_mode(GPIO85_nPCE_1_MD);
- pxa_gpio_mode(GPIO54_nPCE_2_MD);
- pxa_gpio_mode(GPIO55_nPREG_MD);
- pxa_gpio_mode(GPIO56_nPWAIT_MD);
- pxa_gpio_mode(GPIO57_nIOIS16_MD);
-
- /* Reset signal */
- pxa_gpio_mode(GPIO53_nPCE_2 | GPIO_OUT);
- GPCR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2);
-
- set_irq_type(PCMCIA_S0_CD_VALID, IRQ_TYPE_EDGE_BOTH);
- set_irq_type(PCMCIA_S1_CD_VALID, IRQ_TYPE_EDGE_BOTH);
-
- /* irq's for slots: */
- set_irq_type(PCMCIA_S0_RDYINT, IRQ_TYPE_EDGE_FALLING);
- set_irq_type(PCMCIA_S1_RDYINT, IRQ_TYPE_EDGE_FALLING);
-
- skt->irq = (skt->nr == 0) ? PCMCIA_S0_RDYINT : PCMCIA_S1_RDYINT;
- return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
+ int ret = gpio_request(GPIO_PCMCIA_RESET, "PCCard reset");
+ if (ret)
+ return ret;
+ gpio_direction_output(GPIO_PCMCIA_RESET, 0);
+
+ skt->irq = PCMCIA_S0_RDYINT;
+ ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
+	if (ret)
+ gpio_free(GPIO_PCMCIA_RESET);
+
+ return ret;
}
static void cmx270_pcmcia_shutdown(struct soc_pcmcia_socket *skt)
{
soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs));
-
- set_irq_type(IRQ_TO_GPIO(PCMCIA_S0_CD_VALID), IRQ_TYPE_NONE);
- set_irq_type(IRQ_TO_GPIO(PCMCIA_S1_CD_VALID), IRQ_TYPE_NONE);
-
- set_irq_type(IRQ_TO_GPIO(PCMCIA_S0_RDYINT), IRQ_TYPE_NONE);
- set_irq_type(IRQ_TO_GPIO(PCMCIA_S1_RDYINT), IRQ_TYPE_NONE);
+ gpio_free(GPIO_PCMCIA_RESET);
}
static void cmx270_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
struct pcmcia_state *state)
{
- state->detect = (PCC_DETECT(skt->nr) == 0) ? 1 : 0;
- state->ready = (PCC_READY(skt->nr) == 0) ? 0 : 1;
+ state->detect = (gpio_get_value(GPIO_PCMCIA_S0_CD_VALID) == 0) ? 1 : 0;
+ state->ready = (gpio_get_value(GPIO_PCMCIA_S0_RDYINT) == 0) ? 0 : 1;
state->bvd1 = 1;
state->bvd2 = 1;
state->vs_3v = 0;
@@ -93,32 +70,16 @@ static void cmx270_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
static int cmx270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
const socket_state_t *state)
{
- GPSR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE);
- pxa_gpio_mode(GPIO49_nPWE | GPIO_OUT);
-
switch (skt->nr) {
case 0:
if (state->flags & SS_RESET) {
- GPCR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE);
- GPSR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2);
- udelay(10);
- GPCR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2);
- GPSR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE);
- }
- break;
- case 1:
- if (state->flags & SS_RESET) {
- GPCR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE);
- GPSR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2);
+ gpio_set_value(GPIO_PCMCIA_RESET, 1);
udelay(10);
- GPCR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2);
- GPSR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE);
+ gpio_set_value(GPIO_PCMCIA_RESET, 0);
}
break;
}
- pxa_gpio_mode(GPIO49_nPWE_MD);
-
return 0;
}
@@ -139,7 +100,7 @@ static struct pcmcia_low_level cmx270_pcmcia_ops __initdata = {
.configure_socket = cmx270_pcmcia_configure_socket,
.socket_init = cmx270_pcmcia_socket_init,
.socket_suspend = cmx270_pcmcia_socket_suspend,
- .nr = 2,
+ .nr = 1,
};
static struct platform_device *cmx270_pcmcia_device;
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c
new file mode 100644
index 00000000000..4abde190c1f
--- /dev/null
+++ b/drivers/pcmcia/pxa2xx_palmtx.c
@@ -0,0 +1,118 @@
+/*
+ * linux/drivers/pcmcia/pxa2xx_palmtx.c
+ *
+ * Driver for Palm T|X PCMCIA
+ *
+ * Copyright (C) 2007-2008 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach-types.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/palmtx.h>
+
+#include "soc_common.h"
+
+static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
+{
+ skt->irq = IRQ_GPIO(GPIO_NR_PALMTX_PCMCIA_READY);
+ return 0;
+}
+
+static void palmtx_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
+{
+}
+
+static void palmtx_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
+ struct pcmcia_state *state)
+{
+ state->detect = 1; /* always inserted */
+ state->ready = !!gpio_get_value(GPIO_NR_PALMTX_PCMCIA_READY);
+ state->bvd1 = 1;
+ state->bvd2 = 1;
+ state->wrprot = 0;
+ state->vs_3v = 1;
+ state->vs_Xv = 0;
+}
+
+static int
+palmtx_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
+ const socket_state_t *state)
+{
+ gpio_set_value(GPIO_NR_PALMTX_PCMCIA_POWER1, 1);
+ gpio_set_value(GPIO_NR_PALMTX_PCMCIA_POWER2, 1);
+ gpio_set_value(GPIO_NR_PALMTX_PCMCIA_RESET,
+ !!(state->flags & SS_RESET));
+
+ return 0;
+}
+
+static void palmtx_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
+{
+}
+
+static void palmtx_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
+{
+}
+
+static struct pcmcia_low_level palmtx_pcmcia_ops = {
+ .owner = THIS_MODULE,
+
+ .first = 0,
+ .nr = 1,
+
+ .hw_init = palmtx_pcmcia_hw_init,
+ .hw_shutdown = palmtx_pcmcia_hw_shutdown,
+
+ .socket_state = palmtx_pcmcia_socket_state,
+ .configure_socket = palmtx_pcmcia_configure_socket,
+
+ .socket_init = palmtx_pcmcia_socket_init,
+ .socket_suspend = palmtx_pcmcia_socket_suspend,
+};
+
+static struct platform_device *palmtx_pcmcia_device;
+
+static int __init palmtx_pcmcia_init(void)
+{
+ int ret;
+
+ if (!machine_is_palmtx())
+ return -ENODEV;
+
+ palmtx_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
+ if (!palmtx_pcmcia_device)
+ return -ENOMEM;
+
+ ret = platform_device_add_data(palmtx_pcmcia_device, &palmtx_pcmcia_ops,
+ sizeof(palmtx_pcmcia_ops));
+
+ if (!ret)
+ ret = platform_device_add(palmtx_pcmcia_device);
+
+ if (ret)
+ platform_device_put(palmtx_pcmcia_device);
+
+ return ret;
+}
+
+static void __exit palmtx_pcmcia_exit(void)
+{
+ platform_device_unregister(palmtx_pcmcia_device);
+}
+
+fs_initcall(palmtx_pcmcia_init);
+module_exit(palmtx_pcmcia_exit);
+
+MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
+MODULE_DESCRIPTION("PCMCIA support for Palm T|X");
+MODULE_ALIAS("platform:pxa2xx-pcmcia");
+MODULE_LICENSE("GPL");
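
palmtx_pcmcia_init() uses the alloc/add_data/add idiom: platform_device_add_data() stores a copy of the ops structure as the device's platform data, and a single platform_device_put() on any failure drops the last reference, freeing both the device and that copy. The same registration skeleton reduced to its essentials (names hypothetical):

	#include <linux/init.h>
	#include <linux/platform_device.h>

	static struct platform_device *pdev;

	static int __init example_register(const void *pdata, size_t size)
	{
		int ret;

		pdev = platform_device_alloc("example-device", -1);
		if (!pdev)
			return -ENOMEM;

		ret = platform_device_add_data(pdev, pdata, size);
		if (!ret)
			ret = platform_device_add(pdev);

		if (ret)
			platform_device_put(pdev);	/* frees pdev and the data copy */

		return ret;
	}
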
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 58c806e9c58..4d17d384578 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -49,4 +49,10 @@ config BATTERY_OLPC
help
Say Y to enable support for the battery on the OLPC laptop.
+config BATTERY_PALMTX
+ tristate "Palm T|X battery"
+ depends on MACH_PALMTX
+ help
+ Say Y to enable support for the battery in Palm T|X.
+
endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 6413ded5fe5..6f43a54ee42 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_APM_POWER) += apm_power.o
obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
+obj-$(CONFIG_BATTERY_PALMTX) += palmtx_battery.o
diff --git a/drivers/power/palmtx_battery.c b/drivers/power/palmtx_battery.c
new file mode 100644
index 00000000000..244bb273a63
--- /dev/null
+++ b/drivers/power/palmtx_battery.c
@@ -0,0 +1,198 @@
+/*
+ * linux/drivers/power/palmtx_battery.c
+ *
+ * Battery measurement code for Palm T|X Handheld computer
+ *
+ * based on tosa_battery.c
+ *
+ * Copyright (C) 2008 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/wm97xx.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+
+#include <asm/mach-types.h>
+#include <asm/arch/palmtx.h>
+
+static DEFINE_MUTEX(bat_lock);
+static struct work_struct bat_work;
+static struct mutex work_lock;
+static int bat_status = POWER_SUPPLY_STATUS_DISCHARGING;
+
+static unsigned long palmtx_read_bat(struct power_supply *bat_ps)
+{
+ return wm97xx_read_aux_adc(bat_ps->dev->parent->driver_data,
+ WM97XX_AUX_ID3) * 1000 / 414;
+}
+
+static unsigned long palmtx_read_temp(struct power_supply *bat_ps)
+{
+ return wm97xx_read_aux_adc(bat_ps->dev->parent->driver_data,
+ WM97XX_AUX_ID2);
+}
+
+static int palmtx_bat_get_property(struct power_supply *bat_ps,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = bat_status;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LIPO;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = palmtx_read_bat(bat_ps);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = PALMTX_BAT_MAX_VOLTAGE;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = PALMTX_BAT_MIN_VOLTAGE;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = palmtx_read_temp(bat_ps);
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void palmtx_bat_external_power_changed(struct power_supply *bat_ps)
+{
+ schedule_work(&bat_work);
+}
+
+static char *status_text[] = {
+ [POWER_SUPPLY_STATUS_UNKNOWN] = "Unknown",
+ [POWER_SUPPLY_STATUS_CHARGING] = "Charging",
+ [POWER_SUPPLY_STATUS_DISCHARGING] = "Discharging",
+};
+
+static void palmtx_bat_update(struct power_supply *bat_ps)
+{
+ int old_status = bat_status;
+
+ mutex_lock(&work_lock);
+
+ bat_status = gpio_get_value(GPIO_NR_PALMTX_POWER_DETECT) ?
+ POWER_SUPPLY_STATUS_CHARGING :
+ POWER_SUPPLY_STATUS_DISCHARGING;
+
+ if (old_status != bat_status) {
+ pr_debug("%s %s -> %s\n", bat_ps->name,
+ status_text[old_status],
+ status_text[bat_status]);
+ power_supply_changed(bat_ps);
+ }
+
+ mutex_unlock(&work_lock);
+}
+
+static enum power_supply_property palmtx_bat_main_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_PRESENT,
+};
+
+static struct power_supply bat_ps = {
+ .name = "main-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = palmtx_bat_main_props,
+ .num_properties = ARRAY_SIZE(palmtx_bat_main_props),
+ .get_property = palmtx_bat_get_property,
+ .external_power_changed = palmtx_bat_external_power_changed,
+ .use_for_apm = 1,
+};
+
+static void palmtx_bat_work(struct work_struct *work)
+{
+ palmtx_bat_update(&bat_ps);
+}
+
+#ifdef CONFIG_PM
+static int palmtx_bat_suspend(struct platform_device *dev, pm_message_t state)
+{
+ flush_scheduled_work();
+ return 0;
+}
+
+static int palmtx_bat_resume(struct platform_device *dev)
+{
+ schedule_work(&bat_work);
+ return 0;
+}
+#else
+#define palmtx_bat_suspend NULL
+#define palmtx_bat_resume NULL
+#endif
+
+static int __devinit palmtx_bat_probe(struct platform_device *dev)
+{
+ int ret = 0;
+
+ if (!machine_is_palmtx())
+ return -ENODEV;
+
+ mutex_init(&work_lock);
+
+ INIT_WORK(&bat_work, palmtx_bat_work);
+
+ ret = power_supply_register(&dev->dev, &bat_ps);
+ if (!ret)
+ schedule_work(&bat_work);
+
+ return ret;
+}
+
+static int __devexit palmtx_bat_remove(struct platform_device *dev)
+{
+ power_supply_unregister(&bat_ps);
+ return 0;
+}
+
+static struct platform_driver palmtx_bat_driver = {
+ .driver.name = "wm97xx-battery",
+ .driver.owner = THIS_MODULE,
+ .probe = palmtx_bat_probe,
+ .remove = __devexit_p(palmtx_bat_remove),
+ .suspend = palmtx_bat_suspend,
+ .resume = palmtx_bat_resume,
+};
+
+static int __init palmtx_bat_init(void)
+{
+ return platform_driver_register(&palmtx_bat_driver);
+}
+
+static void __exit palmtx_bat_exit(void)
+{
+ platform_driver_unregister(&palmtx_bat_driver);
+}
+
+module_init(palmtx_bat_init);
+module_exit(palmtx_bat_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
+MODULE_DESCRIPTION("Palm T|X battery driver");
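
palmtx_read_bat() scales the raw WM97xx AUX ADC reading by 1000/414, presumably the board's voltage-divider factor. A worked example of the integer arithmetic (raw reading hypothetical):

	/*
	 * wm97xx_read_aux_adc(...) = 1700 (assumed)
	 * 1700 * 1000 / 414 = 4106 (4106.28 truncated by integer division)
	 *
	 * VOLTAGE_NOW therefore reports 4106 in whatever scale the divider
	 * factor and the PALMTX_BAT_*_VOLTAGE constants establish.
	 */
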
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index f843c1383a4..538552495d4 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -84,7 +84,6 @@ typedef struct ide_scsi_obj {
struct Scsi_Host *host;
struct ide_atapi_pc *pc; /* Current packet command */
- unsigned long flags; /* Status/Action flags */
unsigned long transform; /* SCSI cmd translation layer */
unsigned long log; /* log flags */
} idescsi_scsi_t;
@@ -126,23 +125,14 @@ static inline idescsi_scsi_t *drive_to_idescsi(ide_drive_t *ide_drive)
}
/*
- * Per ATAPI device status bits.
- */
-#define IDESCSI_DRQ_INTERRUPT 0 /* DRQ interrupt device */
-
-/*
- * ide-scsi requests.
- */
-#define IDESCSI_PC_RQ 90
-
-/*
* PIO data transfer routine using the scatter gather table.
*/
static void ide_scsi_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
unsigned int bcount, int write)
{
ide_hwif_t *hwif = drive->hwif;
- xfer_func_t *xf = write ? hwif->output_data : hwif->input_data;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ xfer_func_t *xf = write ? tp_ops->output_data : tp_ops->input_data;
char *buf;
int count;
@@ -228,7 +218,6 @@ static int idescsi_check_condition(ide_drive_t *drive,
rq->cmd_type = REQ_TYPE_SENSE;
rq->cmd_flags |= REQ_PREEMPT;
pc->timeout = jiffies + WAIT_READY;
- pc->callback = ide_scsi_callback;
/* NOTE! Save the failed packet command in "rq->buffer" */
rq->buffer = (void *) failed_cmd->special;
pc->scsi_cmd = ((struct ide_atapi_pc *) failed_cmd->special)->scsi_cmd;
@@ -237,6 +226,7 @@ static int idescsi_check_condition(ide_drive_t *drive,
ide_scsi_hex_dump(pc->c, 6);
}
rq->rq_disk = scsi->disk;
+ memcpy(rq->cmd, pc->c, 12);
ide_do_drive_cmd(drive, rq);
return 0;
}
@@ -246,10 +236,9 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
ide_hwif_t *hwif = drive->hwif;
- if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
+ if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
/* force an abort */
- hwif->OUTBSYNC(hwif, WIN_IDLEIMMEDIATE,
- hwif->io_ports.command_addr);
+ hwif->tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE);
rq->errors++;
@@ -421,10 +410,6 @@ static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *r
if (blk_sense_request(rq) || blk_special_request(rq)) {
struct ide_atapi_pc *pc = (struct ide_atapi_pc *)rq->special;
- idescsi_scsi_t *scsi = drive_to_idescsi(drive);
-
- if (test_bit(IDESCSI_DRQ_INTERRUPT, &scsi->flags))
- pc->flags |= PC_FLAG_DRQ_INTERRUPT;
if (drive->using_dma && !idescsi_map_sg(drive, pc))
pc->flags |= PC_FLAG_DMA_OK;
@@ -460,11 +445,14 @@ static inline void idescsi_add_settings(ide_drive_t *drive) { ; }
static void idescsi_setup (ide_drive_t *drive, idescsi_scsi_t *scsi)
{
if (drive->id && (drive->id->config & 0x0060) == 0x20)
- set_bit (IDESCSI_DRQ_INTERRUPT, &scsi->flags);
+ set_bit(IDE_AFLAG_DRQ_INTERRUPT, &drive->atapi_flags);
clear_bit(IDESCSI_SG_TRANSFORM, &scsi->transform);
#if IDESCSI_DEBUG_LOG
set_bit(IDESCSI_LOG_CMD, &scsi->log);
#endif /* IDESCSI_DEBUG_LOG */
+
+ drive->pc_callback = ide_scsi_callback;
+
idescsi_add_settings(drive);
}
@@ -616,7 +604,6 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
pc->scsi_cmd = cmd;
pc->done = done;
pc->timeout = jiffies + cmd->timeout_per_command;
- pc->callback = ide_scsi_callback;
if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
@@ -631,6 +618,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
rq->special = (char *) pc;
rq->cmd_type = REQ_TYPE_SPECIAL;
spin_unlock_irq(host->host_lock);
+ memcpy(rq->cmd, pc->c, 12);
blk_execute_rq_nowait(drive->queue, scsi->disk, rq, 0, NULL);
spin_lock_irq(host->host_lock);
return 0;
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index 93e407ee08b..1ff80de177d 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -201,6 +201,10 @@ static void cpm_uart_int_tx(struct uart_port *port)
cpm_uart_tx_pump(port);
}
+#ifdef CONFIG_CONSOLE_POLL
+static int serial_polled;
+#endif
+
/*
* Receive characters
*/
@@ -222,6 +226,12 @@ static void cpm_uart_int_rx(struct uart_port *port)
*/
bdp = pinfo->rx_cur;
for (;;) {
+#ifdef CONFIG_CONSOLE_POLL
+ if (unlikely(serial_polled)) {
+ serial_polled = 0;
+ return;
+ }
+#endif
/* get status */
status = in_be16(&bdp->cbd_sc);
/* If this one is empty, return happy */
@@ -253,7 +263,12 @@ static void cpm_uart_int_rx(struct uart_port *port)
goto handle_error;
if (uart_handle_sysrq_char(port, ch))
continue;
-
+#ifdef CONFIG_CONSOLE_POLL
+ if (unlikely(serial_polled)) {
+ serial_polled = 0;
+ return;
+ }
+#endif
error_return:
tty_insert_flip_char(tty, ch, flg);
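
Both guards just inserted into cpm_uart_int_rx() (and, later in this merge, into mpsc_rx_intr()) implement the same handoff: when the polled debug path has consumed characters behind the interrupt handler's back, the handler clears serial_polled and returns immediately, so the two RX consumers never interleave on the same buffer ring. The handoff reduces to this standalone sketch, with illustrative names:

#include <stdio.h>

static int serial_polled;	/* set by the poller, cleared by the ISR */

static void rx_isr(void)
{
	if (serial_polled) {
		serial_polled = 0;	/* poller owned the ring: resync */
		return;
	}
	puts("isr drains the ring");
}

static int poll_get(void)
{
	serial_polled = 1;		/* tell the ISR we bypassed it */
	return 'x';			/* pretend we read from the ring */
}

int main(void)
{
	rx_isr();	/* normal interrupt path */
	poll_get();	/* debugger polls a character */
	rx_isr();	/* bails out once... */
	rx_isr();	/* ...then resumes normal service */
	return 0;
}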
@@ -865,6 +880,80 @@ static void cpm_uart_config_port(struct uart_port *port, int flags)
cpm_uart_request_port(port);
}
}
+
+#ifdef CONFIG_CONSOLE_POLL
+/* Serial polling routines for writing and reading from the uart while
+ * in an interrupt or debug context.
+ */
+
+#define GDB_BUF_SIZE 512 /* power of 2, please */
+
+static char poll_buf[GDB_BUF_SIZE];
+static char *pollp;
+static int poll_chars;
+
+static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo)
+{
+ u_char c, *cp;
+ volatile cbd_t *bdp;
+ int i;
+
+ /* Get the address of the host memory buffer.
+ */
+ bdp = pinfo->rx_cur;
+ while (bdp->cbd_sc & BD_SC_EMPTY)
+ ;
+
+ /* Convert the buffer address to a host pointer; an address
+ * already inside the CPM DPRAM is left untouched.
+ */
+ cp = cpm2cpu_addr(bdp->cbd_bufaddr, pinfo);
+
+ if (obuf) {
+ i = c = bdp->cbd_datlen;
+ while (i-- > 0)
+ *obuf++ = *cp++;
+ } else
+ c = *cp;
+ bdp->cbd_sc &= ~(BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV | BD_SC_ID);
+ bdp->cbd_sc |= BD_SC_EMPTY;
+
+ if (bdp->cbd_sc & BD_SC_WRAP)
+ bdp = pinfo->rx_bd_base;
+ else
+ bdp++;
+ pinfo->rx_cur = (cbd_t *)bdp;
+
+ return (int)c;
+}
+
+static int cpm_get_poll_char(struct uart_port *port)
+{
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+
+ if (!serial_polled) {
+ serial_polled = 1;
+ poll_chars = 0;
+ }
+ if (poll_chars <= 0) {
+ poll_chars = poll_wait_key(poll_buf, pinfo);
+ pollp = poll_buf;
+ }
+ poll_chars--;
+ return *pollp++;
+}
+
+static void cpm_put_poll_char(struct uart_port *port,
+ unsigned char c)
+{
+ struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
+ static char ch[2];
+
+ ch[0] = (char)c;
+ cpm_uart_early_write(pinfo->port.line, ch, 1);
+}
+#endif /* CONFIG_CONSOLE_POLL */
+
static struct uart_ops cpm_uart_pops = {
.tx_empty = cpm_uart_tx_empty,
.set_mctrl = cpm_uart_set_mctrl,
@@ -882,6 +971,10 @@ static struct uart_ops cpm_uart_pops = {
.request_port = cpm_uart_request_port,
.config_port = cpm_uart_config_port,
.verify_port = cpm_uart_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = cpm_get_poll_char,
+ .poll_put_char = cpm_put_poll_char,
+#endif
};
struct uart_cpm_port cpm_uart_ports[UART_NR];
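
The pair of hooks wired into uart_ops above (and into mpsc_pops below) follow the CONFIG_CONSOLE_POLL contract: poll_get_char() fetches one character with the normal interrupt path fenced off, and poll_put_char() busy-waits a single character out. The real consumer is the kernel debugger core; the following is a standalone mirror of the contract only, with every name an illustrative stand-in:

#include <stdio.h>

/* Standalone mirror of the poll hook contract; not kernel code. */
struct poll_ops {
	int  (*poll_get_char)(void *port);
	void (*poll_put_char)(void *port, unsigned char c);
};

static int fake_get(void *port)
{
	(void)port;
	return getchar();		/* stand-in for the RX ring walk */
}

static void fake_put(void *port, unsigned char c)
{
	(void)port;
	putchar(c);			/* stand-in for the TX busy-wait */
}

static const struct poll_ops ops = { fake_get, fake_put };

int main(void)
{
	int c;

	/* a debugger stub echoing its input through the hooks */
	while ((c = ops.poll_get_char(NULL)) != EOF)
		ops.poll_put_char(NULL, (unsigned char)c);
	return 0;
}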
diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c
index c9f53e71f25..61d3ade5286 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/serial/mpsc.c
@@ -921,6 +921,10 @@ static int mpsc_make_ready(struct mpsc_port_info *pi)
return 0;
}
+#ifdef CONFIG_CONSOLE_POLL
+static int serial_polled;
+#endif
+
/*
******************************************************************************
*
@@ -956,7 +960,12 @@ static int mpsc_rx_intr(struct mpsc_port_info *pi)
while (!((cmdstat = be32_to_cpu(rxre->cmdstat))
& SDMA_DESC_CMDSTAT_O)) {
bytes_in = be16_to_cpu(rxre->bytecnt);
-
+#ifdef CONFIG_CONSOLE_POLL
+ if (unlikely(serial_polled)) {
+ serial_polled = 0;
+ return 0;
+ }
+#endif
/* Following use of tty struct directly is deprecated */
if (unlikely(tty_buffer_request_room(tty, bytes_in)
< bytes_in)) {
@@ -1017,6 +1026,12 @@ static int mpsc_rx_intr(struct mpsc_port_info *pi)
if (uart_handle_sysrq_char(&pi->port, *bp)) {
bp++;
bytes_in--;
+#ifdef CONFIG_CONSOLE_POLL
+ if (unlikely(serial_polled)) {
+ serial_polled = 0;
+ return 0;
+ }
+#endif
goto next_frame;
}
@@ -1519,6 +1534,133 @@ static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
return rc;
}
+#ifdef CONFIG_CONSOLE_POLL
+/* Serial polling routines for writing and reading from the uart while
+ * in an interrupt or debug context.
+ */
+
+static char poll_buf[2048];
+static int poll_ptr;
+static int poll_cnt;
+static void mpsc_put_poll_char(struct uart_port *port,
+ unsigned char c);
+
+static int mpsc_get_poll_char(struct uart_port *port)
+{
+ struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ struct mpsc_rx_desc *rxre;
+ u32 cmdstat, bytes_in, i;
+ u8 *bp;
+
+ if (!serial_polled)
+ serial_polled = 1;
+
+ pr_debug("mpsc_get_poll_char[%d]: polling for Rx\n", pi->port.line);
+
+ if (poll_cnt) {
+ poll_cnt--;
+ return poll_buf[poll_ptr++];
+ }
+ poll_ptr = 0;
+ poll_cnt = 0;
+
+ while (poll_cnt == 0) {
+ rxre = (struct mpsc_rx_desc *)(pi->rxr +
+ (pi->rxr_posn*MPSC_RXRE_SIZE));
+ dma_cache_sync(pi->port.dev, (void *)rxre,
+ MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ invalidate_dcache_range((ulong)rxre,
+ (ulong)rxre + MPSC_RXRE_SIZE);
+#endif
+ /*
+ * Loop through Rx descriptors handling ones that have
+ * been completed.
+ */
+ while (poll_cnt == 0 &&
+ !((cmdstat = be32_to_cpu(rxre->cmdstat)) &
+ SDMA_DESC_CMDSTAT_O)) {
+ bytes_in = be16_to_cpu(rxre->bytecnt);
+ bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
+ dma_cache_sync(pi->port.dev, (void *) bp,
+ MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ invalidate_dcache_range((ulong)bp,
+ (ulong)bp + MPSC_RXBE_SIZE);
+#endif
+ if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR |
+ SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) &&
+ !(cmdstat & pi->port.ignore_status_mask)) {
+ poll_buf[poll_cnt] = *bp;
+ poll_cnt++;
+ } else {
+ for (i = 0; i < bytes_in; i++) {
+ poll_buf[poll_cnt] = *bp++;
+ poll_cnt++;
+ }
+ pi->port.icount.rx += bytes_in;
+ }
+ rxre->bytecnt = cpu_to_be16(0);
+ wmb();
+ rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
+ SDMA_DESC_CMDSTAT_EI |
+ SDMA_DESC_CMDSTAT_F |
+ SDMA_DESC_CMDSTAT_L);
+ wmb();
+ dma_cache_sync(pi->port.dev, (void *)rxre,
+ MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ flush_dcache_range((ulong)rxre,
+ (ulong)rxre + MPSC_RXRE_SIZE);
+#endif
+
+ /* Advance to next descriptor */
+ pi->rxr_posn = (pi->rxr_posn + 1) &
+ (MPSC_RXR_ENTRIES - 1);
+ rxre = (struct mpsc_rx_desc *)(pi->rxr +
+ (pi->rxr_posn * MPSC_RXRE_SIZE));
+ dma_cache_sync(pi->port.dev, (void *)rxre,
+ MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+ invalidate_dcache_range((ulong)rxre,
+ (ulong)rxre + MPSC_RXRE_SIZE);
+#endif
+ }
+
+ /* Restart the rx engine if it's stopped */
+ if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
+ mpsc_start_rx(pi);
+ }
+ if (poll_cnt) {
+ poll_cnt--;
+ return poll_buf[poll_ptr++];
+ }
+
+ return 0;
+}
+
+
+static void mpsc_put_poll_char(struct uart_port *port,
+ unsigned char c)
+{
+ struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+ u32 data;
+
+ data = readl(pi->mpsc_base + MPSC_MPCR);
+ writeb(c, pi->mpsc_base + MPSC_CHR_1);
+ mb();
+ data = readl(pi->mpsc_base + MPSC_CHR_2);
+ data |= MPSC_CHR_2_TTCS;
+ writel(data, pi->mpsc_base + MPSC_CHR_2);
+ mb();
+
+ while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_TTCS);
+}
+#endif
static struct uart_ops mpsc_pops = {
.tx_empty = mpsc_tx_empty,
@@ -1537,6 +1679,10 @@ static struct uart_ops mpsc_pops = {
.request_port = mpsc_request_port,
.config_port = mpsc_config_port,
.verify_port = mpsc_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = mpsc_get_poll_char,
+ .poll_put_char = mpsc_put_poll_char,
+#endif
};
/*
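
Both mpsc_get_poll_char() above and cpm_get_poll_char() earlier share one buffer-then-drain shape: the hardware hands back a whole frame at a time, while the caller wants exactly one byte per call, so reads refill a static buffer and then drain it byte by byte. A standalone sketch of that shape, where hw_read_frame() stands in for the descriptor walk:

#include <stdio.h>
#include <string.h>

static char poll_buf[2048];
static int poll_ptr, poll_cnt;

static int hw_read_frame(char *dst)
{
	strcpy(dst, "frame");		/* pretend 5 bytes arrived */
	return 5;
}

static int poll_get_one(void)
{
	if (poll_cnt <= 0) {		/* buffer drained: refill it */
		poll_cnt = hw_read_frame(poll_buf);
		poll_ptr = 0;
	}
	poll_cnt--;
	return poll_buf[poll_ptr++];	/* hand out a single byte */
}

int main(void)
{
	int i;

	for (i = 0; i < 7; i++)		/* crosses a refill boundary */
		putchar(poll_get_one());
	putchar('\n');
	return 0;
}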
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index fbd6289977c..8fb0066609b 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -152,9 +152,10 @@ static int is_vbus_present(void)
static void pullup_off(void)
{
struct pxa2xx_udc_mach_info *mach = the_controller->mach;
+ int off_level = mach->gpio_pullup_inverted;
if (mach->gpio_pullup)
- gpio_set_value(mach->gpio_pullup, 0);
+ gpio_set_value(mach->gpio_pullup, off_level);
else if (mach->udc_command)
mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
}
@@ -162,9 +163,10 @@ static void pullup_off(void)
static void pullup_on(void)
{
struct pxa2xx_udc_mach_info *mach = the_controller->mach;
+ int on_level = !mach->gpio_pullup_inverted;
if (mach->gpio_pullup)
- gpio_set_value(mach->gpio_pullup, 1);
+ gpio_set_value(mach->gpio_pullup, on_level);
else if (mach->udc_command)
mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
}
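
The pxa25x_udc change makes the D+ pullup polarity board-configurable: gpio_pullup_inverted flips which GPIO level means "connected". The level driven is effectively connect XOR inverted, as in this illustrative standalone reduction (not driver code):

#include <stdbool.h>
#include <stdio.h>

static void set_pullup(bool inverted, bool connect)
{
	int level = connect ? !inverted : inverted;	/* == connect ^ inverted */

	printf("gpio <- %d (%sconnect, %s board)\n", level,
	       connect ? "" : "dis",
	       inverted ? "inverted" : "normal");
}

int main(void)
{
	set_pullup(false, true);	/* normal board:   connect    -> 1 */
	set_pullup(true,  true);	/* inverted board: connect    -> 0 */
	set_pullup(true,  false);	/* inverted board: disconnect -> 1 */
	return 0;
}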
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index d0746261c95..bb251436950 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -227,6 +227,22 @@ static int pxafb_bpp_to_lccr3(struct fb_var_screeninfo *var)
case 4: ret = LCCR3_4BPP; break;
case 8: ret = LCCR3_8BPP; break;
case 16: ret = LCCR3_16BPP; break;
+ case 24:
+ switch (var->red.length + var->green.length +
+ var->blue.length + var->transp.length) {
+ case 18: ret = LCCR3_18BPP_P | LCCR3_PDFOR_3; break;
+ case 19: ret = LCCR3_19BPP_P; break;
+ }
+ break;
+ case 32:
+ switch (var->red.length + var->green.length +
+ var->blue.length + var->transp.length) {
+ case 18: ret = LCCR3_18BPP | LCCR3_PDFOR_3; break;
+ case 19: ret = LCCR3_19BPP; break;
+ case 24: ret = LCCR3_24BPP | LCCR3_PDFOR_3; break;
+ case 25: ret = LCCR3_25BPP; break;
+ }
+ break;
}
return ret;
}
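
The new 24 and 32 bpp cases key off the sum of the RGBA component lengths rather than bits_per_pixel alone: RGBT666 (6+6+6+1 = 19 significant bits) in a 32-bit word selects LCCR3_19BPP, while the same layout packed into three bytes selects LCCR3_19BPP_P. A sketch of that classification, with placeholder enum values standing in for the real LCCR3_* register encodings:

#include <stdio.h>

enum lccr3 { BAD, BPP18_P, BPP19_P, BPP18, BPP19, BPP24, BPP25 };

/* depth = red + green + blue + transp lengths; packed = 24-bit word */
static enum lccr3 classify(int depth, int packed)
{
	switch (depth) {
	case 18: return packed ? BPP18_P : BPP18;
	case 19: return packed ? BPP19_P : BPP19;	/* RGBT666: 6+6+6+1 */
	case 24: return packed ? BAD : BPP24;
	case 25: return packed ? BAD : BPP25;		/* RGBT888: 8+8+8+1 */
	default: return BAD;
	}
}

int main(void)
{
	/* RGBT666 in a 32-bit word -> unpacked 19 bpp mode */
	printf("%d\n", classify(6 + 6 + 6 + 1, 0) == BPP19);
	return 0;
}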
@@ -345,6 +361,41 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
var->green.offset = 5; var->green.length = 6;
var->blue.offset = 0; var->blue.length = 5;
var->transp.offset = var->transp.length = 0;
+ } else if (var->bits_per_pixel > 16) {
+ struct pxafb_mode_info *mode;
+
+ mode = pxafb_getmode(inf, var);
+ if (!mode)
+ return -EINVAL;
+
+ switch (mode->depth) {
+ case 18: /* RGB666 */
+ var->transp.offset = var->transp.length = 0;
+ var->red.offset = 12; var->red.length = 6;
+ var->green.offset = 6; var->green.length = 6;
+ var->blue.offset = 0; var->blue.length = 6;
+ break;
+ case 19: /* RGBT666 */
+ var->transp.offset = 18; var->transp.length = 1;
+ var->red.offset = 12; var->red.length = 6;
+ var->green.offset = 6; var->green.length = 6;
+ var->blue.offset = 0; var->blue.length = 6;
+ break;
+ case 24: /* RGB888 */
+ var->transp.offset = var->transp.length = 0;
+ var->red.offset = 16; var->red.length = 8;
+ var->green.offset = 8; var->green.length = 8;
+ var->blue.offset = 0; var->blue.length = 8;
+ break;
+ case 25: /* RGBT888 */
+ var->transp.offset = 24; var->transp.length = 1;
+ var->red.offset = 16; var->red.length = 8;
+ var->green.offset = 8; var->green.length = 8;
+ var->blue.offset = 0; var->blue.length = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
} else {
var->red.offset = var->green.offset = 0;
var->blue.offset = var->transp.offset = 0;
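
For reference, the component layouts the new check_var() branch fills in for depths above 16 bpp, restated compactly (offsets and lengths copied from the hunk above; the struct name is illustrative):

#include <stdio.h>

struct rgbt_layout {
	int depth;
	int t_off, t_len;	/* transparency bit, if any */
	int r_off, g_off, b_off;
	int comp_len;		/* red/green/blue share one length */
};

static const struct rgbt_layout layouts[] = {
	{ 18,  0, 0, 12, 6, 0, 6 },	/* RGB666  */
	{ 19, 18, 1, 12, 6, 0, 6 },	/* RGBT666 */
	{ 24,  0, 0, 16, 8, 0, 8 },	/* RGB888  */
	{ 25, 24, 1, 16, 8, 0, 8 },	/* RGBT888 */
};

int main(void)
{
	printf("depth %d: red at bit %d\n",
	       layouts[1].depth, layouts[1].r_off);
	return 0;
}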
@@ -376,7 +427,7 @@ static int pxafb_set_par(struct fb_info *info)
struct pxafb_info *fbi = (struct pxafb_info *)info;
struct fb_var_screeninfo *var = &info->var;
- if (var->bits_per_pixel == 16)
+ if (var->bits_per_pixel >= 16)
fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR;
else if (!fbi->cmap_static)
fbi->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR;
@@ -391,7 +442,7 @@ static int pxafb_set_par(struct fb_info *info)
fbi->fb.fix.line_length = var->xres_virtual *
var->bits_per_pixel / 8;
- if (var->bits_per_pixel == 16)
+ if (var->bits_per_pixel >= 16)
fbi->palette_size = 0;
else
fbi->palette_size = var->bits_per_pixel == 1 ?
@@ -404,7 +455,7 @@ static int pxafb_set_par(struct fb_info *info)
*/
pxafb_set_truecolor(fbi->fb.fix.visual == FB_VISUAL_TRUECOLOR);
- if (fbi->fb.var.bits_per_pixel == 16)
+ if (fbi->fb.var.bits_per_pixel >= 16)
fb_dealloc_cmap(&fbi->fb.cmap);
else
fb_alloc_cmap(&fbi->fb.cmap, 1<<fbi->fb.var.bits_per_pixel, 0);
@@ -831,6 +882,8 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var,
case 4:
case 8:
case 16:
+ case 24:
+ case 32:
break;
default:
printk(KERN_ERR "%s: invalid bit depth %d\n",
@@ -968,6 +1021,11 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi)
for (gpio = 58; ldd_bits; gpio++, ldd_bits--)
pxa_gpio_mode(gpio | GPIO_ALT_FN_2_OUT);
+ /* 18 bit interface */
+ if (fbi->fb.var.bits_per_pixel > 16) {
+ pxa_gpio_mode(86 | GPIO_ALT_FN_2_OUT);
+ pxa_gpio_mode(87 | GPIO_ALT_FN_2_OUT);
+ }
pxa_gpio_mode(GPIO74_LCD_FCLK_MD);
pxa_gpio_mode(GPIO75_LCD_LCLK_MD);
pxa_gpio_mode(GPIO76_LCD_PCLK_MD);