-rw-r--r--  Documentation/cpu-freq/core.txt | 2
-rw-r--r--  Documentation/feature-removal-schedule.txt | 22
-rw-r--r--  Documentation/kbuild/kconfig-language.txt | 8
-rw-r--r--  MAINTAINERS | 8
-rw-r--r--  Makefile | 4
-rw-r--r--  arch/arm/mach-pxa/Kconfig | 16
-rw-r--r--  arch/arm/plat-omap/Kconfig | 2
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Kconfig | 6
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Makefile | 2
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c | 813
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/gx-suspmod.c | 4
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.c | 8
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/p4-clockmod.c | 38
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/sc520_freq.c | 7
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c | 15
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-lib.c | 32
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-lib.h | 1
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-smi.c | 3
-rw-r--r--  arch/powerpc/platforms/4xx/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/embedded6xx/Kconfig | 2
-rw-r--r--  arch/ppc/Kconfig | 4
-rw-r--r--  arch/ppc/platforms/4xx/Kconfig | 2
-rw-r--r--  arch/um/drivers/net_kern.c | 3
-rw-r--r--  arch/v850/Kconfig | 28
-rw-r--r--  arch/x86_64/kernel/cpufreq/Kconfig | 6
-rw-r--r--  arch/x86_64/kernel/cpufreq/Makefile | 2
-rw-r--r--  drivers/char/Kconfig | 2
-rw-r--r--  drivers/char/watchdog/at91rm9200_wdt.c | 6
-rw-r--r--  drivers/char/watchdog/mpcore_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/omap_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/pcwd_usb.c | 5
-rw-r--r--  drivers/char/watchdog/rm9k_wdt.c | 44
-rw-r--r--  drivers/cpufreq/cpufreq.c | 153
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 33
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 33
-rw-r--r--  drivers/cpufreq/cpufreq_performance.c | 9
-rw-r--r--  drivers/cpufreq/cpufreq_powersave.c | 9
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 11
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c | 11
-rw-r--r--  drivers/cpufreq/freq_table.c | 28
-rw-r--r--  drivers/ide/Kconfig | 2
-rw-r--r--  drivers/infiniband/core/Makefile | 6
-rw-r--r--  drivers/infiniband/core/cm.c | 4
-rw-r--r--  drivers/infiniband/core/cma.c | 416
-rw-r--r--  drivers/infiniband/core/fmr_pool.c | 12
-rw-r--r--  drivers/infiniband/core/mad.c | 90
-rw-r--r--  drivers/infiniband/core/mad_priv.h | 6
-rw-r--r--  drivers/infiniband/core/ucma.c | 874
-rw-r--r--  drivers/infiniband/core/uverbs_marshall.c | 5
-rw-r--r--  drivers/infiniband/core/uverbs_mem.c | 12
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_qp.c | 13
-rw-r--r--  drivers/infiniband/hw/ipath/Makefile | 1
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_dma.c | 189
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c | 4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c | 5
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6110.c | 3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6120.c | 8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_init_chip.c | 3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_intr.c | 3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_keys.c | 8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mr.c | 7
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sysfs.c | 3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c | 1
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 75
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 3
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h | 2
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 4
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 125
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 81
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h | 2
-rw-r--r--  drivers/leds/Kconfig | 22
-rw-r--r--  drivers/media/video/ov7670.c | 2
-rw-r--r--  drivers/net/e100.c | 3
-rw-r--r--  drivers/serial/Kconfig | 4
-rw-r--r--  drivers/serial/icom.c | 2
-rw-r--r--  fs/Kconfig | 14
-rw-r--r--  fs/binfmt_elf_fdpic.c | 3
-rw-r--r--  include/asm-i386/msr.h | 5
-rw-r--r--  include/asm-x86_64/msr.h | 4
-rw-r--r--  include/linux/compiler.h | 2
-rw-r--r--  include/linux/configfs.h | 25
-rw-r--r--  include/linux/cpufreq.h | 3
-rw-r--r--  include/linux/sysctl.h | 2
-rw-r--r--  include/rdma/ib_marshall.h | 5
-rw-r--r--  include/rdma/ib_verbs.h | 253
-rw-r--r--  include/rdma/rdma_cm.h | 62
-rw-r--r--  include/rdma/rdma_cm_ib.h | 3
-rw-r--r--  include/rdma/rdma_user_cm.h | 206
-rw-r--r--  init/Kconfig | 2
-rw-r--r--  lib/Kconfig.debug | 2
-rw-r--r--  net/ipv4/ip_fragment.c | 2
-rw-r--r--  sound/aoa/fabrics/Kconfig | 2
94 files changed, 3073 insertions, 916 deletions
diff --git a/Documentation/cpu-freq/core.txt b/Documentation/cpu-freq/core.txt
index 29b3f9ffc66..ce0666e5103 100644
--- a/Documentation/cpu-freq/core.txt
+++ b/Documentation/cpu-freq/core.txt
@@ -24,7 +24,7 @@ Contents:
1. General Information
=======================
-The CPUFreq core code is located in linux/kernel/cpufreq.c. This
+The CPUFreq core code is located in drivers/cpufreq/cpufreq.c. This
cpufreq code offers a standardized interface for the CPUFreq
architecture drivers (those pieces of code that do actual
frequency transitions), as well as to "notifiers". These are device
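
The "notifiers" the text refers to are ordinary kernel notifier blocks.
A minimal sketch (illustrative only, not part of this patch) of a module
that logs completed frequency transitions through the 2.6-era cpufreq
notifier API:

    #include <linux/module.h>
    #include <linux/cpufreq.h>

    /* Log every completed frequency transition reported by the core. */
    static int freq_transition(struct notifier_block *nb,
                               unsigned long val, void *data)
    {
            struct cpufreq_freqs *freqs = data;

            if (val == CPUFREQ_POSTCHANGE)
                    printk(KERN_INFO "cpu%u: %u kHz -> %u kHz\n",
                           freqs->cpu, freqs->old, freqs->new);
            return 0;
    }

    static struct notifier_block freq_nb = {
            .notifier_call = freq_transition,
    };

    static int __init freq_watch_init(void)
    {
            return cpufreq_register_notifier(&freq_nb,
                                             CPUFREQ_TRANSITION_NOTIFIER);
    }

    static void __exit freq_watch_exit(void)
    {
            cpufreq_unregister_notifier(&freq_nb,
                                        CPUFREQ_TRANSITION_NOTIFIER);
    }

    module_init(freq_watch_init);
    module_exit(freq_watch_exit);
    MODULE_LICENSE("GPL");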
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 031029e89fd..64ce44da593 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -259,3 +259,25 @@ Why: The new layer 3 independent connection tracking replaces the old
Who: Patrick McHardy <kaber@trash.net>
---------------------------
+
+What: ACPI hooks (X86_SPEEDSTEP_CENTRINO_ACPI) in speedstep-centrino driver
+When: December 2006
+Why: The speedstep-centrino driver with ACPI hooks and the acpi-cpufreq
+ driver are functionally very similar. They talk to ACPI in the same
+ way. The only difference between them is how they perform frequency
+ transitions: one uses MSRs and the other uses IO ports. The
+ functionality of speedstep_centrino with ACPI hooks is now merged into
+ acpi-cpufreq, so one common driver will support all Intel Enhanced
+ Speedstep capable CPUs. That means less confusion over the name of the
+ speedstep-centrino driver (which was also being used on non-Centrino
+ platforms), less duplication of code, less maintenance effort, and no
+ possibility of the two drivers going out of sync.
+ Current users of speedstep_centrino with ACPI hooks are requested to
+ switch over to the acpi-cpufreq driver. speedstep-centrino will
+ continue to work using the older, non-ACPI, static-table-based scheme
+ even after this date.
+
+Who: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+
+---------------------------
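
The mechanical difference called out above (MSRs vs. IO ports) boils
down to two write paths. A sketch assuming a 16-bit control value, with
the port number as a placeholder (the real drivers take both from the
ACPI _PCT data):

    #include <asm/msr.h>
    #include <asm/io.h>

    /* Enhanced SpeedStep style: program the P-state through PERF_CTL. */
    static void set_pstate_msr(u32 control)
    {
            u32 lo, hi;

            rdmsr(MSR_IA32_PERF_CTL, lo, hi);
            lo = (lo & ~0xffff) | (control & 0xffff);
            wrmsr(MSR_IA32_PERF_CTL, lo, hi);
    }

    /* Classic acpi-cpufreq style: write the control value to an IO port. */
    static void set_pstate_io(u16 port, u32 control)
    {
            outl(control, port);
    }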
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index 125093c3ef7..536d5bfbdb8 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -29,7 +29,7 @@ them. A single configuration option is defined like this:
config MODVERSIONS
bool "Set version information on all module symbols"
- depends MODULES
+ depends on MODULES
help
Usually, modules have to be recompiled whenever you switch to a new
kernel. ...
@@ -163,7 +163,7 @@ The position of a menu entry in the tree is determined in two ways. First
it can be specified explicitly:
menu "Network device support"
- depends NET
+ depends on NET
config NETDEVICES
...
@@ -188,10 +188,10 @@ config MODULES
config MODVERSIONS
bool "Set version information on all module symbols"
- depends MODULES
+ depends on MODULES
comment "module support disabled"
- depends !MODULES
+ depends on !MODULES
MODVERSIONS directly depends on MODULES, this means it's only visible if
MODULES is different from 'n'. The comment on the other hand is always
diff --git a/MAINTAINERS b/MAINTAINERS
index b2024938adc..8a0bfeca55c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -740,7 +740,7 @@ P: Dave Jones
M: davej@codemonkey.org.uk
L: cpufreq@lists.linux.org.uk
W: http://www.codemonkey.org.uk/projects/cpufreq/
-T: git kernel.org/pub/scm/linux/kernel/davej/cpufreq.git
+T: git kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git
S: Maintained
CPUID/MSR DRIVER
@@ -1504,8 +1504,10 @@ T: git kernel.org:/pub/scm/linux/kernel/git/dtor/input.git
S: Maintained
INOTIFY
-P: John McCutchan and Robert Love
-M: ttb@tentacle.dhs.org and rml@novell.com
+P: John McCutchan
+M: ttb@tentacle.dhs.org
+P: Robert Love
+M: rml@novell.com
L: linux-kernel@vger.kernel.org
S: Maintained
diff --git a/Makefile b/Makefile
index 73e825b39a0..4eabaa8afbf 100644
--- a/Makefile
+++ b/Makefile
@@ -1100,9 +1100,9 @@ boards := $(notdir $(boards))
help:
@echo 'Cleaning targets:'
- @echo ' clean - remove most generated files but keep the config and'
+ @echo ' clean - Remove most generated files but keep the config and'
@echo ' enough build support to build external modules'
- @echo ' mrproper - remove all generated files + config + various backup files'
+ @echo ' mrproper - Remove all generated files + config + various backup files'
@echo ' distclean - mrproper + remove editor backup and patch files'
@echo ''
@echo 'Configuration targets:'
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 9e3d0bdcba0..5c0a10041cd 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -75,28 +75,28 @@ endmenu
config MACH_POODLE
bool "Enable Sharp SL-5600 (Poodle) Support"
- depends PXA_SHARPSL_25x
+ depends on PXA_SHARPSL_25x
select SHARP_LOCOMO
select PXA_SSP
config MACH_CORGI
bool "Enable Sharp SL-C700 (Corgi) Support"
- depends PXA_SHARPSL_25x
+ depends on PXA_SHARPSL_25x
select PXA_SHARP_C7xx
config MACH_SHEPHERD
bool "Enable Sharp SL-C750 (Shepherd) Support"
- depends PXA_SHARPSL_25x
+ depends on PXA_SHARPSL_25x
select PXA_SHARP_C7xx
config MACH_HUSKY
bool "Enable Sharp SL-C760 (Husky) Support"
- depends PXA_SHARPSL_25x
+ depends on PXA_SHARPSL_25x
select PXA_SHARP_C7xx
config MACH_AKITA
bool "Enable Sharp SL-1000 (Akita) Support"
- depends PXA_SHARPSL_27x
+ depends on PXA_SHARPSL_27x
select PXA_SHARP_Cxx00
select MACH_SPITZ
select I2C
@@ -104,17 +104,17 @@ config MACH_AKITA
config MACH_SPITZ
bool "Enable Sharp Zaurus SL-3000 (Spitz) Support"
- depends PXA_SHARPSL_27x
+ depends on PXA_SHARPSL_27x
select PXA_SHARP_Cxx00
config MACH_BORZOI
bool "Enable Sharp Zaurus SL-3100 (Borzoi) Support"
- depends PXA_SHARPSL_27x
+ depends on PXA_SHARPSL_27x
select PXA_SHARP_Cxx00
config MACH_TOSA
bool "Enable Sharp SL-6000x (Tosa) Support"
- depends PXA_SHARPSL_25x
+ depends on PXA_SHARPSL_25x
config PXA25x
bool
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index ec752e16d61..f2dc363de66 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -113,7 +113,7 @@ endchoice
config OMAP_SERIAL_WAKE
bool "Enable wake-up events for serial ports"
- depends OMAP_MUX
+ depends on OMAP_MUX
default y
help
Select this option if you want to have your system wake up
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index ccc1edff5c9..5299c5bf445 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -17,6 +17,7 @@ config X86_ACPI_CPUFREQ
help
This driver adds a CPUFreq driver which utilizes the ACPI
Processor Performance States.
+ This driver also supports Intel Enhanced Speedstep.
For details, take a look at <file:Documentation/cpu-freq/>.
@@ -121,11 +122,14 @@ config X86_SPEEDSTEP_CENTRINO
If in doubt, say N.
config X86_SPEEDSTEP_CENTRINO_ACPI
- bool "Use ACPI tables to decode valid frequency/voltage pairs"
+ bool "Use ACPI tables to decode valid frequency/voltage (deprecated)"
depends on X86_SPEEDSTEP_CENTRINO && ACPI_PROCESSOR
depends on !(X86_SPEEDSTEP_CENTRINO = y && ACPI_PROCESSOR = m)
default y
help
+ This option is deprecated: its functionality is now merged into
+ acpi_cpufreq (X86_ACPI_CPUFREQ). Use that driver instead of
+ speedstep_centrino.
Use primarily the information provided in the BIOS ACPI tables
to determine valid CPU frequency and voltage pairings. It is
required for the driver to work on non-Banias CPUs.
diff --git a/arch/i386/kernel/cpu/cpufreq/Makefile b/arch/i386/kernel/cpu/cpufreq/Makefile
index 2e894f1c891..8de3abe322a 100644
--- a/arch/i386/kernel/cpu/cpufreq/Makefile
+++ b/arch/i386/kernel/cpu/cpufreq/Makefile
@@ -7,9 +7,9 @@ obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o
obj-$(CONFIG_X86_LONGRUN) += longrun.o
obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o
obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o
-obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o
obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
+obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 57c880bf0bd..18f4715c655 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -1,9 +1,10 @@
/*
- * acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.3 $)
+ * acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.4 $)
*
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
+ * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
@@ -27,202 +28,387 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
#include <linux/cpufreq.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
#include <linux/compiler.h>
-#include <linux/sched.h> /* current */
#include <linux/dmi.h>
-#include <asm/io.h>
-#include <asm/delay.h>
-#include <asm/uaccess.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
+#include <asm/io.h>
+#include <asm/msr.h>
+#include <asm/processor.h>
+#include <asm/cpufeature.h>
+#include <asm/delay.h>
+#include <asm/uaccess.h>
+
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg)
MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");
+enum {
+ UNDEFINED_CAPABLE = 0,
+ SYSTEM_INTEL_MSR_CAPABLE,
+ SYSTEM_IO_CAPABLE,
+};
+
+#define INTEL_MSR_RANGE (0xffff)
+#define CPUID_6_ECX_APERFMPERF_CAPABILITY (0x1)
-struct cpufreq_acpi_io {
- struct acpi_processor_performance *acpi_data;
- struct cpufreq_frequency_table *freq_table;
- unsigned int resume;
+struct acpi_cpufreq_data {
+ struct acpi_processor_performance *acpi_data;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int max_freq;
+ unsigned int resume;
+ unsigned int cpu_feature;
};
-static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
-static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
+static struct acpi_cpufreq_data *drv_data[NR_CPUS];
+static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
static struct cpufreq_driver acpi_cpufreq_driver;
static unsigned int acpi_pstate_strict;
-static int
-acpi_processor_write_port(
- u16 port,
- u8 bit_width,
- u32 value)
+static int check_est_cpu(unsigned int cpuid)
+{
+ struct cpuinfo_x86 *cpu = &cpu_data[cpuid];
+
+ if (cpu->x86_vendor != X86_VENDOR_INTEL ||
+ !cpu_has(cpu, X86_FEATURE_EST))
+ return 0;
+
+ return 1;
+}
+
+static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
+{
+ struct acpi_processor_performance *perf;
+ int i;
+
+ perf = data->acpi_data;
+
+ for (i=0; i<perf->state_count; i++) {
+ if (value == perf->states[i].status)
+ return data->freq_table[i].frequency;
+ }
+ return 0;
+}
+
+static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
- if (bit_width <= 8) {
+ int i;
+ struct acpi_processor_performance *perf;
+
+ msr &= INTEL_MSR_RANGE;
+ perf = data->acpi_data;
+
+ for (i=0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ if (msr == perf->states[data->freq_table[i].index].status)
+ return data->freq_table[i].frequency;
+ }
+ return data->freq_table[0].frequency;
+}
+
+static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
+{
+ switch (data->cpu_feature) {
+ case SYSTEM_INTEL_MSR_CAPABLE:
+ return extract_msr(val, data);
+ case SYSTEM_IO_CAPABLE:
+ return extract_io(val, data);
+ default:
+ return 0;
+ }
+}
+
+static void wrport(u16 port, u8 bit_width, u32 value)
+{
+ if (bit_width <= 8)
outb(value, port);
- } else if (bit_width <= 16) {
+ else if (bit_width <= 16)
outw(value, port);
- } else if (bit_width <= 32) {
+ else if (bit_width <= 32)
outl(value, port);
- } else {
- return -ENODEV;
- }
- return 0;
}
-static int
-acpi_processor_read_port(
- u16 port,
- u8 bit_width,
- u32 *ret)
+static void rdport(u16 port, u8 bit_width, u32 *ret)
{
*ret = 0;
- if (bit_width <= 8) {
+ if (bit_width <= 8)
*ret = inb(port);
- } else if (bit_width <= 16) {
+ else if (bit_width <= 16)
*ret = inw(port);
- } else if (bit_width <= 32) {
+ else if (bit_width <= 32)
*ret = inl(port);
- } else {
- return -ENODEV;
+}
+
+struct msr_addr {
+ u32 reg;
+};
+
+struct io_addr {
+ u16 port;
+ u8 bit_width;
+};
+
+typedef union {
+ struct msr_addr msr;
+ struct io_addr io;
+} drv_addr_union;
+
+struct drv_cmd {
+ unsigned int type;
+ cpumask_t mask;
+ drv_addr_union addr;
+ u32 val;
+};
+
+static void do_drv_read(struct drv_cmd *cmd)
+{
+ u32 h;
+
+ switch (cmd->type) {
+ case SYSTEM_INTEL_MSR_CAPABLE:
+ rdmsr(cmd->addr.msr.reg, cmd->val, h);
+ break;
+ case SYSTEM_IO_CAPABLE:
+ rdport(cmd->addr.io.port, cmd->addr.io.bit_width, &cmd->val);
+ break;
+ default:
+ break;
}
- return 0;
}
-static int
-acpi_processor_set_performance (
- struct cpufreq_acpi_io *data,
- unsigned int cpu,
- int state)
+static void do_drv_write(struct drv_cmd *cmd)
{
- u16 port = 0;
- u8 bit_width = 0;
- int i = 0;
- int ret = 0;
- u32 value = 0;
- int retval;
- struct acpi_processor_performance *perf;
-
- dprintk("acpi_processor_set_performance\n");
-
- retval = 0;
- perf = data->acpi_data;
- if (state == perf->state) {
- if (unlikely(data->resume)) {
- dprintk("Called after resume, resetting to P%d\n", state);
- data->resume = 0;
- } else {
- dprintk("Already at target state (P%d)\n", state);
- return (retval);
- }
+ u32 h = 0;
+
+ switch (cmd->type) {
+ case SYSTEM_INTEL_MSR_CAPABLE:
+ wrmsr(cmd->addr.msr.reg, cmd->val, h);
+ break;
+ case SYSTEM_IO_CAPABLE:
+ wrport(cmd->addr.io.port, cmd->addr.io.bit_width, cmd->val);
+ break;
+ default:
+ break;
}
+}
- dprintk("Transitioning from P%d to P%d\n", perf->state, state);
+static void drv_read(struct drv_cmd *cmd)
+{
+ cpumask_t saved_mask = current->cpus_allowed;
+ cmd->val = 0;
- /*
- * First we write the target state's 'control' value to the
- * control_register.
- */
+ set_cpus_allowed(current, cmd->mask);
+ do_drv_read(cmd);
+ set_cpus_allowed(current, saved_mask);
+}
+
+static void drv_write(struct drv_cmd *cmd)
+{
+ cpumask_t saved_mask = current->cpus_allowed;
+ unsigned int i;
+
+ for_each_cpu_mask(i, cmd->mask) {
+ set_cpus_allowed(current, cpumask_of_cpu(i));
+ do_drv_write(cmd);
+ }
+
+ set_cpus_allowed(current, saved_mask);
+ return;
+}
+
+static u32 get_cur_val(cpumask_t mask)
+{
+ struct acpi_processor_performance *perf;
+ struct drv_cmd cmd;
+
+ if (unlikely(cpus_empty(mask)))
+ return 0;
+
+ switch (drv_data[first_cpu(mask)]->cpu_feature) {
+ case SYSTEM_INTEL_MSR_CAPABLE:
+ cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
+ cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
+ break;
+ case SYSTEM_IO_CAPABLE:
+ cmd.type = SYSTEM_IO_CAPABLE;
+ perf = drv_data[first_cpu(mask)]->acpi_data;
+ cmd.addr.io.port = perf->control_register.address;
+ cmd.addr.io.bit_width = perf->control_register.bit_width;
+ break;
+ default:
+ return 0;
+ }
+
+ cmd.mask = mask;
- port = perf->control_register.address;
- bit_width = perf->control_register.bit_width;
- value = (u32) perf->states[state].control;
+ drv_read(&cmd);
- dprintk("Writing 0x%08x to port 0x%04x\n", value, port);
+ dprintk("get_cur_val = %u\n", cmd.val);
- ret = acpi_processor_write_port(port, bit_width, value);
- if (ret) {
- dprintk("Invalid port width 0x%04x\n", bit_width);
- return (ret);
+ return cmd.val;
+}
+
+/*
+ * Return the measured active (C0) frequency on this CPU since the last
+ * call to this function.
+ * Input: cpu number
+ * Return: Average CPU frequency in terms of max frequency (zero on error)
+ *
+ * We use the IA32_MPERF and IA32_APERF MSRs to measure performance
+ * over a period of time while the CPU is in the C0 state.
+ * IA32_MPERF counts at the rate of the maximum advertised frequency;
+ * IA32_APERF counts at the rate of the actual CPU frequency.
+ * Only the IA32_APERF/IA32_MPERF ratio is architecturally defined;
+ * no meaning should be attached to the absolute values of these MSRs.
+ */
+static unsigned int get_measured_perf(unsigned int cpu)
+{
+ union {
+ struct {
+ u32 lo;
+ u32 hi;
+ } split;
+ u64 whole;
+ } aperf_cur, mperf_cur;
+
+ cpumask_t saved_mask;
+ unsigned int perf_percent;
+ unsigned int retval;
+
+ saved_mask = current->cpus_allowed;
+ set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ if (get_cpu() != cpu) {
+ /* We were not able to run on requested processor */
+ put_cpu();
+ return 0;
}
+ rdmsr(MSR_IA32_APERF, aperf_cur.split.lo, aperf_cur.split.hi);
+ rdmsr(MSR_IA32_MPERF, mperf_cur.split.lo, mperf_cur.split.hi);
+
+ wrmsr(MSR_IA32_APERF, 0, 0);
+ wrmsr(MSR_IA32_MPERF, 0, 0);
+
+#ifdef __i386__
/*
- * Assume the write went through when acpi_pstate_strict is not used.
- * As read status_register is an expensive operation and there
- * are no specific error cases where an IO port write will fail.
+ * We don't want to do a 64-bit divide with a 32-bit kernel.
+ * Get an approximate value instead; fall back to zero if no
+ * approximation is possible.
*/
- if (acpi_pstate_strict) {
- /* Then we read the 'status_register' and compare the value
- * with the target state's 'status' to make sure the
- * transition was successful.
- * Note that we'll poll for up to 1ms (100 cycles of 10us)
- * before giving up.
- */
-
- port = perf->status_register.address;
- bit_width = perf->status_register.bit_width;
-
- dprintk("Looking for 0x%08x from port 0x%04x\n",
- (u32) perf->states[state].status, port);
-
- for (i = 0; i < 100; i++) {
- ret = acpi_processor_read_port(port, bit_width, &value);
- if (ret) {
- dprintk("Invalid port width 0x%04x\n", bit_width);
- return (ret);
- }
- if (value == (u32) perf->states[state].status)
- break;
- udelay(10);
- }
- } else {
- value = (u32) perf->states[state].status;
+ if (unlikely(aperf_cur.split.hi || mperf_cur.split.hi)) {
+ int shift_count;
+ u32 h;
+
+ h = max_t(u32, aperf_cur.split.hi, mperf_cur.split.hi);
+ shift_count = fls(h);
+
+ aperf_cur.whole >>= shift_count;
+ mperf_cur.whole >>= shift_count;
+ }
+
+ if (((unsigned long)(-1) / 100) < aperf_cur.split.lo) {
+ int shift_count = 7;
+ aperf_cur.split.lo >>= shift_count;
+ mperf_cur.split.lo >>= shift_count;
+ }
+
+ if (aperf_cur.split.lo && mperf_cur.split.lo)
+ perf_percent = (aperf_cur.split.lo * 100) / mperf_cur.split.lo;
+ else
+ perf_percent = 0;
+
+#else
+ if (unlikely(((unsigned long)(-1) / 100) < aperf_cur.whole)) {
+ int shift_count = 7;
+ aperf_cur.whole >>= shift_count;
+ mperf_cur.whole >>= shift_count;
}
- if (unlikely(value != (u32) perf->states[state].status)) {
- printk(KERN_WARNING "acpi-cpufreq: Transition failed\n");
- retval = -ENODEV;
- return (retval);
+ if (aperf_cur.whole && mperf_cur.whole)
+ perf_percent = (aperf_cur.whole * 100) / mperf_cur.whole;
+ else
+ perf_percent = 0;
+
+#endif
+
+ retval = drv_data[cpu]->max_freq * perf_percent / 100;
+
+ put_cpu();
+ set_cpus_allowed(current, saved_mask);
+
+ dprintk("cpu %d: performance percent %d\n", cpu, perf_percent);
+ return retval;
+}
+
+static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
+{
+ struct acpi_cpufreq_data *data = drv_data[cpu];
+ unsigned int freq;
+
+ dprintk("get_cur_freq_on_cpu (%d)\n", cpu);
+
+ if (unlikely(data == NULL ||
+ data->acpi_data == NULL || data->freq_table == NULL)) {
+ return 0;
}
- dprintk("Transition successful after %d microseconds\n", i * 10);
+ freq = extract_freq(get_cur_val(cpumask_of_cpu(cpu)), data);
+ dprintk("cur freq = %u\n", freq);
- perf->state = state;
- return (retval);
+ return freq;
}
+static unsigned int check_freqs(cpumask_t mask, unsigned int freq,
+ struct acpi_cpufreq_data *data)
+{
+ unsigned int cur_freq;
+ unsigned int i;
+
+ for (i=0; i<100; i++) {
+ cur_freq = extract_freq(get_cur_val(mask), data);
+ if (cur_freq == freq)
+ return 1;
+ udelay(10);
+ }
+ return 0;
+}
-static int
-acpi_cpufreq_target (
- struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int acpi_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
{
- struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+ struct acpi_cpufreq_data *data = drv_data[policy->cpu];
struct acpi_processor_performance *perf;
struct cpufreq_freqs freqs;
cpumask_t online_policy_cpus;
- cpumask_t saved_mask;
- cpumask_t set_mask;
- cpumask_t covered_cpus;
- unsigned int cur_state = 0;
+ struct drv_cmd cmd;
+ unsigned int msr;
unsigned int next_state = 0;
- unsigned int result = 0;
- unsigned int j;
- unsigned int tmp;
+ unsigned int next_perf_state = 0;
+ unsigned int i;
+ int result = 0;
- dprintk("acpi_cpufreq_setpolicy\n");
+ dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
- result = cpufreq_frequency_table_target(policy,
- data->freq_table,
- target_freq,
- relation,
- &next_state);
- if (unlikely(result))
- return (result);
+ if (unlikely(data == NULL ||
+ data->acpi_data == NULL || data->freq_table == NULL)) {
+ return -ENODEV;
+ }
perf = data->acpi_data;
- cur_state = perf->state;
- freqs.old = data->freq_table[cur_state].frequency;
- freqs.new = data->freq_table[next_state].frequency;
+ result = cpufreq_frequency_table_target(policy,
+ data->freq_table,
+ target_freq,
+ relation, &next_state);
+ if (unlikely(result))
+ return -ENODEV;
#ifdef CONFIG_HOTPLUG_CPU
/* cpufreq holds the hotplug lock, so we are safe from here on */
@@ -231,106 +417,84 @@ acpi_cpufreq_target (
online_policy_cpus = policy->cpus;
#endif
- for_each_cpu_mask(j, online_policy_cpus) {
- freqs.cpu = j;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ next_perf_state = data->freq_table[next_state].index;
+ if (perf->state == next_perf_state) {
+ if (unlikely(data->resume)) {
+ dprintk("Called after resume, resetting to P%d\n",
+ next_perf_state);
+ data->resume = 0;
+ } else {
+ dprintk("Already at target state (P%d)\n",
+ next_perf_state);
+ return 0;
+ }
}
- /*
- * We need to call driver->target() on all or any CPU in
- * policy->cpus, depending on policy->shared_type.
- */
- saved_mask = current->cpus_allowed;
- cpus_clear(covered_cpus);
- for_each_cpu_mask(j, online_policy_cpus) {
- /*
- * Support for SMP systems.
- * Make sure we are running on CPU that wants to change freq
- */
- cpus_clear(set_mask);
- if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
- cpus_or(set_mask, set_mask, online_policy_cpus);
- else
- cpu_set(j, set_mask);
-
- set_cpus_allowed(current, set_mask);
- if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
- dprintk("couldn't limit to CPUs in this domain\n");
- result = -EAGAIN;
- break;
- }
+ switch (data->cpu_feature) {
+ case SYSTEM_INTEL_MSR_CAPABLE:
+ cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
+ cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
+ /* only the low 16 bits of PERF_CTL select the P-state */
+ msr = (u32) perf->states[next_perf_state].control & INTEL_MSR_RANGE;
+ cmd.val = msr;
+ break;
+ case SYSTEM_IO_CAPABLE:
+ cmd.type = SYSTEM_IO_CAPABLE;
+ cmd.addr.io.port = perf->control_register.address;
+ cmd.addr.io.bit_width = perf->control_register.bit_width;
+ cmd.val = (u32) perf->states[next_perf_state].control;
+ break;
+ default:
+ return -ENODEV;
+ }
- result = acpi_processor_set_performance (data, j, next_state);
- if (result) {
- result = -EAGAIN;
- break;
- }
+ cpus_clear(cmd.mask);
- if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
- break;
-
- cpu_set(j, covered_cpus);
- }
+ if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
+ cmd.mask = online_policy_cpus;
+ else
+ cpu_set(policy->cpu, cmd.mask);
- for_each_cpu_mask(j, online_policy_cpus) {
- freqs.cpu = j;
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ freqs.old = data->freq_table[perf->state].frequency;
+ freqs.new = data->freq_table[next_perf_state].frequency;
+ for_each_cpu_mask(i, cmd.mask) {
+ freqs.cpu = i;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
}
- if (unlikely(result)) {
- /*
- * We have failed halfway through the frequency change.
- * We have sent callbacks to online_policy_cpus and
- * acpi_processor_set_performance() has been called on
- * coverd_cpus. Best effort undo..
- */
-
- if (!cpus_empty(covered_cpus)) {
- for_each_cpu_mask(j, covered_cpus) {
- policy->cpu = j;
- acpi_processor_set_performance (data,
- j,
- cur_state);
- }
- }
+ drv_write(&cmd);
- tmp = freqs.new;
- freqs.new = freqs.old;
- freqs.old = tmp;
- for_each_cpu_mask(j, online_policy_cpus) {
- freqs.cpu = j;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ if (acpi_pstate_strict) {
+ if (!check_freqs(cmd.mask, freqs.new, data)) {
+ dprintk("acpi_cpufreq_target failed (%d)\n",
+ policy->cpu);
+ return -EAGAIN;
}
}
- set_cpus_allowed(current, saved_mask);
- return (result);
-}
+ for_each_cpu_mask(i, cmd.mask) {
+ freqs.cpu = i;
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+ perf->state = next_perf_state;
+ return result;
+}
-static int
-acpi_cpufreq_verify (
- struct cpufreq_policy *policy)
+static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
- unsigned int result = 0;
- struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+ struct acpi_cpufreq_data *data = drv_data[policy->cpu];
dprintk("acpi_cpufreq_verify\n");
- result = cpufreq_frequency_table_verify(policy,
- data->freq_table);
-
- return (result);
+ return cpufreq_frequency_table_verify(policy, data->freq_table);
}
-
static unsigned long
-acpi_cpufreq_guess_freq (
- struct cpufreq_acpi_io *data,
- unsigned int cpu)
+acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
- struct acpi_processor_performance *perf = data->acpi_data;
+ struct acpi_processor_performance *perf = data->acpi_data;
if (cpu_khz) {
/* search the closest match to cpu_khz */
@@ -338,16 +502,16 @@ acpi_cpufreq_guess_freq (
unsigned long freq;
unsigned long freqn = perf->states[0].core_frequency * 1000;
- for (i = 0; i < (perf->state_count - 1); i++) {
+ for (i=0; i<(perf->state_count-1); i++) {
freq = freqn;
freqn = perf->states[i+1].core_frequency * 1000;
if ((2 * cpu_khz) > (freqn + freq)) {
perf->state = i;
- return (freq);
+ return freq;
}
}
- perf->state = perf->state_count - 1;
- return (freqn);
+ perf->state = perf->state_count-1;
+ return freqn;
} else {
/* assume CPU is at P0... */
perf->state = 0;
@@ -355,7 +519,6 @@ acpi_cpufreq_guess_freq (
}
}
-
/*
* acpi_cpufreq_early_init - initialize ACPI P-States library
*
@@ -364,30 +527,34 @@ acpi_cpufreq_guess_freq (
* do _PDC and _PSD and find out the processor dependency for the
* actual init that will happen later...
*/
-static int acpi_cpufreq_early_init_acpi(void)
+static int acpi_cpufreq_early_init(void)
{
- struct acpi_processor_performance *data;
- unsigned int i, j;
+ struct acpi_processor_performance *data;
+ cpumask_t covered = CPU_MASK_NONE;
+ unsigned int i, j;
dprintk("acpi_cpufreq_early_init\n");
for_each_possible_cpu(i) {
- data = kzalloc(sizeof(struct acpi_processor_performance),
- GFP_KERNEL);
+ data = kzalloc(sizeof(struct acpi_processor_performance),
+ GFP_KERNEL);
if (!data) {
- for_each_possible_cpu(j) {
+ for_each_cpu_mask(j, covered) {
kfree(acpi_perf_data[j]);
acpi_perf_data[j] = NULL;
}
- return (-ENOMEM);
+ return -ENOMEM;
}
acpi_perf_data[i] = data;
+ cpu_set(i, covered);
}
/* Do initialization in ACPI core */
- return acpi_processor_preregister_performance(acpi_perf_data);
+ acpi_processor_preregister_performance(acpi_perf_data);
+ return 0;
}
+#ifdef CONFIG_SMP
/*
* Some BIOSes do SW_ANY coordination internally, either set it up in hw
* or do it in BIOS firmware and won't inform about it to OS. If not
@@ -414,39 +581,42 @@ static struct dmi_system_id sw_any_bug_dmi_table[] = {
},
{ }
};
+#endif
-static int
-acpi_cpufreq_cpu_init (
- struct cpufreq_policy *policy)
+static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- unsigned int i;
- unsigned int cpu = policy->cpu;
- struct cpufreq_acpi_io *data;
- unsigned int result = 0;
+ unsigned int i;
+ unsigned int valid_states = 0;
+ unsigned int cpu = policy->cpu;
+ struct acpi_cpufreq_data *data;
+ unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
- struct acpi_processor_performance *perf;
+ struct acpi_processor_performance *perf;
dprintk("acpi_cpufreq_cpu_init\n");
if (!acpi_perf_data[cpu])
- return (-ENODEV);
+ return -ENODEV;
- data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
+ data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
if (!data)
- return (-ENOMEM);
+ return -ENOMEM;
data->acpi_data = acpi_perf_data[cpu];
- acpi_io_data[cpu] = data;
+ drv_data[cpu] = data;
- result = acpi_processor_register_performance(data->acpi_data, cpu);
+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
+ acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
+ result = acpi_processor_register_performance(data->acpi_data, cpu);
if (result)
goto err_free;
perf = data->acpi_data;
policy->shared_type = perf->shared_type;
+
/*
- * Will let policy->cpus know about dependency only when software
+ * Will let policy->cpus know about dependency only when software
* coordination is required.
*/
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
@@ -462,10 +632,6 @@ acpi_cpufreq_cpu_init (
}
#endif
- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
- }
-
/* capability check */
if (perf->state_count <= 1) {
dprintk("No P-States\n");
@@ -473,17 +639,33 @@ acpi_cpufreq_cpu_init (
goto err_unreg;
}
- if ((perf->control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
- (perf->status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
- dprintk("Unsupported address space [%d, %d]\n",
- (u32) (perf->control_register.space_id),
- (u32) (perf->status_register.space_id));
+ if (perf->control_register.space_id != perf->status_register.space_id) {
+ result = -ENODEV;
+ goto err_unreg;
+ }
+
+ switch (perf->control_register.space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ dprintk("SYSTEM IO addr space\n");
+ data->cpu_feature = SYSTEM_IO_CAPABLE;
+ break;
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ dprintk("HARDWARE addr space\n");
+ if (!check_est_cpu(cpu)) {
+ result = -ENODEV;
+ goto err_unreg;
+ }
+ data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
+ break;
+ default:
+ dprintk("Unknown addr space %d\n",
+ (u32) (perf->control_register.space_id));
result = -ENODEV;
goto err_unreg;
}
- /* alloc freq_table */
- data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (perf->state_count + 1), GFP_KERNEL);
+ data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
+ (perf->state_count+1), GFP_KERNEL);
if (!data->freq_table) {
result = -ENOMEM;
goto err_unreg;
@@ -492,129 +674,140 @@ acpi_cpufreq_cpu_init (
/* detect transition latency */
policy->cpuinfo.transition_latency = 0;
for (i=0; i<perf->state_count; i++) {
- if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
- policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000;
+ if ((perf->states[i].transition_latency * 1000) >
+ policy->cpuinfo.transition_latency)
+ policy->cpuinfo.transition_latency =
+ perf->states[i].transition_latency * 1000;
}
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
- /* The current speed is unknown and not detectable by ACPI... */
- policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
-
+ data->max_freq = perf->states[0].core_frequency * 1000;
/* table init */
- for (i=0; i<=perf->state_count; i++)
- {
- data->freq_table[i].index = i;
- if (i<perf->state_count)
- data->freq_table[i].frequency = perf->states[i].core_frequency * 1000;
- else
- data->freq_table[i].frequency = CPUFREQ_TABLE_END;
+ for (i=0; i<perf->state_count; i++) {
+ if (i>0 && perf->states[i].core_frequency ==
+ perf->states[i-1].core_frequency)
+ continue;
+
+ data->freq_table[valid_states].index = i;
+ data->freq_table[valid_states].frequency =
+ perf->states[i].core_frequency * 1000;
+ valid_states++;
}
+ data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
- if (result) {
+ if (result)
goto err_freqfree;
+
+ switch (perf->control_register.space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ /* Current speed is unknown and not detectable by IO port */
+ policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
+ break;
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
+ get_cur_freq_on_cpu(cpu);
+ break;
+ default:
+ break;
}
/* notify BIOS that we exist */
acpi_processor_notify_smm(THIS_MODULE);
- printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management activated.\n",
- cpu);
+ /* Check for APERF/MPERF support in hardware */
+ if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) {
+ unsigned int ecx;
+ ecx = cpuid_ecx(6);
+ if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY)
+ acpi_cpufreq_driver.getavg = get_measured_perf;
+ }
+
+ dprintk("CPU%u - ACPI performance management activated.\n", cpu);
for (i = 0; i < perf->state_count; i++)
dprintk(" %cP%d: %d MHz, %d mW, %d uS\n",
- (i == perf->state?'*':' '), i,
+ (i == perf->state ? '*' : ' '), i,
(u32) perf->states[i].core_frequency,
(u32) perf->states[i].power,
(u32) perf->states[i].transition_latency);
cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
-
+
/*
* the first call to ->target() should result in us actually
* writing something to the appropriate registers.
*/
data->resume = 1;
-
- return (result);
- err_freqfree:
+ return result;
+
+err_freqfree:
kfree(data->freq_table);
- err_unreg:
+err_unreg:
acpi_processor_unregister_performance(perf, cpu);
- err_free:
+err_free:
kfree(data);
- acpi_io_data[cpu] = NULL;
+ drv_data[cpu] = NULL;
- return (result);
+ return result;
}
-
-static int
-acpi_cpufreq_cpu_exit (
- struct cpufreq_policy *policy)
+static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
- struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
-
+ struct acpi_cpufreq_data *data = drv_data[policy->cpu];
dprintk("acpi_cpufreq_cpu_exit\n");
if (data) {
cpufreq_frequency_table_put_attr(policy->cpu);
- acpi_io_data[policy->cpu] = NULL;
- acpi_processor_unregister_performance(data->acpi_data, policy->cpu);
+ drv_data[policy->cpu] = NULL;
+ acpi_processor_unregister_performance(data->acpi_data,
+ policy->cpu);
kfree(data);
}
- return (0);
+ return 0;
}
-static int
-acpi_cpufreq_resume (
- struct cpufreq_policy *policy)
+static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
- struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
-
+ struct acpi_cpufreq_data *data = drv_data[policy->cpu];
dprintk("acpi_cpufreq_resume\n");
data->resume = 1;
- return (0);
+ return 0;
}
-
-static struct freq_attr* acpi_cpufreq_attr[] = {
+static struct freq_attr *acpi_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver acpi_cpufreq_driver = {
- .verify = acpi_cpufreq_verify,
- .target = acpi_cpufreq_target,
- .init = acpi_cpufreq_cpu_init,
- .exit = acpi_cpufreq_cpu_exit,
- .resume = acpi_cpufreq_resume,
- .name = "acpi-cpufreq",
- .owner = THIS_MODULE,
- .attr = acpi_cpufreq_attr,
+ .verify = acpi_cpufreq_verify,
+ .target = acpi_cpufreq_target,
+ .init = acpi_cpufreq_cpu_init,
+ .exit = acpi_cpufreq_cpu_exit,
+ .resume = acpi_cpufreq_resume,
+ .name = "acpi-cpufreq",
+ .owner = THIS_MODULE,
+ .attr = acpi_cpufreq_attr,
};
-
-static int __init
-acpi_cpufreq_init (void)
+static int __init acpi_cpufreq_init(void)
{
dprintk("acpi_cpufreq_init\n");
- acpi_cpufreq_early_init_acpi();
+ acpi_cpufreq_early_init();
return cpufreq_register_driver(&acpi_cpufreq_driver);
}
-
-static void __exit
-acpi_cpufreq_exit (void)
+static void __exit acpi_cpufreq_exit(void)
{
- unsigned int i;
+ unsigned int i;
dprintk("acpi_cpufreq_exit\n");
cpufreq_unregister_driver(&acpi_cpufreq_driver);
@@ -627,7 +820,9 @@ acpi_cpufreq_exit (void)
}
module_param(acpi_pstate_strict, uint, 0644);
-MODULE_PARM_DESC(acpi_pstate_strict, "value 0 or non-zero. non-zero -> strict ACPI checks are performed during frequency changes.");
+MODULE_PARM_DESC(acpi_pstate_strict,
+ "value 0 or non-zero. non-zero -> strict ACPI checks are "
+ "performed during frequency changes.");
late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);
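
To make the APERF/MPERF arithmetic in get_measured_perf() above
concrete, a self-contained user-space sketch with hypothetical counter
deltas (the values are made up; only the ratio matters):

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical counter deltas since the last sample. */
            unsigned long long aperf = 1800000000ULL; /* actual-rate count */
            unsigned long long mperf = 2000000000ULL; /* max-rate count */
            unsigned int max_freq_khz = 2000000;      /* 2 GHz CPU */

            /* APERF/MPERF = fraction of max frequency while in C0. */
            unsigned int perf_percent = (unsigned int)(aperf * 100 / mperf);
            unsigned int measured_khz = max_freq_khz / 100 * perf_percent;

            /* Prints: perf_percent=90% measured=1800000 kHz */
            printf("perf_percent=%u%% measured=%u kHz\n",
                   perf_percent, measured_khz);
            return 0;
    }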
diff --git a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
index 92afa3bc84f..6667e9cceb9 100644
--- a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
@@ -447,7 +447,6 @@ static int __init cpufreq_gx_init(void)
int ret;
struct gxfreq_params *params;
struct pci_dev *gx_pci;
- u32 class_rev;
/* Test if we have the right hardware */
if ((gx_pci = gx_detect_chipset()) == NULL)
@@ -472,8 +471,7 @@ static int __init cpufreq_gx_init(void)
pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2));
pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration));
pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration));
- pci_read_config_dword(params->cs55x0, PCI_CLASS_REVISION, &class_rev);
- params->pci_rev = class_rev && 0xff;
+ pci_read_config_byte(params->cs55x0, PCI_REVISION_ID, &params->pci_rev);
if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) {
kfree(params);
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index 7233abe5d69..c548daad347 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -410,7 +410,7 @@ static int __init longhaul_get_ranges(void)
maxmult=longhaul_get_cpu_mult();
/* Starting with the 1.2GHz parts, theres a 200MHz bus. */
- if ((cpu_khz/1000) > 1200)
+ if ((cpu_khz/maxmult) > 13400)
fsb = 200;
else
fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
@@ -583,6 +583,10 @@ static int enable_arbiter_disable(void)
if (dev == NULL) {
reg = 0x76;
dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_862X_0, NULL);
+ /* Find CN400 V-Link host bridge */
+ if (dev == NULL)
+ dev = pci_find_device(PCI_VENDOR_ID_VIA, 0x7259, NULL);
+
}
if (dev != NULL) {
/* Enable access to port 0x22 */
@@ -734,7 +738,7 @@ print_support_type:
return 0;
err_acpi:
- printk(KERN_ERR PFX "No ACPI support. No VT8601 or VT8623 northbridge. Aborting.\n");
+ printk(KERN_ERR PFX "No ACPI support. Unsupported northbridge. Aborting.\n");
return -ENODEV;
}
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index 304d2eaa4a1..bec50170b75 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -163,29 +163,27 @@ static int cpufreq_p4_verify(struct cpufreq_policy *policy)
static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
- if ((c->x86 == 0x06) && (c->x86_model == 0x09)) {
- /* Pentium M (Banias) */
- printk(KERN_WARNING PFX "Warning: Pentium M detected. "
- "The speedstep_centrino module offers voltage scaling"
- " in addition of frequency scaling. You should use "
- "that instead of p4-clockmod, if possible.\n");
- return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM);
- }
-
- if ((c->x86 == 0x06) && (c->x86_model == 0x0D)) {
- /* Pentium M (Dothan) */
- printk(KERN_WARNING PFX "Warning: Pentium M detected. "
- "The speedstep_centrino module offers voltage scaling"
- " in addition of frequency scaling. You should use "
- "that instead of p4-clockmod, if possible.\n");
- /* on P-4s, the TSC runs with constant frequency independent whether
- * throttling is active or not. */
- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
- return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM);
+ if (c->x86 == 0x06) {
+ if (cpu_has(c, X86_FEATURE_EST))
+ printk(KERN_WARNING PFX "Warning: EST-capable CPU detected. "
+ "The acpi-cpufreq module offers voltage scaling"
+ " in addition of frequency scaling. You should use "
+ "that instead of p4-clockmod, if possible.\n");
+ switch (c->x86_model) {
+ case 0x0E: /* Core */
+ case 0x0F: /* Core Duo */
+ p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+ return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PCORE);
+ case 0x0D: /* Pentium M (Dothan) */
+ p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+ /* fall through */
+ case 0x09: /* Pentium M (Banias) */
+ return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM);
+ }
}
if (c->x86 != 0xF) {
- printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <linux@brodo.de>\n");
+ printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <cpufreq@lists.linux.org.uk>\n");
return 0;
}
diff --git a/arch/i386/kernel/cpu/cpufreq/sc520_freq.c b/arch/i386/kernel/cpu/cpufreq/sc520_freq.c
index ef457d50f4a..b8fb4b521c6 100644
--- a/arch/i386/kernel/cpu/cpufreq/sc520_freq.c
+++ b/arch/i386/kernel/cpu/cpufreq/sc520_freq.c
@@ -153,6 +153,7 @@ static struct cpufreq_driver sc520_freq_driver = {
static int __init sc520_freq_init(void)
{
struct cpuinfo_x86 *c = cpu_data;
+ int err;
/* Test if we have the right hardware */
if(c->x86_vendor != X86_VENDOR_AMD ||
@@ -166,7 +167,11 @@ static int __init sc520_freq_init(void)
return -ENOMEM;
}
- return cpufreq_register_driver(&sc520_freq_driver);
+ err = cpufreq_register_driver(&sc520_freq_driver);
+ if (err)
+ iounmap(cpuctl);
+
+ return err;
}
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index e8993baf3d1..5113e923163 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -36,6 +36,7 @@
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
+#define INTEL_MSR_RANGE (0xffff)
struct cpu_id
{
@@ -379,6 +380,7 @@ static int centrino_cpu_early_init_acpi(void)
}
+#ifdef CONFIG_SMP
/*
* Some BIOSes do SW_ANY coordination internally, either set it up in hw
* or do it in BIOS firmware and won't inform about it to OS. If not
@@ -392,7 +394,6 @@ static int sw_any_bug_found(struct dmi_system_id *d)
return 0;
}
-
static struct dmi_system_id sw_any_bug_dmi_table[] = {
{
.callback = sw_any_bug_found,
@@ -405,7 +406,7 @@ static struct dmi_system_id sw_any_bug_dmi_table[] = {
},
{ }
};
-
+#endif
/*
* centrino_cpu_init_acpi - register with ACPI P-States library
@@ -463,8 +464,9 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
}
for (i=0; i<p->state_count; i++) {
- if (p->states[i].control != p->states[i].status) {
- dprintk("Different control (%llu) and status values (%llu)\n",
+ if ((p->states[i].control & INTEL_MSR_RANGE) !=
+ (p->states[i].status & INTEL_MSR_RANGE)) {
+ dprintk("Different MSR bits in control (%llu) and status (%llu)\n",
p->states[i].control, p->states[i].status);
result = -EINVAL;
goto err_unreg;
@@ -500,7 +502,7 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
}
for (i=0; i<p->state_count; i++) {
- centrino_model[cpu]->op_points[i].index = p->states[i].control;
+ centrino_model[cpu]->op_points[i].index = p->states[i].control & INTEL_MSR_RANGE;
centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000;
dprintk("adding state %i with frequency %u and control value %04x\n",
i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index);
@@ -531,6 +533,9 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
/* notify BIOS that we exist */
acpi_processor_notify_smm(THIS_MODULE);
+ printk("speedstep-centrino with X86_SPEEDSTEP_CENTRINO_ACPI"
+ "config is deprecated.\n "
+ "Use X86_ACPI_CPUFREQ (acpi-cpufreq instead.\n" );
return 0;
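
Both this driver and acpi-cpufreq carry the sw_any_bug_dmi_table shown
above. A sketch of how such a DMI blacklist is wired up, with a
placeholder table entry (the ident and vendor string are illustrative,
not the real blacklist):

    #include <linux/kernel.h>
    #include <linux/dmi.h>

    static int sw_any_bug_found(struct dmi_system_id *d)
    {
            /* In the real drivers this forces software-ALL coordination
             * for the affected BIOS. */
            printk(KERN_INFO "BIOS workaround enabled for %s\n", d->ident);
            return 0;
    }

    static struct dmi_system_id quirk_dmi_table[] = {
            {
                    .callback = sw_any_bug_found,
                    .ident = "Example System",          /* placeholder */
                    .matches = {
                            DMI_MATCH(DMI_BIOS_VENDOR, "Example Vendor"),
                    },
            },
            { }
    };

    /* dmi_check_system() runs the callback of every matching entry and
     * returns the number of matches. */
    static void check_bios_quirks(void)
    {
            if (dmi_check_system(quirk_dmi_table))
                    printk(KERN_INFO "BIOS quirk active\n");
    }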
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
index 4f46cac155c..d59277c0091 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
@@ -123,6 +123,36 @@ static unsigned int pentiumM_get_frequency(void)
return (msr_tmp * 100 * 1000);
}
+static unsigned int pentium_core_get_frequency(void)
+{
+ u32 fsb = 0;
+ u32 msr_lo, msr_tmp;
+
+ rdmsr(MSR_FSB_FREQ, msr_lo, msr_tmp);
+ /* see table B-2 of 25366920.pdf */
+ switch (msr_lo & 0x07) {
+ case 5:
+ fsb = 100000;
+ break;
+ case 1:
+ fsb = 133333;
+ break;
+ case 3:
+ fsb = 166667;
+ break;
+ default:
+ printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value");
+ }
+
+ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
+ dprintk("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
+
+ msr_tmp = (msr_lo >> 22) & 0x1f;
+ dprintk("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * fsb));
+
+ return (msr_tmp * fsb);
+}
+
static unsigned int pentium4_get_frequency(void)
{
@@ -174,6 +204,8 @@ static unsigned int pentium4_get_frequency(void)
unsigned int speedstep_get_processor_frequency(unsigned int processor)
{
switch (processor) {
+ case SPEEDSTEP_PROCESSOR_PCORE:
+ return pentium_core_get_frequency();
case SPEEDSTEP_PROCESSOR_PM:
return pentiumM_get_frequency();
case SPEEDSTEP_PROCESSOR_P4D:
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
index b735429c50b..b11bcc608ca 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
@@ -22,6 +22,7 @@
* the speedstep_get_processor_frequency() call. */
#define SPEEDSTEP_PROCESSOR_PM 0xFFFFFF03 /* Pentium M */
#define SPEEDSTEP_PROCESSOR_P4D 0xFFFFFF04 /* desktop P4 */
+#define SPEEDSTEP_PROCESSOR_PCORE 0xFFFFFF05 /* Core */
/* speedstep states -- only two of them */
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
index c28333d5364..ff0d8980611 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
@@ -360,9 +360,6 @@ static int __init speedstep_init(void)
case SPEEDSTEP_PROCESSOR_PIII_C:
case SPEEDSTEP_PROCESSOR_PIII_C_EARLY:
break;
- case SPEEDSTEP_PROCESSOR_P4M:
- printk(KERN_INFO "speedstep-smi: you're trying to use this cpufreq driver on a Pentium 4-based CPU. Most likely it will not work.\n");
- break;
default:
speedstep_processor = 0;
}
diff --git a/arch/powerpc/platforms/4xx/Kconfig b/arch/powerpc/platforms/4xx/Kconfig
index ed39d6a3d22..2f2a13ed766 100644
--- a/arch/powerpc/platforms/4xx/Kconfig
+++ b/arch/powerpc/platforms/4xx/Kconfig
@@ -179,7 +179,7 @@ config BIOS_FIXUP
# OAK doesn't exist but wanted to keep this around for any future 403GCX boards
config 403GCX
bool
- depends OAK
+ depends on OAK
default y
config 405EP
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index ddbe398fbd4..b3c2ce4cb7a 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -35,7 +35,7 @@ config HDPU
Select HDPU if configuring a Sky Computers Compute Blade.
config HDPU_FEATURES
- depends HDPU
+ depends on HDPU
tristate "HDPU-Features"
help
Select to enable HDPU enhanced features.
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 692b5ba5320..8eb82efe05a 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -624,7 +624,7 @@ config HDPU
Select HDPU if configuring a Sky Computers Compute Blade.
config HDPU_FEATURES
- depends HDPU
+ depends on HDPU
tristate "HDPU-Features"
help
Select to enable HDPU enhanced features.
@@ -735,7 +735,7 @@ config LITE5200
config LITE5200B
bool "Freescale LITE5200B"
- depends LITE5200
+ depends on LITE5200
help
Support for the LITE5200B dev board for the MPC5200 from Freescale.
This is the new board with 2 PCI slots.
diff --git a/arch/ppc/platforms/4xx/Kconfig b/arch/ppc/platforms/4xx/Kconfig
index 293bd489e7d..6980de420e9 100644
--- a/arch/ppc/platforms/4xx/Kconfig
+++ b/arch/ppc/platforms/4xx/Kconfig
@@ -189,7 +189,7 @@ config BIOS_FIXUP
# OAK doesn't exist but wanted to keep this around for any future 403GCX boards
config 403GCX
bool
- depends OAK
+ depends on OAK
default y
config 405EP
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 286bc0b3207..b2e9762e13c 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -334,13 +334,12 @@ static int eth_configure(int n, void *init, char *mac,
size = transport->private_size + sizeof(struct uml_net_private) +
sizeof(((struct uml_net_private *) 0)->user);
- device = kmalloc(sizeof(*device), GFP_KERNEL);
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
if (device == NULL) {
printk(KERN_ERR "eth_configure failed to allocate uml_net\n");
return(1);
}
- memset(device, 0, sizeof(*device));
INIT_LIST_HEAD(&device->list);
device->index = n;
diff --git a/arch/v850/Kconfig b/arch/v850/Kconfig
index bcf825875d1..f0d4d72e560 100644
--- a/arch/v850/Kconfig
+++ b/arch/v850/Kconfig
@@ -105,17 +105,17 @@ menu "Processor type and features"
# currently support
config V850E_MA1
bool
- depends RTE_CB_MA1
+ depends on RTE_CB_MA1
default y
# Similarly for the RTE-V850E/NB85E-CB - V850E/TEG
config V850E_TEG
bool
- depends RTE_CB_NB85E
+ depends on RTE_CB_NB85E
default y
# ... and the RTE-V850E/ME2-CB - V850E/ME2
config V850E_ME2
bool
- depends RTE_CB_ME2
+ depends on RTE_CB_ME2
default y
@@ -123,7 +123,7 @@ menu "Processor type and features"
config V850E2_SIM85E2
bool
- depends V850E2_SIM85E2C || V850E2_SIM85E2S
+ depends on V850E2_SIM85E2C || V850E2_SIM85E2S
default y
@@ -132,7 +132,7 @@ menu "Processor type and features"
# V850E2 processors
config V850E2
bool
- depends V850E2_SIM85E2 || V850E2_FPGA85E2C || V850E2_ANNA
+ depends on V850E2_SIM85E2 || V850E2_FPGA85E2C || V850E2_ANNA
default y
@@ -141,7 +141,7 @@ menu "Processor type and features"
# Boards in the RTE-x-CB series
config RTE_CB
bool
- depends RTE_CB_MA1 || RTE_CB_NB85E || RTE_CB_ME2
+ depends on RTE_CB_MA1 || RTE_CB_NB85E || RTE_CB_ME2
default y
config RTE_CB_MULTI
@@ -149,28 +149,28 @@ menu "Processor type and features"
# RTE_CB_NB85E can either have multi ROM support or not, but
# other platforms (currently only RTE_CB_MA1) require it.
prompt "Multi monitor ROM support" if RTE_CB_NB85E
- depends RTE_CB_MA1 || RTE_CB_NB85E
+ depends on RTE_CB_MA1 || RTE_CB_NB85E
default y
config RTE_CB_MULTI_DBTRAP
bool "Pass illegal insn trap / dbtrap to kernel"
- depends RTE_CB_MULTI
+ depends on RTE_CB_MULTI
default n
config RTE_CB_MA1_KSRAM
bool "Kernel in SRAM (limits size of kernel)"
- depends RTE_CB_MA1 && RTE_CB_MULTI
+ depends on RTE_CB_MA1 && RTE_CB_MULTI
default n
config RTE_MB_A_PCI
bool "Mother-A PCI support"
- depends RTE_CB
+ depends on RTE_CB
default y
# The GBUS is used to talk to the RTE-MOTHER-A board
config RTE_GBUS_INT
bool
- depends RTE_MB_A_PCI
+ depends on RTE_MB_A_PCI
default y
# The only PCI bus we support is on the RTE-MOTHER-A board
@@ -209,7 +209,7 @@ menu "Processor type and features"
config ROM_KERNEL
bool "Kernel in ROM"
- depends V850E2_ANNA || V850E_AS85EP1 || RTE_CB_ME2
+ depends on V850E2_ANNA || V850E_AS85EP1 || RTE_CB_ME2
# Some platforms pre-zero memory, in which case the kernel doesn't need to
config ZERO_BSS
@@ -225,10 +225,10 @@ menu "Processor type and features"
config V850E_HIGHRES_TIMER
bool "High resolution timer support"
- depends V850E_TIMER_D
+ depends on V850E_TIMER_D
config TIME_BOOTUP
bool "Time bootup"
- depends V850E_HIGHRES_TIMER
+ depends on V850E_HIGHRES_TIMER
config RESET_GUARD
bool "Reset Guard"
diff --git a/arch/x86_64/kernel/cpufreq/Kconfig b/arch/x86_64/kernel/cpufreq/Kconfig
index 81f1562e539..3abcfa3e1ed 100644
--- a/arch/x86_64/kernel/cpufreq/Kconfig
+++ b/arch/x86_64/kernel/cpufreq/Kconfig
@@ -27,10 +27,13 @@ config X86_POWERNOW_K8_ACPI
default y
config X86_SPEEDSTEP_CENTRINO
- tristate "Intel Enhanced SpeedStep"
+ tristate "Intel Enhanced SpeedStep (deprecated)"
select CPU_FREQ_TABLE
depends on ACPI_PROCESSOR
help
+ This option is deprecated: its functionality is now merged into
+ acpi_cpufreq (X86_ACPI_CPUFREQ). Use that driver instead of
+ speedstep_centrino.
This adds the CPUFreq driver for Enhanced SpeedStep enabled
mobile CPUs. This means Intel Pentium M (Centrino) CPUs
or 64bit enabled Intel Xeons.
@@ -50,6 +53,7 @@ config X86_ACPI_CPUFREQ
help
This driver adds a CPUFreq driver which utilizes the ACPI
Processor Performance States.
+ This driver also supports Intel Enhanced Speedstep.
For details, take a look at <file:Documentation/cpu-freq/>.
diff --git a/arch/x86_64/kernel/cpufreq/Makefile b/arch/x86_64/kernel/cpufreq/Makefile
index d8b59387922..753ce1dd418 100644
--- a/arch/x86_64/kernel/cpufreq/Makefile
+++ b/arch/x86_64/kernel/cpufreq/Makefile
@@ -5,8 +5,8 @@
SRCDIR := ../../../i386/kernel/cpu/cpufreq
obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
-obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
+obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index b10f4d8fdc7..0a3aee29e06 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -867,7 +867,7 @@ config SONYPI
config TANBAC_TB0219
tristate "TANBAC TB0219 base board support"
- depends TANBAC_TB022X
+ depends on TANBAC_TB022X
select GPIO_VR41XX
source "drivers/char/agp/Kconfig"
diff --git a/drivers/char/watchdog/at91rm9200_wdt.c b/drivers/char/watchdog/at91rm9200_wdt.c
index cb86967e2c5..38bd3737259 100644
--- a/drivers/char/watchdog/at91rm9200_wdt.c
+++ b/drivers/char/watchdog/at91rm9200_wdt.c
@@ -203,9 +203,9 @@ static int __init at91wdt_probe(struct platform_device *pdev)
{
int res;
- if (at91wdt_miscdev.dev)
+ if (at91wdt_miscdev.parent)
return -EBUSY;
- at91wdt_miscdev.dev = &pdev->dev;
+ at91wdt_miscdev.parent = &pdev->dev;
res = misc_register(&at91wdt_miscdev);
if (res)
@@ -221,7 +221,7 @@ static int __exit at91wdt_remove(struct platform_device *pdev)
res = misc_deregister(&at91wdt_miscdev);
if (!res)
- at91wdt_miscdev.dev = NULL;
+ at91wdt_miscdev.parent = NULL;
return res;
}
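This and the two watchdog hunks that follow (mpcore, omap) are the same API
catch-up: struct miscdevice no longer carries a driver-visible `dev' member;
the device to hang the node off is now called `parent'. A minimal sketch of
the resulting pattern -- the names and the trivial fops are illustrative, not
taken from any of these drivers:

        #include <linux/fs.h>
        #include <linux/module.h>
        #include <linux/miscdevice.h>
        #include <linux/platform_device.h>

        static const struct file_operations example_wdt_fops = {
                .owner  = THIS_MODULE,
        };

        static struct miscdevice example_wdt_miscdev = {
                .minor  = WATCHDOG_MINOR,
                .name   = "watchdog",
                .fops   = &example_wdt_fops,
        };

        static int __init example_wdt_probe(struct platform_device *pdev)
        {
                if (example_wdt_miscdev.parent)
                        return -EBUSY;
                /* parent the misc device to the platform device */
                example_wdt_miscdev.parent = &pdev->dev;
                return misc_register(&example_wdt_miscdev);
        }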
diff --git a/drivers/char/watchdog/mpcore_wdt.c b/drivers/char/watchdog/mpcore_wdt.c
index 3404a9c67f0..e88947f8fe5 100644
--- a/drivers/char/watchdog/mpcore_wdt.c
+++ b/drivers/char/watchdog/mpcore_wdt.c
@@ -347,7 +347,7 @@ static int __devinit mpcore_wdt_probe(struct platform_device *dev)
goto err_free;
}
- mpcore_wdt_miscdev.dev = &dev->dev;
+ mpcore_wdt_miscdev.parent = &dev->dev;
ret = misc_register(&mpcore_wdt_miscdev);
if (ret) {
dev_printk(KERN_ERR, _dev, "cannot register miscdev on minor=%d (err=%d)\n",
diff --git a/drivers/char/watchdog/omap_wdt.c b/drivers/char/watchdog/omap_wdt.c
index 5dbd7dc2936..6c6f97332db 100644
--- a/drivers/char/watchdog/omap_wdt.c
+++ b/drivers/char/watchdog/omap_wdt.c
@@ -290,7 +290,7 @@ static int __init omap_wdt_probe(struct platform_device *pdev)
omap_wdt_disable();
omap_wdt_adjust_timeout(timer_margin);
- omap_wdt_miscdev.dev = &pdev->dev;
+ omap_wdt_miscdev.parent = &pdev->dev;
ret = misc_register(&omap_wdt_miscdev);
if (ret)
goto fail;
diff --git a/drivers/char/watchdog/pcwd_usb.c b/drivers/char/watchdog/pcwd_usb.c
index 61138726b50..2da5ac99687 100644
--- a/drivers/char/watchdog/pcwd_usb.c
+++ b/drivers/char/watchdog/pcwd_usb.c
@@ -42,6 +42,7 @@
#include <asm/uaccess.h>
#include <linux/usb.h>
#include <linux/mutex.h>
+#include <linux/hid.h> /* For HID_REQ_SET_REPORT & HID_DT_REPORT */
#ifdef CONFIG_USB_DEBUG
@@ -109,10 +110,6 @@ MODULE_DEVICE_TABLE (usb, usb_pcwd_table);
#define CMD_ENABLE_WATCHDOG 0x30 /* Enable / Disable Watchdog */
#define CMD_DISABLE_WATCHDOG CMD_ENABLE_WATCHDOG
-/* Some defines that I like to be somewhere else like include/linux/usb_hid.h */
-#define HID_REQ_SET_REPORT 0x09
-#define HID_DT_REPORT (USB_TYPE_CLASS | 0x02)
-
/* We can only use 1 card due to the /dev/watchdog restriction */
static int cards_found;
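With the local copies gone, HID_REQ_SET_REPORT and HID_DT_REPORT come from
<linux/hid.h>, which is where the HID class request and descriptor type codes
belong. For orientation, a hedged sketch of how such a command block typically
goes out over the control pipe -- the report type/id value, length and timeout
below are illustrative, not lifted from this driver:

        #include <linux/usb.h>
        #include <linux/hid.h>  /* HID_REQ_SET_REPORT, HID_DT_REPORT */

        static int example_send_report(struct usb_device *udev, int ifnum,
                                       unsigned char *buf, int len)
        {
                return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                        HID_REQ_SET_REPORT,
                        USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
                        0x0200,         /* output report, id 0 (assumed) */
                        ifnum, buf, len, USB_CTRL_SET_TIMEOUT);
        }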
diff --git a/drivers/char/watchdog/rm9k_wdt.c b/drivers/char/watchdog/rm9k_wdt.c
index ec3909371c2..7576a13e86b 100644
--- a/drivers/char/watchdog/rm9k_wdt.c
+++ b/drivers/char/watchdog/rm9k_wdt.c
@@ -47,7 +47,7 @@
/* Function prototypes */
-static irqreturn_t wdt_gpi_irqhdl(int, void *, struct pt_regs *);
+static irqreturn_t wdt_gpi_irqhdl(int, void *);
static void wdt_gpi_start(void);
static void wdt_gpi_stop(void);
static void wdt_gpi_set_timeout(unsigned int);
@@ -94,8 +94,28 @@ module_param(nowayout, bool, 0444);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be disabled once started");
+/* Kernel interfaces */
+static struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = wdt_gpi_open,
+ .release = wdt_gpi_release,
+ .write = wdt_gpi_write,
+ .unlocked_ioctl = wdt_gpi_ioctl,
+};
+
+static struct miscdevice miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = wdt_gpi_name,
+ .fops = &fops,
+};
+
+static struct notifier_block wdt_gpi_shutdown = {
+ .notifier_call = wdt_gpi_notify,
+};
+
+
/* Interrupt handler */
-static irqreturn_t wdt_gpi_irqhdl(int irq, void *ctxt, struct pt_regs *regs)
+static irqreturn_t wdt_gpi_irqhdl(int irq, void *ctxt)
{
if (!unlikely(__raw_readl(wd_regs + 0x0008) & 0x1))
return IRQ_NONE;
@@ -312,26 +332,6 @@ wdt_gpi_notify(struct notifier_block *this, unsigned long code, void *unused)
}
-/* Kernel interfaces */
-static struct file_operations fops = {
- .owner = THIS_MODULE,
- .open = wdt_gpi_open,
- .release = wdt_gpi_release,
- .write = wdt_gpi_write,
- .unlocked_ioctl = wdt_gpi_ioctl,
-};
-
-static struct miscdevice miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = wdt_gpi_name,
- .fops = &fops,
-};
-
-static struct notifier_block wdt_gpi_shutdown = {
- .notifier_call = wdt_gpi_notify,
-};
-
-
/* Init & exit procedures */
static const struct resource *
wdt_gpi_get_resource(struct platform_device *pdv, const char *name,
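The prototype change reflects the 2.6.19 interrupt API: handlers no longer
receive a struct pt_regs pointer, and the rare handler that really needs the
registers calls get_irq_regs() instead. A sketch of the current shape (the
device check is a placeholder, not this driver's logic):

        #include <linux/interrupt.h>

        static irqreturn_t example_irqhdl(int irq, void *dev_id)
        {
                if (!device_raised_this_irq(dev_id))    /* placeholder */
                        return IRQ_NONE;
                /* ... acknowledge and handle the interrupt ... */
                return IRQ_HANDLED;
        }

Registration itself is unchanged apart from the handler type:
request_irq(irq, example_irqhdl, IRQF_SHARED, "example", dev_id).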
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 47ab42db122..9fb2edf3661 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -29,7 +29,8 @@
#include <linux/completion.h>
#include <linux/mutex.h>
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg)
+#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
+ "cpufreq-core", msg)
/**
* The "cpufreq driver" - the arch- or hardware-dependent low
@@ -151,7 +152,8 @@ static void cpufreq_debug_disable_ratelimit(void)
spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}
-void cpufreq_debug_printk(unsigned int type, const char *prefix, const char *fmt, ...)
+void cpufreq_debug_printk(unsigned int type, const char *prefix,
+ const char *fmt, ...)
{
char s[256];
va_list args;
@@ -161,7 +163,8 @@ void cpufreq_debug_printk(unsigned int type, const char *prefix, const char *fmt
WARN_ON(!prefix);
if (type & debug) {
spin_lock_irqsave(&disable_ratelimit_lock, flags);
- if (!disable_ratelimit && debug_ratelimit && !printk_ratelimit()) {
+ if (!disable_ratelimit && debug_ratelimit
+ && !printk_ratelimit()) {
spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
return;
}
@@ -182,10 +185,12 @@ EXPORT_SYMBOL(cpufreq_debug_printk);
module_param(debug, uint, 0644);
-MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core, 2 to debug drivers, and 4 to debug governors.");
+MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
+ " 2 to debug drivers, and 4 to debug governors.");
module_param(debug_ratelimit, uint, 0644);
-MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging: set to 0 to disable ratelimiting.");
+MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
+ " set to 0 to disable ratelimiting.");
#else /* !CONFIG_CPU_FREQ_DEBUG */
@@ -219,17 +224,23 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
if (!l_p_j_ref_freq) {
l_p_j_ref = loops_per_jiffy;
l_p_j_ref_freq = ci->old;
- dprintk("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
+ dprintk("saving %lu as reference value for loops_per_jiffy;"
+ "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
}
if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
(val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
(val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
- loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new);
- dprintk("scaling loops_per_jiffy to %lu for frequency %u kHz\n", loops_per_jiffy, ci->new);
+ loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
+ ci->new);
+ dprintk("scaling loops_per_jiffy to %lu"
+ "for frequency %u kHz\n", loops_per_jiffy, ci->new);
}
}
#else
-static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) { return; }
+static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
+{
+ return;
+}
#endif
@@ -316,7 +327,8 @@ static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
*policy = CPUFREQ_POLICY_PERFORMANCE;
err = 0;
- } else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
+ } else if (!strnicmp(str_governor, "powersave",
+ CPUFREQ_NAME_LEN)) {
*policy = CPUFREQ_POLICY_POWERSAVE;
err = 0;
}
@@ -328,7 +340,8 @@ static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
t = __find_governor(str_governor);
if (t == NULL) {
- char *name = kasprintf(GFP_KERNEL, "cpufreq_%s", str_governor);
+ char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
+ str_governor);
if (name) {
int ret;
@@ -361,7 +374,8 @@ extern struct sysdev_class cpu_sysdev_class;
/**
- * cpufreq_per_cpu_attr_read() / show_##file_name() - print out cpufreq information
+ * cpufreq_per_cpu_attr_read() / show_##file_name() -
+ * print out cpufreq information
*
* Write out information from cpufreq_driver->policy[cpu]; object must be
* "unsigned int".
@@ -380,7 +394,8 @@ show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
-static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy);
+static int __cpufreq_set_policy(struct cpufreq_policy *data,
+ struct cpufreq_policy *policy);
/**
* cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
@@ -416,7 +431,8 @@ store_one(scaling_max_freq,max);
/**
* show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
*/
-static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, char *buf)
+static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy,
+ char *buf)
{
unsigned int cur_freq = cpufreq_get(policy->cpu);
if (!cur_freq)
@@ -428,7 +444,8 @@ static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, char *buf)
/**
* show_scaling_governor - show the current policy for the specified CPU
*/
-static ssize_t show_scaling_governor (struct cpufreq_policy * policy, char *buf)
+static ssize_t show_scaling_governor (struct cpufreq_policy * policy,
+ char *buf)
{
if(policy->policy == CPUFREQ_POLICY_POWERSAVE)
return sprintf(buf, "powersave\n");
@@ -458,7 +475,8 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
if (ret != 1)
return -EINVAL;
- if (cpufreq_parse_governor(str_governor, &new_policy.policy, &new_policy.governor))
+ if (cpufreq_parse_governor(str_governor, &new_policy.policy,
+ &new_policy.governor))
return -EINVAL;
lock_cpu_hotplug();
@@ -474,7 +492,10 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
unlock_cpu_hotplug();
- return ret ? ret : count;
+ if (ret)
+ return ret;
+ else
+ return count;
}
/**
@@ -488,7 +509,7 @@ static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf)
/**
* show_scaling_available_governors - show the available CPUfreq governors
*/
-static ssize_t show_scaling_available_governors (struct cpufreq_policy * policy,
+static ssize_t show_scaling_available_governors (struct cpufreq_policy *policy,
char *buf)
{
ssize_t i = 0;
@@ -574,7 +595,11 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
policy = cpufreq_cpu_get(policy->cpu);
if (!policy)
return -EINVAL;
- ret = fattr->show ? fattr->show(policy,buf) : -EIO;
+ if (fattr->show)
+ ret = fattr->show(policy, buf);
+ else
+ ret = -EIO;
+
cpufreq_cpu_put(policy);
return ret;
}
@@ -588,7 +613,11 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr,
policy = cpufreq_cpu_get(policy->cpu);
if (!policy)
return -EINVAL;
- ret = fattr->store ? fattr->store(policy,buf,count) : -EIO;
+ if (fattr->store)
+ ret = fattr->store(policy, buf, count);
+ else
+ ret = -EIO;
+
cpufreq_cpu_put(policy);
return ret;
}
@@ -913,7 +942,8 @@ static void handle_update(struct work_struct *work)
* We adjust to the current frequency first, and need to clean up later.
* So either call cpufreq_update_policy() or schedule handle_update().
*/
-static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigned int new_freq)
+static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
+ unsigned int new_freq)
{
struct cpufreq_freqs freqs;
@@ -938,16 +968,16 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigne
unsigned int cpufreq_quick_get(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- unsigned int ret = 0;
+ unsigned int ret_freq = 0;
if (policy) {
mutex_lock(&policy->lock);
- ret = policy->cur;
+ ret_freq = policy->cur;
mutex_unlock(&policy->lock);
cpufreq_cpu_put(policy);
}
- return (ret);
+ return (ret_freq);
}
EXPORT_SYMBOL(cpufreq_quick_get);
@@ -961,7 +991,7 @@ EXPORT_SYMBOL(cpufreq_quick_get);
unsigned int cpufreq_get(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- unsigned int ret = 0;
+ unsigned int ret_freq = 0;
if (!policy)
return 0;
@@ -971,12 +1001,14 @@ unsigned int cpufreq_get(unsigned int cpu)
mutex_lock(&policy->lock);
- ret = cpufreq_driver->get(cpu);
+ ret_freq = cpufreq_driver->get(cpu);
- if (ret && policy->cur && !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
- /* verify no discrepancy between actual and saved value exists */
- if (unlikely(ret != policy->cur)) {
- cpufreq_out_of_sync(cpu, policy->cur, ret);
+ if (ret_freq && policy->cur &&
+ !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
+ /* verify no discrepancy between actual and
+ saved value exists */
+ if (unlikely(ret_freq != policy->cur)) {
+ cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
schedule_work(&policy->update);
}
}
@@ -986,7 +1018,7 @@ unsigned int cpufreq_get(unsigned int cpu)
out:
cpufreq_cpu_put(policy);
- return (ret);
+ return (ret_freq);
}
EXPORT_SYMBOL(cpufreq_get);
@@ -998,7 +1030,7 @@ EXPORT_SYMBOL(cpufreq_get);
static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
{
int cpu = sysdev->id;
- unsigned int ret = 0;
+ int ret = 0;
unsigned int cur_freq = 0;
struct cpufreq_policy *cpu_policy;
@@ -1080,7 +1112,7 @@ out:
static int cpufreq_resume(struct sys_device * sysdev)
{
int cpu = sysdev->id;
- unsigned int ret = 0;
+ int ret = 0;
struct cpufreq_policy *cpu_policy;
dprintk("resuming cpu %u\n", cpu);
@@ -1276,22 +1308,45 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
+int cpufreq_driver_getavg(struct cpufreq_policy *policy)
+{
+ int ret = 0;
+
+ policy = cpufreq_cpu_get(policy->cpu);
+ if (!policy)
+ return -EINVAL;
+
+ mutex_lock(&policy->lock);
+
+ if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
+ ret = cpufreq_driver->getavg(policy->cpu);
+
+ mutex_unlock(&policy->lock);
+
+ cpufreq_cpu_put(policy);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_getavg);
+
/*
* Locking: Must be called with the lock_cpu_hotplug() lock held
* when "event" is CPUFREQ_GOV_LIMITS
*/
-static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
+static int __cpufreq_governor(struct cpufreq_policy *policy,
+ unsigned int event)
{
int ret;
if (!try_module_get(policy->governor->owner))
return -EINVAL;
- dprintk("__cpufreq_governor for CPU %u, event %u\n", policy->cpu, event);
+ dprintk("__cpufreq_governor for CPU %u, event %u\n",
+ policy->cpu, event);
ret = policy->governor->governor(policy, event);
- /* we keep one module reference alive for each CPU governed by this CPU */
+ /* we keep one module reference alive for
+ each CPU governed by this CPU */
if ((event != CPUFREQ_GOV_START) || ret)
module_put(policy->governor->owner);
if ((event == CPUFREQ_GOV_STOP) && !ret)
@@ -1367,9 +1422,12 @@ EXPORT_SYMBOL(cpufreq_get_policy);
/*
+ * data : current policy.
+ * policy : policy to be set.
* Locking: Must be called with the lock_cpu_hotplug() lock held
*/
-static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
+static int __cpufreq_set_policy(struct cpufreq_policy *data,
+ struct cpufreq_policy *policy)
{
int ret = 0;
@@ -1377,7 +1435,8 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli
dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
policy->min, policy->max);
- memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo));
+ memcpy(&policy->cpuinfo, &data->cpuinfo,
+ sizeof(struct cpufreq_cpuinfo));
if (policy->min > data->min && policy->min > policy->max) {
ret = -EINVAL;
@@ -1410,7 +1469,8 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli
data->min = policy->min;
data->max = policy->max;
- dprintk("new min and max freqs are %u - %u kHz\n", data->min, data->max);
+ dprintk("new min and max freqs are %u - %u kHz\n",
+ data->min, data->max);
if (cpufreq_driver->setpolicy) {
data->policy = policy->policy;
@@ -1431,10 +1491,12 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli
data->governor = policy->governor;
if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
/* new governor failed, so re-start old one */
- dprintk("starting governor %s failed\n", data->governor->name);
+ dprintk("starting governor %s failed\n",
+ data->governor->name);
if (old_gov) {
data->governor = old_gov;
- __cpufreq_governor(data, CPUFREQ_GOV_START);
+ __cpufreq_governor(data,
+ CPUFREQ_GOV_START);
}
ret = -EINVAL;
goto error_out;
@@ -1524,7 +1586,8 @@ int cpufreq_update_policy(unsigned int cpu)
data->cur = policy.cur;
} else {
if (data->cur != policy.cur)
- cpufreq_out_of_sync(cpu, data->cur, policy.cur);
+ cpufreq_out_of_sync(cpu, data->cur,
+ policy.cur);
}
}
@@ -1626,8 +1689,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
/* if all ->init() calls failed, unregister */
if (ret) {
- dprintk("no CPU initialized for driver %s\n", driver_data->name);
- sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
+ dprintk("no CPU initialized for driver %s\n",
+ driver_data->name);
+ sysdev_driver_unregister(&cpu_sysdev_class,
+ &cpufreq_sysdev_driver);
spin_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
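The new cpufreq_driver_getavg() follows the usual cpufreq calling convention
-- take a reference on the policy, work under policy->lock, drop the reference
-- and simply forwards to the driver's optional ->getavg hook. Callers
therefore have to cope with drivers that lack the hook; the fallback below is
the same one the ondemand governor adopts later in this diff:

        unsigned int freq_cur;

        /* 0 means the driver has no ->getavg (or the CPU is offline);
         * fall back to the last requested frequency */
        freq_cur = cpufreq_driver_getavg(policy);
        if (!freq_cur)
                freq_cur = policy->cur;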
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 5ef5ede5b88..eef0270c6f3 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -44,15 +44,17 @@
* latency of the processor. The governor will work on any processor with
* transition latency <= 10mS, using appropriate sampling
* rate.
- * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
- * this governor will not work.
+ * For CPUs with transition latency > 10mS (mostly drivers
+ * with CPUFREQ_ETERNAL), this governor will not work.
* All times here are in uS.
*/
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO (2)
/* for correct statistics, we need at least 10 ticks between each measure */
-#define MIN_STAT_SAMPLING_RATE (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
-#define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
+#define MIN_STAT_SAMPLING_RATE \
+ (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
+#define MIN_SAMPLING_RATE \
+ (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
#define DEF_SAMPLING_DOWN_FACTOR (1)
@@ -103,11 +105,16 @@ static struct dbs_tuners dbs_tuners_ins = {
static inline unsigned int get_cpu_idle_time(unsigned int cpu)
{
- return kstat_cpu(cpu).cpustat.idle +
+ unsigned int add_nice = 0, ret;
+
+ if (dbs_tuners_ins.ignore_nice)
+ add_nice = kstat_cpu(cpu).cpustat.nice;
+
+ ret = kstat_cpu(cpu).cpustat.idle +
kstat_cpu(cpu).cpustat.iowait +
- ( dbs_tuners_ins.ignore_nice ?
- kstat_cpu(cpu).cpustat.nice :
- 0);
+ add_nice;
+
+ return ret;
}
/************************** sysfs interface ************************/
@@ -452,6 +459,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int cpu = policy->cpu;
struct cpu_dbs_info_s *this_dbs_info;
unsigned int j;
+ int rc;
this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
@@ -468,6 +476,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
break;
mutex_lock(&dbs_mutex);
+
+ rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
+ if (rc) {
+ mutex_unlock(&dbs_mutex);
+ return rc;
+ }
+
for_each_cpu_mask(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -480,7 +495,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
this_dbs_info->enable = 1;
this_dbs_info->down_skip = 0;
this_dbs_info->requested_freq = policy->cur;
- sysfs_create_group(&policy->kobj, &dbs_attr_group);
+
dbs_enable++;
/*
* Start the timer schedule work, when this governor
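The reordering is the whole point of this hunk: sysfs_create_group() can fail,
and the old code called it after the per-CPU state had already been populated,
ignoring the return value. Checking it first, while nothing has been enabled
yet, keeps the error path a plain unlock-and-return -- the same shape the
ondemand and userspace governors get below. Sketch of the pattern:

        mutex_lock(&dbs_mutex);

        rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
        if (rc) {
                /* nothing to unwind yet */
                mutex_unlock(&dbs_mutex);
                return rc;
        }
        /* ... only now populate the state and bump dbs_enable ... */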
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index e1cc5113c2a..f697449327c 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -41,8 +41,10 @@
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO (2)
/* for correct statistics, we need at least 10 ticks between each measure */
-#define MIN_STAT_SAMPLING_RATE (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
-#define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
+#define MIN_STAT_SAMPLING_RATE \
+ (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
+#define MIN_SAMPLING_RATE \
+ (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
#define TRANSITION_LATENCY_LIMIT (10 * 1000)
@@ -206,7 +208,8 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
ret = sscanf(buf, "%u", &input);
mutex_lock(&dbs_mutex);
- if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
+ if (ret != 1 || input > MAX_SAMPLING_RATE
+ || input < MIN_SAMPLING_RATE) {
mutex_unlock(&dbs_mutex);
return -EINVAL;
}
@@ -397,8 +400,15 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
* policy. To be safe, we focus 10 points under the threshold.
*/
if (load < (dbs_tuners_ins.up_threshold - 10)) {
- unsigned int freq_next = (policy->cur * load) /
+ unsigned int freq_next, freq_cur;
+
+ freq_cur = cpufreq_driver_getavg(policy);
+ if (!freq_cur)
+ freq_cur = policy->cur;
+
+ freq_next = (freq_cur * load) /
(dbs_tuners_ins.up_threshold - 10);
+
if (!dbs_tuners_ins.powersave_bias) {
__cpufreq_driver_target(policy, freq_next,
CPUFREQ_RELATION_L);
@@ -472,6 +482,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int cpu = policy->cpu;
struct cpu_dbs_info_s *this_dbs_info;
unsigned int j;
+ int rc;
this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
@@ -494,12 +505,23 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
if (dbs_enable == 1) {
kondemand_wq = create_workqueue("kondemand");
if (!kondemand_wq) {
- printk(KERN_ERR "Creation of kondemand failed\n");
+ printk(KERN_ERR
+ "Creation of kondemand failed\n");
dbs_enable--;
mutex_unlock(&dbs_mutex);
return -ENOSPC;
}
}
+
+ rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
+ if (rc) {
+ if (dbs_enable == 1)
+ destroy_workqueue(kondemand_wq);
+ dbs_enable--;
+ mutex_unlock(&dbs_mutex);
+ return rc;
+ }
+
for_each_cpu_mask(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -509,7 +531,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
j_dbs_info->prev_cpu_wall = get_jiffies_64();
}
this_dbs_info->enable = 1;
- sysfs_create_group(&policy->kobj, &dbs_attr_group);
/*
* Start the timer schedule work, when this governor
* is used for the first time
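Scaling by the measured average frequency instead of policy->cur keeps the
target honest on hardware whose actual clock drifts from the requested one.
A worked example of the freq_next computation, with made-up numbers and the
default-style up_threshold of 80:

        /* freq_cur  = 1600000 kHz (from cpufreq_driver_getavg)
         * load      = 35 (percent)
         * freq_next = (1600000 * 35) / (80 - 10) = 800000 kHz,
         * i.e. the lowest frequency expected to keep the load about
         * 10 points under up_threshold. */
        freq_next = (freq_cur * load) / (dbs_tuners_ins.up_threshold - 10);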
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index de91e3371ef..e8e1451ef1c 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -15,7 +15,8 @@
#include <linux/cpufreq.h>
#include <linux/init.h>
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "performance", msg)
+#define dprintk(msg...) \
+ cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "performance", msg)
static int cpufreq_governor_performance(struct cpufreq_policy *policy,
@@ -24,8 +25,10 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy,
switch (event) {
case CPUFREQ_GOV_START:
case CPUFREQ_GOV_LIMITS:
- dprintk("setting to %u kHz because of event %u\n", policy->max, event);
- __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+ dprintk("setting to %u kHz because of event %u\n",
+ policy->max, event);
+ __cpufreq_driver_target(policy, policy->max,
+ CPUFREQ_RELATION_H);
break;
default:
break;
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 0a2596044e6..13fe06b94b0 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -15,7 +15,8 @@
#include <linux/cpufreq.h>
#include <linux/init.h>
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "powersave", msg)
+#define dprintk(msg...) \
+ cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "powersave", msg)
static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
unsigned int event)
@@ -23,8 +24,10 @@ static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
switch (event) {
case CPUFREQ_GOV_START:
case CPUFREQ_GOV_LIMITS:
- dprintk("setting to %u kHz because of event %u\n", policy->min, event);
- __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+ dprintk("setting to %u kHz because of event %u\n",
+ policy->min, event);
+ __cpufreq_driver_target(policy, policy->min,
+ CPUFREQ_RELATION_L);
break;
default:
break;
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c2ecc599dc5..6742b1adf2c 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -351,8 +351,8 @@ __init cpufreq_stats_init(void)
register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
for_each_online_cpu(cpu) {
- cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE,
- (void *)(long)cpu);
+ cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier,
+ CPU_ONLINE, (void *)(long)cpu);
}
return 0;
}
@@ -368,14 +368,15 @@ __exit cpufreq_stats_exit(void)
unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
lock_cpu_hotplug();
for_each_online_cpu(cpu) {
- cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_DEAD,
- (void *)(long)cpu);
+ cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier,
+ CPU_DEAD, (void *)(long)cpu);
}
unlock_cpu_hotplug();
}
MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>");
-MODULE_DESCRIPTION ("'cpufreq_stats' - A driver to export cpufreq stats through sysfs filesystem");
+MODULE_DESCRIPTION ("'cpufreq_stats' - A driver to export cpufreq stats"
+ "through sysfs filesystem");
MODULE_LICENSE ("GPL");
module_init(cpufreq_stats_init);
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index a06c204589c..2a4eb0bfaf3 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -131,19 +131,26 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
unsigned int event)
{
unsigned int cpu = policy->cpu;
+ int rc = 0;
+
switch (event) {
case CPUFREQ_GOV_START:
if (!cpu_online(cpu))
return -EINVAL;
BUG_ON(!policy->cur);
mutex_lock(&userspace_mutex);
+ rc = sysfs_create_file (&policy->kobj,
+ &freq_attr_scaling_setspeed.attr);
+ if (rc)
+ goto start_out;
+
cpu_is_managed[cpu] = 1;
cpu_min_freq[cpu] = policy->min;
cpu_max_freq[cpu] = policy->max;
cpu_cur_freq[cpu] = policy->cur;
cpu_set_freq[cpu] = policy->cur;
- sysfs_create_file (&policy->kobj, &freq_attr_scaling_setspeed.attr);
dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]);
+start_out:
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_STOP:
@@ -180,7 +187,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
mutex_unlock(&userspace_mutex);
break;
}
- return 0;
+ return rc;
}
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 551f4ccf87f..e7490925fdc 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -9,7 +9,8 @@
#include <linux/init.h>
#include <linux/cpufreq.h>
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "freq-table", msg)
+#define dprintk(msg...) \
+ cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "freq-table", msg)
/*********************************************************************
* FREQUENCY TABLE HELPERS *
@@ -29,7 +30,8 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
continue;
}
- dprintk("table entry %u: %u kHz, %u index\n", i, freq, table[i].index);
+ dprintk("table entry %u: %u kHz, %u index\n",
+ i, freq, table[i].index);
if (freq < min_freq)
min_freq = freq;
if (freq > max_freq)
@@ -54,13 +56,14 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
unsigned int i;
unsigned int count = 0;
- dprintk("request for verification of policy (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu);
+ dprintk("request for verification of policy (%u - %u kHz) for cpu %u\n",
+ policy->min, policy->max, policy->cpu);
if (!cpu_online(policy->cpu))
return -EINVAL;
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
unsigned int freq = table[i].frequency;
@@ -75,10 +78,11 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
if (!count)
policy->max = next_larger;
- cpufreq_verify_within_limits(policy,
- policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
- dprintk("verification lead to (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu);
+ dprintk("verification lead to (%u - %u kHz) for cpu %u\n",
+ policy->min, policy->max, policy->cpu);
return 0;
}
@@ -101,7 +105,8 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
};
unsigned int i;
- dprintk("request for target %u kHz (relation: %u) for cpu %u\n", target_freq, relation, policy->cpu);
+ dprintk("request for target %u kHz (relation: %u) for cpu %u\n",
+ target_freq, relation, policy->cpu);
switch (relation) {
case CPUFREQ_RELATION_H:
@@ -192,7 +197,10 @@ static ssize_t show_available_freqs (struct cpufreq_policy *policy, char *buf)
}
struct freq_attr cpufreq_freq_attr_scaling_available_freqs = {
- .attr = { .name = "scaling_available_frequencies", .mode = 0444, .owner=THIS_MODULE },
+ .attr = { .name = "scaling_available_frequencies",
+ .mode = 0444,
+ .owner = THIS_MODULE
+ },
.show = show_available_freqs,
};
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
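For orientation, the table these helpers walk is a plain array terminated by
CPUFREQ_TABLE_END, with CPUFREQ_ENTRY_INVALID marking holes -- exactly what
the cpuinfo loop above skips. An illustrative (made-up) driver table:

        #include <linux/cpufreq.h>

        static struct cpufreq_frequency_table example_table[] = {
                { .index = 0, .frequency = 600000 },            /* kHz */
                { .index = 1, .frequency = CPUFREQ_ENTRY_INVALID },
                { .index = 2, .frequency = 1000000 },
                { .index = 0, .frequency = CPUFREQ_TABLE_END },
        };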
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index e23bc0d6215..3f828052f8d 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -796,7 +796,7 @@ endchoice
config BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ
int "Maximum transfer size (KB) per request (up to 128)"
default "128"
- depends BLK_DEV_IDE_AU1XXX
+ depends on BLK_DEV_IDE_AU1XXX
config IDE_ARM
def_bool ARM && (ARCH_A5K || ARCH_CLPS7500 || ARCH_RPC || ARCH_SHARK)
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 163d991eb8c..50fb1cd447b 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,9 +1,11 @@
infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o
+user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o
obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
ib_cm.o iw_cm.o $(infiniband-y)
obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
-obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o
+obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
+ $(user_access-y)
ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
device.o fmr_pool.o cache.o
@@ -18,6 +20,8 @@ iw_cm-y := iwcm.o
rdma_cm-y := cma.o
+rdma_ucm-y := ucma.o
+
ib_addr-y := addr.o
ib_umad-y := user_mad.o
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 79c937bf696..d446998b12a 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3289,6 +3289,10 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
spin_lock_irqsave(&cm_id_priv->lock, flags);
switch (cm_id_priv->id.state) {
+ /* Allow transition to RTS before sending REP */
+ case IB_CM_REQ_RCVD:
+ case IB_CM_MRA_REQ_SENT:
+
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
case IB_CM_REP_SENT:
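Permitting IB_CM_REQ_RCVD and IB_CM_MRA_REQ_SENT here lets the passive side
move its QP to RTS before the REP has even gone out, which the
rdma_notify()/ib_cm_notify() path added later in this diff relies on when the
connection is established by incoming data rather than by an RTU. A
consumer-side sketch:

        /* data arrived on the QP before the RTU did; tell the CM the
         * connection is established */
        ret = ib_cm_notify(cm_id, IB_EVENT_COMM_EST);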
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 985a6b564d8..533193d4e5d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -70,6 +70,7 @@ static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
+static DEFINE_IDR(udp_ps);
struct cma_device {
struct list_head list;
@@ -133,7 +134,6 @@ struct rdma_id_private {
u32 seq_num;
u32 qp_num;
- enum ib_qp_type qp_type;
u8 srq;
};
@@ -392,7 +392,6 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
id->qp = qp;
id_priv->qp_num = qp->qp_num;
- id_priv->qp_type = qp->qp_type;
id_priv->srq = (qp->srq != NULL);
return 0;
err:
@@ -510,9 +509,17 @@ static inline int cma_any_addr(struct sockaddr *addr)
return cma_zero_addr(addr) || cma_loopback_addr(addr);
}
+static inline __be16 cma_port(struct sockaddr *addr)
+{
+ if (addr->sa_family == AF_INET)
+ return ((struct sockaddr_in *) addr)->sin_port;
+ else
+ return ((struct sockaddr_in6 *) addr)->sin6_port;
+}
+
static inline int cma_any_port(struct sockaddr *addr)
{
- return !((struct sockaddr_in *) addr)->sin_port;
+ return !cma_port(addr);
}
static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
@@ -594,20 +601,6 @@ static inline int cma_user_data_offset(enum rdma_port_space ps)
}
}
-static int cma_notify_user(struct rdma_id_private *id_priv,
- enum rdma_cm_event_type type, int status,
- void *data, u8 data_len)
-{
- struct rdma_cm_event event;
-
- event.event = type;
- event.status = status;
- event.private_data = data;
- event.private_data_len = data_len;
-
- return id_priv->id.event_handler(&id_priv->id, &event);
-}
-
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
@@ -776,63 +769,61 @@ static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
return 0;
}
-static int cma_rtu_recv(struct rdma_id_private *id_priv)
+static void cma_set_rep_event_data(struct rdma_cm_event *event,
+ struct ib_cm_rep_event_param *rep_data,
+ void *private_data)
{
- int ret;
-
- ret = cma_modify_qp_rts(&id_priv->id);
- if (ret)
- goto reject;
-
- return 0;
-reject:
- cma_modify_qp_err(&id_priv->id);
- ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
- NULL, 0, NULL, 0);
- return ret;
+ event->param.conn.private_data = private_data;
+ event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
+ event->param.conn.responder_resources = rep_data->responder_resources;
+ event->param.conn.initiator_depth = rep_data->initiator_depth;
+ event->param.conn.flow_control = rep_data->flow_control;
+ event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
+ event->param.conn.srq = rep_data->srq;
+ event->param.conn.qp_num = rep_data->remote_qpn;
}
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
struct rdma_id_private *id_priv = cm_id->context;
- enum rdma_cm_event_type event;
- u8 private_data_len = 0;
- int ret = 0, status = 0;
+ struct rdma_cm_event event;
+ int ret = 0;
atomic_inc(&id_priv->dev_remove);
if (!cma_comp(id_priv, CMA_CONNECT))
goto out;
+ memset(&event, 0, sizeof event);
switch (ib_event->event) {
case IB_CM_REQ_ERROR:
case IB_CM_REP_ERROR:
- event = RDMA_CM_EVENT_UNREACHABLE;
- status = -ETIMEDOUT;
+ event.event = RDMA_CM_EVENT_UNREACHABLE;
+ event.status = -ETIMEDOUT;
break;
case IB_CM_REP_RECEIVED:
- status = cma_verify_rep(id_priv, ib_event->private_data);
- if (status)
- event = RDMA_CM_EVENT_CONNECT_ERROR;
+ event.status = cma_verify_rep(id_priv, ib_event->private_data);
+ if (event.status)
+ event.event = RDMA_CM_EVENT_CONNECT_ERROR;
else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
- status = cma_rep_recv(id_priv);
- event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
- RDMA_CM_EVENT_ESTABLISHED;
+ event.status = cma_rep_recv(id_priv);
+ event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
+ RDMA_CM_EVENT_ESTABLISHED;
} else
- event = RDMA_CM_EVENT_CONNECT_RESPONSE;
- private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
+ event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
+ cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
+ ib_event->private_data);
break;
case IB_CM_RTU_RECEIVED:
- status = cma_rtu_recv(id_priv);
- event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
- RDMA_CM_EVENT_ESTABLISHED;
+ case IB_CM_USER_ESTABLISHED:
+ event.event = RDMA_CM_EVENT_ESTABLISHED;
break;
case IB_CM_DREQ_ERROR:
- status = -ETIMEDOUT; /* fall through */
+ event.status = -ETIMEDOUT; /* fall through */
case IB_CM_DREQ_RECEIVED:
case IB_CM_DREP_RECEIVED:
if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
goto out;
- event = RDMA_CM_EVENT_DISCONNECTED;
+ event.event = RDMA_CM_EVENT_DISCONNECTED;
break;
case IB_CM_TIMEWAIT_EXIT:
case IB_CM_MRA_RECEIVED:
@@ -840,9 +831,10 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
goto out;
case IB_CM_REJ_RECEIVED:
cma_modify_qp_err(&id_priv->id);
- status = ib_event->param.rej_rcvd.reason;
- event = RDMA_CM_EVENT_REJECTED;
- private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
+ event.status = ib_event->param.rej_rcvd.reason;
+ event.event = RDMA_CM_EVENT_REJECTED;
+ event.param.conn.private_data = ib_event->private_data;
+ event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
break;
default:
printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
@@ -850,8 +842,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
goto out;
}
- ret = cma_notify_user(id_priv, event, status, ib_event->private_data,
- private_data_len);
+ ret = id_priv->id.event_handler(&id_priv->id, &event);
if (ret) {
/* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.ib = NULL;
@@ -865,8 +856,8 @@ out:
return ret;
}
-static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id,
- struct ib_cm_event *ib_event)
+static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
+ struct ib_cm_event *ib_event)
{
struct rdma_id_private *id_priv;
struct rdma_cm_id *id;
@@ -913,9 +904,61 @@ err:
return NULL;
}
+static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
+ struct ib_cm_event *ib_event)
+{
+ struct rdma_id_private *id_priv;
+ struct rdma_cm_id *id;
+ union cma_ip_addr *src, *dst;
+ __u16 port;
+ u8 ip_ver;
+ int ret;
+
+ id = rdma_create_id(listen_id->event_handler, listen_id->context,
+ listen_id->ps);
+ if (IS_ERR(id))
+ return NULL;
+
+ if (cma_get_net_info(ib_event->private_data, listen_id->ps,
+ &ip_ver, &port, &src, &dst))
+ goto err;
+
+ cma_save_net_info(&id->route.addr, &listen_id->route.addr,
+ ip_ver, port, src, dst);
+
+ ret = rdma_translate_ip(&id->route.addr.src_addr,
+ &id->route.addr.dev_addr);
+ if (ret)
+ goto err;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ id_priv->state = CMA_CONNECT;
+ return id_priv;
+err:
+ rdma_destroy_id(id);
+ return NULL;
+}
+
+static void cma_set_req_event_data(struct rdma_cm_event *event,
+ struct ib_cm_req_event_param *req_data,
+ void *private_data, int offset)
+{
+ event->param.conn.private_data = private_data + offset;
+ event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
+ event->param.conn.responder_resources = req_data->responder_resources;
+ event->param.conn.initiator_depth = req_data->initiator_depth;
+ event->param.conn.flow_control = req_data->flow_control;
+ event->param.conn.retry_count = req_data->retry_count;
+ event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
+ event->param.conn.srq = req_data->srq;
+ event->param.conn.qp_num = req_data->remote_qpn;
+}
+
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
struct rdma_id_private *listen_id, *conn_id;
+ struct rdma_cm_event event;
int offset, ret;
listen_id = cm_id->context;
@@ -925,7 +968,19 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
goto out;
}
- conn_id = cma_new_id(&listen_id->id, ib_event);
+ memset(&event, 0, sizeof event);
+ offset = cma_user_data_offset(listen_id->id.ps);
+ event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
+ if (listen_id->id.ps == RDMA_PS_UDP) {
+ conn_id = cma_new_udp_id(&listen_id->id, ib_event);
+ event.param.ud.private_data = ib_event->private_data + offset;
+ event.param.ud.private_data_len =
+ IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
+ } else {
+ conn_id = cma_new_conn_id(&listen_id->id, ib_event);
+ cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
+ ib_event->private_data, offset);
+ }
if (!conn_id) {
ret = -ENOMEM;
goto out;
@@ -942,10 +997,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
cm_id->context = conn_id;
cm_id->cm_handler = cma_ib_handler;
- offset = cma_user_data_offset(listen_id->id.ps);
- ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
- ib_event->private_data + offset,
- IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
+ ret = conn_id->id.event_handler(&conn_id->id, &event);
if (!ret)
goto out;
@@ -964,8 +1016,7 @@ out:
static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
- return cpu_to_be64(((u64)ps << 16) +
- be16_to_cpu(((struct sockaddr_in *) addr)->sin_port));
+ return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
}
static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
@@ -1021,15 +1072,16 @@ static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
struct rdma_id_private *id_priv = iw_id->context;
- enum rdma_cm_event_type event = 0;
+ struct rdma_cm_event event;
struct sockaddr_in *sin;
int ret = 0;
+ memset(&event, 0, sizeof event);
atomic_inc(&id_priv->dev_remove);
switch (iw_event->event) {
case IW_CM_EVENT_CLOSE:
- event = RDMA_CM_EVENT_DISCONNECTED;
+ event.event = RDMA_CM_EVENT_DISCONNECTED;
break;
case IW_CM_EVENT_CONNECT_REPLY:
sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
@@ -1037,20 +1089,21 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
*sin = iw_event->remote_addr;
if (iw_event->status)
- event = RDMA_CM_EVENT_REJECTED;
+ event.event = RDMA_CM_EVENT_REJECTED;
else
- event = RDMA_CM_EVENT_ESTABLISHED;
+ event.event = RDMA_CM_EVENT_ESTABLISHED;
break;
case IW_CM_EVENT_ESTABLISHED:
- event = RDMA_CM_EVENT_ESTABLISHED;
+ event.event = RDMA_CM_EVENT_ESTABLISHED;
break;
default:
BUG_ON(1);
}
- ret = cma_notify_user(id_priv, event, iw_event->status,
- iw_event->private_data,
- iw_event->private_data_len);
+ event.status = iw_event->status;
+ event.param.conn.private_data = iw_event->private_data;
+ event.param.conn.private_data_len = iw_event->private_data_len;
+ ret = id_priv->id.event_handler(&id_priv->id, &event);
if (ret) {
/* Destroy the CM ID by returning a non-zero value. */
id_priv->cm_id.iw = NULL;
@@ -1071,6 +1124,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
struct rdma_id_private *listen_id, *conn_id;
struct sockaddr_in *sin;
struct net_device *dev = NULL;
+ struct rdma_cm_event event;
int ret;
listen_id = cm_id->context;
@@ -1124,9 +1178,11 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
*sin = iw_event->remote_addr;
- ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
- iw_event->private_data,
- iw_event->private_data_len);
+ memset(&event, 0, sizeof event);
+ event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
+ event.param.conn.private_data = iw_event->private_data;
+ event.param.conn.private_data_len = iw_event->private_data_len;
+ ret = conn_id->id.event_handler(&conn_id->id, &event);
if (ret) {
/* User wants to destroy the CM ID */
conn_id->cm_id.iw = NULL;
@@ -1515,8 +1571,9 @@ static void addr_handler(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *dev_addr, void *context)
{
struct rdma_id_private *id_priv = context;
- enum rdma_cm_event_type event;
+ struct rdma_cm_event event;
+ memset(&event, 0, sizeof event);
atomic_inc(&id_priv->dev_remove);
/*
@@ -1536,14 +1593,15 @@ static void addr_handler(int status, struct sockaddr *src_addr,
if (status) {
if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
goto out;
- event = RDMA_CM_EVENT_ADDR_ERROR;
+ event.event = RDMA_CM_EVENT_ADDR_ERROR;
+ event.status = status;
} else {
memcpy(&id_priv->id.route.addr.src_addr, src_addr,
ip_addr_size(src_addr));
- event = RDMA_CM_EVENT_ADDR_RESOLVED;
+ event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
}
- if (cma_notify_user(id_priv, event, status, NULL, 0)) {
+ if (id_priv->id.event_handler(&id_priv->id, &event)) {
cma_exch(id_priv, CMA_DESTROYING);
cma_release_remove(id_priv);
cma_deref_id(id_priv);
@@ -1733,6 +1791,9 @@ static int cma_get_port(struct rdma_id_private *id_priv)
case RDMA_PS_TCP:
ps = &tcp_ps;
break;
+ case RDMA_PS_UDP:
+ ps = &udp_ps;
+ break;
default:
return -EPROTONOSUPPORT;
}
@@ -1821,6 +1882,110 @@ static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
return 0;
}
+static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
+ struct ib_cm_event *ib_event)
+{
+ struct rdma_id_private *id_priv = cm_id->context;
+ struct rdma_cm_event event;
+ struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
+ int ret = 0;
+
+ memset(&event, 0, sizeof event);
+ atomic_inc(&id_priv->dev_remove);
+ if (!cma_comp(id_priv, CMA_CONNECT))
+ goto out;
+
+ switch (ib_event->event) {
+ case IB_CM_SIDR_REQ_ERROR:
+ event.event = RDMA_CM_EVENT_UNREACHABLE;
+ event.status = -ETIMEDOUT;
+ break;
+ case IB_CM_SIDR_REP_RECEIVED:
+ event.param.ud.private_data = ib_event->private_data;
+ event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
+ if (rep->status != IB_SIDR_SUCCESS) {
+ event.event = RDMA_CM_EVENT_UNREACHABLE;
+ event.status = ib_event->param.sidr_rep_rcvd.status;
+ break;
+ }
+ if (rep->qkey != RDMA_UD_QKEY) {
+ event.event = RDMA_CM_EVENT_UNREACHABLE;
+ event.status = -EINVAL;
+ break;
+ }
+ ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
+ id_priv->id.route.path_rec,
+ &event.param.ud.ah_attr);
+ event.param.ud.qp_num = rep->qpn;
+ event.param.ud.qkey = rep->qkey;
+ event.event = RDMA_CM_EVENT_ESTABLISHED;
+ event.status = 0;
+ break;
+ default:
+ printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
+ ib_event->event);
+ goto out;
+ }
+
+ ret = id_priv->id.event_handler(&id_priv->id, &event);
+ if (ret) {
+ /* Destroy the CM ID by returning a non-zero value. */
+ id_priv->cm_id.ib = NULL;
+ cma_exch(id_priv, CMA_DESTROYING);
+ cma_release_remove(id_priv);
+ rdma_destroy_id(&id_priv->id);
+ return ret;
+ }
+out:
+ cma_release_remove(id_priv);
+ return ret;
+}
+
+static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
+ struct rdma_conn_param *conn_param)
+{
+ struct ib_cm_sidr_req_param req;
+ struct rdma_route *route;
+ int ret;
+
+ req.private_data_len = sizeof(struct cma_hdr) +
+ conn_param->private_data_len;
+ req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
+ if (!req.private_data)
+ return -ENOMEM;
+
+ if (conn_param->private_data && conn_param->private_data_len)
+ memcpy((void *) req.private_data + sizeof(struct cma_hdr),
+ conn_param->private_data, conn_param->private_data_len);
+
+ route = &id_priv->id.route;
+ ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
+ if (ret)
+ goto out;
+
+ id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
+ cma_sidr_rep_handler, id_priv);
+ if (IS_ERR(id_priv->cm_id.ib)) {
+ ret = PTR_ERR(id_priv->cm_id.ib);
+ goto out;
+ }
+
+ req.path = route->path_rec;
+ req.service_id = cma_get_service_id(id_priv->id.ps,
+ &route->addr.dst_addr);
+ req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
+ req.max_cm_retries = CMA_MAX_CM_RETRIES;
+
+ ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
+ if (ret) {
+ ib_destroy_cm_id(id_priv->cm_id.ib);
+ id_priv->cm_id.ib = NULL;
+ }
+out:
+ kfree(req.private_data);
+ return ret;
+}
+
static int cma_connect_ib(struct rdma_id_private *id_priv,
struct rdma_conn_param *conn_param)
{
@@ -1860,7 +2025,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
req.service_id = cma_get_service_id(id_priv->id.ps,
&route->addr.dst_addr);
req.qp_num = id_priv->qp_num;
- req.qp_type = id_priv->qp_type;
+ req.qp_type = IB_QPT_RC;
req.starting_psn = id_priv->seq_num;
req.responder_resources = conn_param->responder_resources;
req.initiator_depth = conn_param->initiator_depth;
@@ -1937,13 +2102,15 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
if (!id->qp) {
id_priv->qp_num = conn_param->qp_num;
- id_priv->qp_type = conn_param->qp_type;
id_priv->srq = conn_param->srq;
}
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
- ret = cma_connect_ib(id_priv, conn_param);
+ if (id->ps == RDMA_PS_UDP)
+ ret = cma_resolve_ib_udp(id_priv, conn_param);
+ else
+ ret = cma_connect_ib(id_priv, conn_param);
break;
case RDMA_TRANSPORT_IWARP:
ret = cma_connect_iw(id_priv, conn_param);
@@ -1966,11 +2133,25 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
struct rdma_conn_param *conn_param)
{
struct ib_cm_rep_param rep;
- int ret;
+ struct ib_qp_attr qp_attr;
+ int qp_attr_mask, ret;
- ret = cma_modify_qp_rtr(&id_priv->id);
- if (ret)
- return ret;
+ if (id_priv->id.qp) {
+ ret = cma_modify_qp_rtr(&id_priv->id);
+ if (ret)
+ goto out;
+
+ qp_attr.qp_state = IB_QPS_RTS;
+ ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr,
+ &qp_attr_mask);
+ if (ret)
+ goto out;
+
+ qp_attr.max_rd_atomic = conn_param->initiator_depth;
+ ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
+ if (ret)
+ goto out;
+ }
memset(&rep, 0, sizeof rep);
rep.qp_num = id_priv->qp_num;
@@ -1985,7 +2166,9 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
rep.rnr_retry_count = conn_param->rnr_retry_count;
rep.srq = id_priv->srq ? 1 : 0;
- return ib_send_cm_rep(id_priv->cm_id.ib, &rep);
+ ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
+out:
+ return ret;
}
static int cma_accept_iw(struct rdma_id_private *id_priv,
@@ -2010,6 +2193,24 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}
+static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
+ enum ib_cm_sidr_status status,
+ const void *private_data, int private_data_len)
+{
+ struct ib_cm_sidr_rep_param rep;
+
+ memset(&rep, 0, sizeof rep);
+ rep.status = status;
+ if (status == IB_SIDR_SUCCESS) {
+ rep.qp_num = id_priv->qp_num;
+ rep.qkey = RDMA_UD_QKEY;
+ }
+ rep.private_data = private_data;
+ rep.private_data_len = private_data_len;
+
+ return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
+}
+
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
struct rdma_id_private *id_priv;
@@ -2021,13 +2222,16 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
if (!id->qp && conn_param) {
id_priv->qp_num = conn_param->qp_num;
- id_priv->qp_type = conn_param->qp_type;
id_priv->srq = conn_param->srq;
}
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
- if (conn_param)
+ if (id->ps == RDMA_PS_UDP)
+ ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
+ conn_param->private_data,
+ conn_param->private_data_len);
+ else if (conn_param)
ret = cma_accept_ib(id_priv, conn_param);
else
ret = cma_rep_recv(id_priv);
@@ -2051,6 +2255,27 @@ reject:
}
EXPORT_SYMBOL(rdma_accept);
+int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
+{
+ struct rdma_id_private *id_priv;
+ int ret;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ if (!cma_comp(id_priv, CMA_CONNECT))
+ return -EINVAL;
+
+ switch (id->device->node_type) {
+ case RDMA_NODE_IB_CA:
+ ret = ib_cm_notify(id_priv->cm_id.ib, event);
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(rdma_notify);
+
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
u8 private_data_len)
{
@@ -2063,9 +2288,13 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
switch (rdma_node_get_transport(id->device->node_type)) {
case RDMA_TRANSPORT_IB:
- ret = ib_send_cm_rej(id_priv->cm_id.ib,
- IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
- private_data, private_data_len);
+ if (id->ps == RDMA_PS_UDP)
+ ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
+ private_data, private_data_len);
+ else
+ ret = ib_send_cm_rej(id_priv->cm_id.ib,
+ IB_CM_REJ_CONSUMER_DEFINED, NULL,
+ 0, private_data, private_data_len);
break;
case RDMA_TRANSPORT_IWARP:
ret = iw_cm_reject(id_priv->cm_id.iw,
@@ -2136,6 +2365,7 @@ static void cma_add_one(struct ib_device *device)
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
+ struct rdma_cm_event event;
enum cma_state state;
/* Record that we want to remove the device */
@@ -2150,8 +2380,9 @@ static int cma_remove_id_dev(struct rdma_id_private *id_priv)
if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
return 0;
- return cma_notify_user(id_priv, RDMA_CM_EVENT_DEVICE_REMOVAL,
- 0, NULL, 0);
+ memset(&event, 0, sizeof event);
+ event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
+ return id_priv->id.event_handler(&id_priv->id, &event);
}
static void cma_process_remove(struct cma_device *cma_dev)
@@ -2233,6 +2464,7 @@ static void cma_cleanup(void)
destroy_workqueue(cma_wq);
idr_destroy(&sdp_ps);
idr_destroy(&tcp_ps);
+ idr_destroy(&udp_ps);
}
module_init(cma_init);
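A note on cma_get_service_id(), now fed by the cma_port() helper: the IB
service ID is just the port space shifted above the 16-bit port number.
A worked example, assuming the usual RDMA_PS_UDP value of 0x0111 and an
arbitrary port of 9999:

        /* (0x0111ULL << 16) + 9999 = 0x0111270f, stored big-endian */
        __be64 sid = cpu_to_be64(((u64)RDMA_PS_UDP << 16) + 9999);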
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 86a3b2d401d..8926a2bd4a8 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -394,20 +394,12 @@ EXPORT_SYMBOL(ib_destroy_fmr_pool);
*/
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
- int serial;
-
- atomic_inc(&pool->req_ser);
- /*
- * It's OK if someone else bumps req_ser again here -- we'll
- * just wait a little longer.
- */
- serial = atomic_read(&pool->req_ser);
+ int serial = atomic_inc_return(&pool->req_ser);
wake_up_process(pool->thread);
if (wait_event_interruptible(pool->force_wait,
- atomic_read(&pool->flush_ser) -
- atomic_read(&pool->req_ser) >= 0))
+ atomic_read(&pool->flush_ser) - serial >= 0))
return -EINTR;
return 0;
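The race being closed: with a separate atomic_inc() and atomic_read(), a
concurrent flusher can bump req_ser between the two, so this caller ends up
waiting for a later serial than the request it actually made -- harmless but
needlessly slow, exactly as the deleted comment admitted.
atomic_inc_return() hands back the ticket this caller took. The same idea in
portable C11, purely as illustration:

        #include <stdatomic.h>

        static atomic_int req_ser;

        /* racy: another thread may increment between add and load */
        int racy_ticket(void)
        {
                atomic_fetch_add(&req_ser, 1);
                return atomic_load(&req_ser);
        }

        /* fixed: the read-modify-write returns the value we produced,
         * the C11 analogue of the kernel's atomic_inc_return() */
        int fixed_ticket(void)
        {
                return atomic_fetch_add(&req_ser, 1) + 1;
        }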
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 15f38d94b3a..5ed141ebd1c 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -998,17 +998,17 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
mad_agent = mad_send_wr->send_buf.mad_agent;
sge = mad_send_wr->sg_list;
- sge[0].addr = dma_map_single(mad_agent->device->dma_device,
- mad_send_wr->send_buf.mad,
- sge[0].length,
- DMA_TO_DEVICE);
- pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);
-
- sge[1].addr = dma_map_single(mad_agent->device->dma_device,
- ib_get_payload(mad_send_wr),
- sge[1].length,
- DMA_TO_DEVICE);
- pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);
+ sge[0].addr = ib_dma_map_single(mad_agent->device,
+ mad_send_wr->send_buf.mad,
+ sge[0].length,
+ DMA_TO_DEVICE);
+ mad_send_wr->header_mapping = sge[0].addr;
+
+ sge[1].addr = ib_dma_map_single(mad_agent->device,
+ ib_get_payload(mad_send_wr),
+ sge[1].length,
+ DMA_TO_DEVICE);
+ mad_send_wr->payload_mapping = sge[1].addr;
spin_lock_irqsave(&qp_info->send_queue.lock, flags);
if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
@@ -1026,12 +1026,12 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
}
spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
if (ret) {
- dma_unmap_single(mad_agent->device->dma_device,
- pci_unmap_addr(mad_send_wr, header_mapping),
- sge[0].length, DMA_TO_DEVICE);
- dma_unmap_single(mad_agent->device->dma_device,
- pci_unmap_addr(mad_send_wr, payload_mapping),
- sge[1].length, DMA_TO_DEVICE);
+ ib_dma_unmap_single(mad_agent->device,
+ mad_send_wr->header_mapping,
+ sge[0].length, DMA_TO_DEVICE);
+ ib_dma_unmap_single(mad_agent->device,
+ mad_send_wr->payload_mapping,
+ sge[1].length, DMA_TO_DEVICE);
}
return ret;
}
@@ -1850,11 +1850,11 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
mad_list);
recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
- dma_unmap_single(port_priv->device->dma_device,
- pci_unmap_addr(&recv->header, mapping),
- sizeof(struct ib_mad_private) -
- sizeof(struct ib_mad_private_header),
- DMA_FROM_DEVICE);
+ ib_dma_unmap_single(port_priv->device,
+ recv->header.mapping,
+ sizeof(struct ib_mad_private) -
+ sizeof(struct ib_mad_private_header),
+ DMA_FROM_DEVICE);
/* Setup MAD receive work completion from "normal" work completion */
recv->header.wc = *wc;
@@ -2080,12 +2080,12 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
qp_info = send_queue->qp_info;
retry:
- dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
- pci_unmap_addr(mad_send_wr, header_mapping),
- mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
- dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
- pci_unmap_addr(mad_send_wr, payload_mapping),
- mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
+ ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+ mad_send_wr->header_mapping,
+ mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
+ ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+ mad_send_wr->payload_mapping,
+ mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
queued_send_wr = NULL;
spin_lock_irqsave(&send_queue->lock, flags);
list_del(&mad_list->list);
@@ -2528,13 +2528,12 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
break;
}
}
- sg_list.addr = dma_map_single(qp_info->port_priv->
- device->dma_device,
- &mad_priv->grh,
- sizeof *mad_priv -
- sizeof mad_priv->header,
- DMA_FROM_DEVICE);
- pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
+ sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
+ &mad_priv->grh,
+ sizeof *mad_priv -
+ sizeof mad_priv->header,
+ DMA_FROM_DEVICE);
+ mad_priv->header.mapping = sg_list.addr;
recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
mad_priv->header.mad_list.mad_queue = recv_queue;
@@ -2549,12 +2548,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
list_del(&mad_priv->header.mad_list.list);
recv_queue->count--;
spin_unlock_irqrestore(&recv_queue->lock, flags);
- dma_unmap_single(qp_info->port_priv->device->dma_device,
- pci_unmap_addr(&mad_priv->header,
- mapping),
- sizeof *mad_priv -
- sizeof mad_priv->header,
- DMA_FROM_DEVICE);
+ ib_dma_unmap_single(qp_info->port_priv->device,
+ mad_priv->header.mapping,
+ sizeof *mad_priv -
+ sizeof mad_priv->header,
+ DMA_FROM_DEVICE);
kmem_cache_free(ib_mad_cache, mad_priv);
printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
break;
@@ -2586,11 +2584,11 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
/* Remove from posted receive MAD list */
list_del(&mad_list->list);
- dma_unmap_single(qp_info->port_priv->device->dma_device,
- pci_unmap_addr(&recv->header, mapping),
- sizeof(struct ib_mad_private) -
- sizeof(struct ib_mad_private_header),
- DMA_FROM_DEVICE);
+ ib_dma_unmap_single(qp_info->port_priv->device,
+ recv->header.mapping,
+ sizeof(struct ib_mad_private) -
+ sizeof(struct ib_mad_private_header),
+ DMA_FROM_DEVICE);
kmem_cache_free(ib_mad_cache, recv);
}
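
The ib_dma_* calls this mad.c conversion switches to are thin dispatchers introduced on the ib_verbs.h side of this series: if a device installs dma_ops (as ipath does below), those are used; otherwise the call falls straight through to the generic DMA API. Roughly, for the single-buffer case:

static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size,
						direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}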
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d5548e73e06..de89717f49f 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -73,7 +73,7 @@ struct ib_mad_private_header {
struct ib_mad_list_head mad_list;
struct ib_mad_recv_wc recv_wc;
struct ib_wc wc;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ u64 mapping;
} __attribute__ ((packed));
struct ib_mad_private {
@@ -126,8 +126,8 @@ struct ib_mad_send_wr_private {
struct list_head agent_list;
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_buf send_buf;
- DECLARE_PCI_UNMAP_ADDR(header_mapping)
- DECLARE_PCI_UNMAP_ADDR(payload_mapping)
+ u64 header_mapping;
+ u64 payload_mapping;
struct ib_send_wr send_wr;
struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
__be64 tid;
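
Why the mad_priv.h fields become plain u64 rather than staying behind DECLARE_PCI_UNMAP_ADDR: on configurations that need no unmap bookkeeping the macro expands to nothing, so no address is stored at all — but ib_dma_map_single() may return a software-constructed handle (ipath's kernel virtual addresses, below) that must always be kept for the matching unmap. The macro's classic shape, with a hypothetical guard standing in for the per-arch condition:

#ifdef NEED_UNMAP_STATE	/* hypothetical; the real guard is per-arch */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#else
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#endif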
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
new file mode 100644
index 00000000000..81a5cdc5733
--- /dev/null
+++ b/drivers/infiniband/core/ucma.c
@@ -0,0 +1,874 @@
+/*
+ * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/idr.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/miscdevice.h>
+
+#include <rdma/rdma_user_cm.h>
+#include <rdma/ib_marshall.h>
+#include <rdma/rdma_cm.h>
+
+MODULE_AUTHOR("Sean Hefty");
+MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
+MODULE_LICENSE("Dual BSD/GPL");
+
+enum {
+ UCMA_MAX_BACKLOG = 128
+};
+
+struct ucma_file {
+ struct mutex mut;
+ struct file *filp;
+ struct list_head ctx_list;
+ struct list_head event_list;
+ wait_queue_head_t poll_wait;
+};
+
+struct ucma_context {
+ int id;
+ struct completion comp;
+ atomic_t ref;
+ int events_reported;
+ int backlog;
+
+ struct ucma_file *file;
+ struct rdma_cm_id *cm_id;
+ u64 uid;
+
+ struct list_head list;
+};
+
+struct ucma_event {
+ struct ucma_context *ctx;
+ struct list_head list;
+ struct rdma_cm_id *cm_id;
+ struct rdma_ucm_event_resp resp;
+};
+
+static DEFINE_MUTEX(mut);
+static DEFINE_IDR(ctx_idr);
+
+static inline struct ucma_context *_ucma_find_context(int id,
+ struct ucma_file *file)
+{
+ struct ucma_context *ctx;
+
+ ctx = idr_find(&ctx_idr, id);
+ if (!ctx)
+ ctx = ERR_PTR(-ENOENT);
+ else if (ctx->file != file)
+ ctx = ERR_PTR(-EINVAL);
+ return ctx;
+}
+
+static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
+{
+ struct ucma_context *ctx;
+
+ mutex_lock(&mut);
+ ctx = _ucma_find_context(id, file);
+ if (!IS_ERR(ctx))
+ atomic_inc(&ctx->ref);
+ mutex_unlock(&mut);
+ return ctx;
+}
+
+static void ucma_put_ctx(struct ucma_context *ctx)
+{
+ if (atomic_dec_and_test(&ctx->ref))
+ complete(&ctx->comp);
+}
+
+static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
+{
+ struct ucma_context *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ atomic_set(&ctx->ref, 1);
+ init_completion(&ctx->comp);
+ ctx->file = file;
+
+ do {
+ ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
+ if (!ret)
+ goto error;
+
+ mutex_lock(&mut);
+ ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
+ mutex_unlock(&mut);
+ } while (ret == -EAGAIN);
+
+ if (ret)
+ goto error;
+
+ list_add_tail(&ctx->list, &file->ctx_list);
+ return ctx;
+
+error:
+ kfree(ctx);
+ return NULL;
+}
+
+static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
+ struct rdma_conn_param *src)
+{
+ if (src->private_data_len)
+ memcpy(dst->private_data, src->private_data,
+ src->private_data_len);
+ dst->private_data_len = src->private_data_len;
+ dst->responder_resources = src->responder_resources;
+ dst->initiator_depth = src->initiator_depth;
+ dst->flow_control = src->flow_control;
+ dst->retry_count = src->retry_count;
+ dst->rnr_retry_count = src->rnr_retry_count;
+ dst->srq = src->srq;
+ dst->qp_num = src->qp_num;
+}
+
+static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
+ struct rdma_ud_param *src)
+{
+ if (src->private_data_len)
+ memcpy(dst->private_data, src->private_data,
+ src->private_data_len);
+ dst->private_data_len = src->private_data_len;
+ ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
+ dst->qp_num = src->qp_num;
+ dst->qkey = src->qkey;
+}
+
+static void ucma_set_event_context(struct ucma_context *ctx,
+ struct rdma_cm_event *event,
+ struct ucma_event *uevent)
+{
+ uevent->ctx = ctx;
+ uevent->resp.uid = ctx->uid;
+ uevent->resp.id = ctx->id;
+}
+
+static int ucma_event_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ struct ucma_event *uevent;
+ struct ucma_context *ctx = cm_id->context;
+ int ret = 0;
+
+ uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
+ if (!uevent)
+ return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;
+
+ uevent->cm_id = cm_id;
+ ucma_set_event_context(ctx, event, uevent);
+ uevent->resp.event = event->event;
+ uevent->resp.status = event->status;
+ if (cm_id->ps == RDMA_PS_UDP)
+ ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
+ else
+ ucma_copy_conn_event(&uevent->resp.param.conn,
+ &event->param.conn);
+
+ mutex_lock(&ctx->file->mut);
+ if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
+ if (!ctx->backlog) {
+ ret = -EDQUOT;
+ goto out;
+ }
+ ctx->backlog--;
+ }
+ list_add_tail(&uevent->list, &ctx->file->event_list);
+ wake_up_interruptible(&ctx->file->poll_wait);
+out:
+ mutex_unlock(&ctx->file->mut);
+ return ret;
+}
+
+static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct ucma_context *ctx;
+ struct rdma_ucm_get_event cmd;
+ struct ucma_event *uevent;
+ int ret = 0;
+ DEFINE_WAIT(wait);
+
+ if (out_len < sizeof uevent->resp)
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ mutex_lock(&file->mut);
+ while (list_empty(&file->event_list)) {
+ if (file->filp->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
+ mutex_unlock(&file->mut);
+ schedule();
+ mutex_lock(&file->mut);
+ finish_wait(&file->poll_wait, &wait);
+ }
+
+ if (ret)
+ goto done;
+
+ uevent = list_entry(file->event_list.next, struct ucma_event, list);
+
+ if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
+ ctx = ucma_alloc_ctx(file);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ uevent->ctx->backlog++;
+ ctx->cm_id = uevent->cm_id;
+ ctx->cm_id->context = ctx;
+ uevent->resp.id = ctx->id;
+ }
+
+ if (copy_to_user((void __user *)(unsigned long)cmd.response,
+ &uevent->resp, sizeof uevent->resp)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ list_del(&uevent->list);
+ uevent->ctx->events_reported++;
+ kfree(uevent);
+done:
+ mutex_unlock(&file->mut);
+ return ret;
+}
+
+static ssize_t ucma_create_id(struct ucma_file *file,
+ const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_create_id cmd;
+ struct rdma_ucm_create_id_resp resp;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (out_len < sizeof(resp))
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ mutex_lock(&file->mut);
+ ctx = ucma_alloc_ctx(file);
+ mutex_unlock(&file->mut);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->uid = cmd.uid;
+ ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
+ if (IS_ERR(ctx->cm_id)) {
+ ret = PTR_ERR(ctx->cm_id);
+ goto err1;
+ }
+
+ resp.id = ctx->id;
+ if (copy_to_user((void __user *)(unsigned long)cmd.response,
+ &resp, sizeof(resp))) {
+ ret = -EFAULT;
+ goto err2;
+ }
+ return 0;
+
+err2:
+ rdma_destroy_id(ctx->cm_id);
+err1:
+ mutex_lock(&mut);
+ idr_remove(&ctx_idr, ctx->id);
+ mutex_unlock(&mut);
+ kfree(ctx);
+ return ret;
+}
+
+static void ucma_cleanup_events(struct ucma_context *ctx)
+{
+ struct ucma_event *uevent, *tmp;
+
+ list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
+ if (uevent->ctx != ctx)
+ continue;
+
+ list_del(&uevent->list);
+
+ /* clear incoming connections. */
+ if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
+ rdma_destroy_id(uevent->cm_id);
+
+ kfree(uevent);
+ }
+}
+
+static int ucma_free_ctx(struct ucma_context *ctx)
+{
+ int events_reported;
+
+ /* No new events will be generated after destroying the id. */
+ rdma_destroy_id(ctx->cm_id);
+
+ /* Clean up events not yet reported to the user. */
+ mutex_lock(&ctx->file->mut);
+ ucma_cleanup_events(ctx);
+ list_del(&ctx->list);
+ mutex_unlock(&ctx->file->mut);
+
+ events_reported = ctx->events_reported;
+ kfree(ctx);
+ return events_reported;
+}
+
+static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_destroy_id cmd;
+ struct rdma_ucm_destroy_id_resp resp;
+ struct ucma_context *ctx;
+ int ret = 0;
+
+ if (out_len < sizeof(resp))
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ mutex_lock(&mut);
+ ctx = _ucma_find_context(cmd.id, file);
+ if (!IS_ERR(ctx))
+ idr_remove(&ctx_idr, ctx->id);
+ mutex_unlock(&mut);
+
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ucma_put_ctx(ctx);
+ wait_for_completion(&ctx->comp);
+ resp.events_reported = ucma_free_ctx(ctx);
+
+ if (copy_to_user((void __user *)(unsigned long)cmd.response,
+ &resp, sizeof(resp)))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_bind_addr cmd;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t ucma_resolve_addr(struct ucma_file *file,
+ const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_resolve_addr cmd;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
+ (struct sockaddr *) &cmd.dst_addr,
+ cmd.timeout_ms);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t ucma_resolve_route(struct ucma_file *file,
+ const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_resolve_route cmd;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
+ struct rdma_route *route)
+{
+ struct rdma_dev_addr *dev_addr;
+
+ resp->num_paths = route->num_paths;
+ switch (route->num_paths) {
+ case 0:
+ dev_addr = &route->addr.dev_addr;
+ ib_addr_get_dgid(dev_addr,
+ (union ib_gid *) &resp->ib_route[0].dgid);
+ ib_addr_get_sgid(dev_addr,
+ (union ib_gid *) &resp->ib_route[0].sgid);
+ resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
+ break;
+ case 2:
+ ib_copy_path_rec_to_user(&resp->ib_route[1],
+ &route->path_rec[1]);
+ /* fall through */
+ case 1:
+ ib_copy_path_rec_to_user(&resp->ib_route[0],
+ &route->path_rec[0]);
+ break;
+ default:
+ break;
+ }
+}
+
+static ssize_t ucma_query_route(struct ucma_file *file,
+ const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_query_route cmd;
+ struct rdma_ucm_query_route_resp resp;
+ struct ucma_context *ctx;
+ struct sockaddr *addr;
+ int ret = 0;
+
+ if (out_len < sizeof(resp))
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ memset(&resp, 0, sizeof resp);
+ addr = &ctx->cm_id->route.addr.src_addr;
+ memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
+ sizeof(struct sockaddr_in) :
+ sizeof(struct sockaddr_in6));
+ addr = &ctx->cm_id->route.addr.dst_addr;
+ memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
+ sizeof(struct sockaddr_in) :
+ sizeof(struct sockaddr_in6));
+ if (!ctx->cm_id->device)
+ goto out;
+
+ resp.node_guid = ctx->cm_id->device->node_guid;
+ resp.port_num = ctx->cm_id->port_num;
+ switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
+ case RDMA_TRANSPORT_IB:
+ ucma_copy_ib_route(&resp, &ctx->cm_id->route);
+ break;
+ default:
+ break;
+ }
+
+out:
+ if (copy_to_user((void __user *)(unsigned long)cmd.response,
+ &resp, sizeof(resp)))
+ ret = -EFAULT;
+
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static void ucma_copy_conn_param(struct rdma_conn_param *dst,
+ struct rdma_ucm_conn_param *src)
+{
+ dst->private_data = src->private_data;
+ dst->private_data_len = src->private_data_len;
+ dst->responder_resources = src->responder_resources;
+ dst->initiator_depth = src->initiator_depth;
+ dst->flow_control = src->flow_control;
+ dst->retry_count = src->retry_count;
+ dst->rnr_retry_count = src->rnr_retry_count;
+ dst->srq = src->srq;
+ dst->qp_num = src->qp_num;
+}
+
+static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_connect cmd;
+ struct rdma_conn_param conn_param;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ if (!cmd.conn_param.valid)
+ return -EINVAL;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ucma_copy_conn_param(&conn_param, &cmd.conn_param);
+ ret = rdma_connect(ctx->cm_id, &conn_param);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_listen cmd;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
+ cmd.backlog : UCMA_MAX_BACKLOG;
+ ret = rdma_listen(ctx->cm_id, ctx->backlog);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_accept cmd;
+ struct rdma_conn_param conn_param;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ if (cmd.conn_param.valid) {
+ ctx->uid = cmd.uid;
+ ucma_copy_conn_param(&conn_param, &cmd.conn_param);
+ ret = rdma_accept(ctx->cm_id, &conn_param);
+ } else
+ ret = rdma_accept(ctx->cm_id, NULL);
+
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_reject cmd;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_disconnect cmd;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = rdma_disconnect(ctx->cm_id);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t ucma_init_qp_attr(struct ucma_file *file,
+ const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_init_qp_attr cmd;
+ struct ib_uverbs_qp_attr resp;
+ struct ucma_context *ctx;
+ struct ib_qp_attr qp_attr;
+ int ret;
+
+ if (out_len < sizeof(resp))
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ resp.qp_attr_mask = 0;
+ memset(&qp_attr, 0, sizeof qp_attr);
+ qp_attr.qp_state = cmd.qp_state;
+ ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
+ if (ret)
+ goto out;
+
+ ib_copy_qp_attr_to_user(&resp, &qp_attr);
+ if (copy_to_user((void __user *)(unsigned long)cmd.response,
+ &resp, sizeof(resp)))
+ ret = -EFAULT;
+
+out:
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
+ int in_len, int out_len)
+{
+ struct rdma_ucm_notify cmd;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
+static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
+ const char __user *inbuf,
+ int in_len, int out_len) = {
+ [RDMA_USER_CM_CMD_CREATE_ID] = ucma_create_id,
+ [RDMA_USER_CM_CMD_DESTROY_ID] = ucma_destroy_id,
+ [RDMA_USER_CM_CMD_BIND_ADDR] = ucma_bind_addr,
+ [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr,
+ [RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
+ [RDMA_USER_CM_CMD_QUERY_ROUTE] = ucma_query_route,
+ [RDMA_USER_CM_CMD_CONNECT] = ucma_connect,
+ [RDMA_USER_CM_CMD_LISTEN] = ucma_listen,
+ [RDMA_USER_CM_CMD_ACCEPT] = ucma_accept,
+ [RDMA_USER_CM_CMD_REJECT] = ucma_reject,
+ [RDMA_USER_CM_CMD_DISCONNECT] = ucma_disconnect,
+ [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr,
+ [RDMA_USER_CM_CMD_GET_EVENT] = ucma_get_event,
+ [RDMA_USER_CM_CMD_GET_OPTION] = NULL,
+ [RDMA_USER_CM_CMD_SET_OPTION] = NULL,
+ [RDMA_USER_CM_CMD_NOTIFY] = ucma_notify,
+};
+
+static ssize_t ucma_write(struct file *filp, const char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct ucma_file *file = filp->private_data;
+ struct rdma_ucm_cmd_hdr hdr;
+ ssize_t ret;
+
+ if (len < sizeof(hdr))
+ return -EINVAL;
+
+ if (copy_from_user(&hdr, buf, sizeof(hdr)))
+ return -EFAULT;
+
+ if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
+ return -EINVAL;
+
+ if (hdr.in + sizeof(hdr) > len)
+ return -EINVAL;
+
+ if (!ucma_cmd_table[hdr.cmd])
+ return -ENOSYS;
+
+ ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
+ if (!ret)
+ ret = len;
+
+ return ret;
+}
+
+static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
+{
+ struct ucma_file *file = filp->private_data;
+ unsigned int mask = 0;
+
+ poll_wait(filp, &file->poll_wait, wait);
+
+ if (!list_empty(&file->event_list))
+ mask = POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
+static int ucma_open(struct inode *inode, struct file *filp)
+{
+ struct ucma_file *file;
+
+ file = kmalloc(sizeof *file, GFP_KERNEL);
+ if (!file)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&file->event_list);
+ INIT_LIST_HEAD(&file->ctx_list);
+ init_waitqueue_head(&file->poll_wait);
+ mutex_init(&file->mut);
+
+ filp->private_data = file;
+ file->filp = filp;
+ return 0;
+}
+
+static int ucma_close(struct inode *inode, struct file *filp)
+{
+ struct ucma_file *file = filp->private_data;
+ struct ucma_context *ctx, *tmp;
+
+ mutex_lock(&file->mut);
+ list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
+ mutex_unlock(&file->mut);
+
+ mutex_lock(&mut);
+ idr_remove(&ctx_idr, ctx->id);
+ mutex_unlock(&mut);
+
+ ucma_free_ctx(ctx);
+ mutex_lock(&file->mut);
+ }
+ mutex_unlock(&file->mut);
+ kfree(file);
+ return 0;
+}
+
+static struct file_operations ucma_fops = {
+ .owner = THIS_MODULE,
+ .open = ucma_open,
+ .release = ucma_close,
+ .write = ucma_write,
+ .poll = ucma_poll,
+};
+
+static struct miscdevice ucma_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "rdma_cm",
+ .fops = &ucma_fops,
+};
+
+static ssize_t show_abi_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
+}
+static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
+
+static int __init ucma_init(void)
+{
+ int ret;
+
+ ret = misc_register(&ucma_misc);
+ if (ret)
+ return ret;
+
+ ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
+ if (ret) {
+ printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
+ goto err;
+ }
+ return 0;
+err:
+ misc_deregister(&ucma_misc);
+ return ret;
+}
+
+static void __exit ucma_cleanup(void)
+{
+ device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
+ misc_deregister(&ucma_misc);
+ idr_destroy(&ctx_idr);
+}
+
+module_init(ucma_init);
+module_exit(ucma_cleanup);
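
The new ucma.c exposes the RDMA CM to userspace as a write()-based command ABI: each write is a struct rdma_ucm_cmd_hdr followed by a per-command payload, dispatched through ucma_cmd_table[] above, with responses copied back through a user pointer in the payload. A minimal userspace sketch, with assumptions flagged: the device node path depends on udev rules, the structures come from <rdma/rdma_user_cm.h> as added elsewhere in this series, and the port-space literal is assumed to be RDMA_PS_TCP:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <rdma/rdma_user_cm.h>

int main(void)
{
	struct {
		struct rdma_ucm_cmd_hdr hdr;
		struct rdma_ucm_create_id cmd;
	} msg;
	struct rdma_ucm_create_id_resp resp;
	int fd;

	fd = open("/dev/infiniband/rdma_cm", O_RDWR);	/* path: assumption */
	if (fd < 0)
		return 1;

	memset(&msg, 0, sizeof msg);
	msg.hdr.cmd = RDMA_USER_CM_CMD_CREATE_ID;
	msg.hdr.in = sizeof msg.cmd;		/* payload after the header */
	msg.hdr.out = sizeof resp;
	msg.cmd.uid = 1;			/* opaque tag echoed in events */
	msg.cmd.response = (unsigned long) &resp; /* kernel writes the id here */
	msg.cmd.ps = 0x0106;			/* RDMA_PS_TCP (assumed value) */

	if (write(fd, &msg, sizeof msg) != sizeof msg)
		return 1;
	printf("new cm_id handle: %u\n", resp.id);
	close(fd);
	return 0;
}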
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index ce46b13ae02..5440da0e59b 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -32,8 +32,8 @@
#include <rdma/ib_marshall.h>
-static void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
- struct ib_ah_attr *src)
+void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
+ struct ib_ah_attr *src)
{
memcpy(dst->grh.dgid, src->grh.dgid.raw, sizeof src->grh.dgid);
dst->grh.flow_label = src->grh.flow_label;
@@ -47,6 +47,7 @@ static void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
dst->port_num = src->port_num;
}
+EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
struct ib_qp_attr *src)
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index db12cc0841d..c95fe952abd 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -52,8 +52,8 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
int i;
list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
- dma_unmap_sg(dev->dma_device, chunk->page_list,
- chunk->nents, DMA_BIDIRECTIONAL);
+ ib_dma_unmap_sg(dev, chunk->page_list,
+ chunk->nents, DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->nents; ++i) {
if (umem->writable && dirty)
set_page_dirty_lock(chunk->page_list[i].page);
@@ -136,10 +136,10 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
chunk->page_list[i].length = PAGE_SIZE;
}
- chunk->nmap = dma_map_sg(dev->dma_device,
- &chunk->page_list[0],
- chunk->nents,
- DMA_BIDIRECTIONAL);
+ chunk->nmap = ib_dma_map_sg(dev,
+ &chunk->page_list[0],
+ chunk->nents,
+ DMA_BIDIRECTIONAL);
if (chunk->nmap <= 0) {
for (i = 0; i < chunk->nents; ++i)
put_page(chunk->page_list[i].page);
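
uverbs_mem.c uses the scatter-gather flavor of the same dispatch; note the caller above treats a return <= 0 from ib_dma_map_sg() as failure, matching dma_map_sg() semantics. Sketched from the ib_verbs.h side of this series:

static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_sg(dev, sg, nents, direction);
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}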
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 179d005ed4a..420c1380f5c 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -161,8 +161,10 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
if (attr_mask & IB_QP_STATE) {
/* Ensure the state is valid */
- if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
- return -EINVAL;
+ if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
+ err = -EINVAL;
+ goto bail0;
+ }
wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));
@@ -184,9 +186,10 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
if (attr->cur_qp_state != IB_QPS_RTR &&
attr->cur_qp_state != IB_QPS_RTS &&
attr->cur_qp_state != IB_QPS_SQD &&
- attr->cur_qp_state != IB_QPS_SQE)
- return -EINVAL;
- else
+ attr->cur_qp_state != IB_QPS_SQE) {
+ err = -EINVAL;
+ goto bail0;
+ } else
wr.next_qp_state =
cpu_to_be32(to_c2_state(attr->cur_qp_state));
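
The c2_qp.c hunks fix a resource leak, not just style: c2_qp_modify() allocates a verbs-queue request up front, so every failure path must unwind through bail0 rather than return directly. A condensed sketch of the shape (not the full function; the vq_req_alloc/vq_req_free helper names are taken from elsewhere in this driver and assumed here):

static int c2_qp_modify_sketch(struct c2_dev *c2dev, struct c2_qp *qp,
			       struct ib_qp_attr *attr, int attr_mask)
{
	struct c2_vq_req *vq_req;
	int err = 0;

	vq_req = vq_req_alloc(c2dev);		/* held for the whole call */
	if (!vq_req)
		return -ENOMEM;			/* nothing to unwind yet */

	if ((attr_mask & IB_QP_STATE) &&
	    (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)) {
		err = -EINVAL;
		goto bail0;			/* early return would leak vq_req */
	}

	/* ... build the adapter message and wait for its reply ... */

bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}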
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index 7dc10551cf1..ec2e603ea24 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
ib_ipath-y := \
ipath_cq.o \
ipath_diag.o \
+ ipath_dma.o \
ipath_driver.o \
ipath_eeprom.o \
ipath_file_ops.o \
diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
new file mode 100644
index 00000000000..6e0f2b8918c
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2006 QLogic, Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_verbs.h>
+
+#include "ipath_verbs.h"
+
+#define BAD_DMA_ADDRESS ((u64) 0)
+
+/*
+ * The following functions implement driver specific replacements
+ * for the ib_dma_*() functions.
+ *
+ * These functions return kernel virtual addresses instead of
+ * device bus addresses since the driver uses the CPU to copy
+ * data instead of using hardware DMA.
+ */
+
+static int ipath_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+ return dma_addr == BAD_DMA_ADDRESS;
+}
+
+static u64 ipath_dma_map_single(struct ib_device *dev,
+ void *cpu_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+ return (u64) cpu_addr;
+}
+
+static void ipath_dma_unmap_single(struct ib_device *dev,
+ u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+}
+
+static u64 ipath_dma_map_page(struct ib_device *dev,
+ struct page *page,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ u64 addr;
+
+ BUG_ON(!valid_dma_direction(direction));
+
+ if (offset + size > PAGE_SIZE) {
+ addr = BAD_DMA_ADDRESS;
+ goto done;
+ }
+
+ addr = (u64) page_address(page);
+ if (addr)
+ addr += offset;
+ /* TODO: handle highmem pages */
+
+done:
+ return addr;
+}
+
+static void ipath_dma_unmap_page(struct ib_device *dev,
+ u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+}
+
+static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ u64 addr;
+ int i;
+ int ret = nents;
+
+ BUG_ON(!valid_dma_direction(direction));
+
+ for (i = 0; i < nents; i++) {
+ addr = (u64) page_address(sg[i].page);
+ /* TODO: handle highmem pages */
+ if (!addr) {
+ ret = 0;
+ break;
+ }
+ }
+ return ret;
+}
+
+static void ipath_unmap_sg(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+}
+
+static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
+{
+ u64 addr = (u64) page_address(sg->page);
+
+ if (addr)
+ addr += sg->offset;
+ return addr;
+}
+
+static unsigned int ipath_sg_dma_len(struct ib_device *dev,
+ struct scatterlist *sg)
+{
+ return sg->length;
+}
+
+static void ipath_sync_single_for_cpu(struct ib_device *dev,
+ u64 addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+}
+
+static void ipath_sync_single_for_device(struct ib_device *dev,
+ u64 addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+}
+
+static void *ipath_dma_alloc_coherent(struct ib_device *dev, size_t size,
+ u64 *dma_handle, gfp_t flag)
+{
+ struct page *p;
+ void *addr = NULL;
+
+ p = alloc_pages(flag, get_order(size));
+ if (p)
+ addr = page_address(p);
+ if (dma_handle)
+ *dma_handle = (u64) addr;
+ return addr;
+}
+
+static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ free_pages((unsigned long) cpu_addr, get_order(size));
+}
+
+struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
+ ipath_mapping_error,
+ ipath_dma_map_single,
+ ipath_dma_unmap_single,
+ ipath_dma_map_page,
+ ipath_dma_unmap_page,
+ ipath_map_sg,
+ ipath_unmap_sg,
+ ipath_sg_dma_address,
+ ipath_sg_dma_len,
+ ipath_sync_single_for_cpu,
+ ipath_sync_single_for_device,
+ ipath_dma_alloc_coherent,
+ ipath_dma_free_coherent
+};
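
ipath_dma.c fills the new dma_ops hook with a positional initializer, so the member order of the struct it targets is load-bearing. From the ib_verbs.h side of this series, the hook looks roughly like this (reconstructed to match the initializer order above; treat the exact prototypes as approximate):

struct ib_dma_mapping_ops {
	int		(*mapping_error)(struct ib_device *dev, u64 dma_addr);
	u64		(*map_single)(struct ib_device *dev, void *ptr,
				      size_t size,
				      enum dma_data_direction direction);
	void		(*unmap_single)(struct ib_device *dev, u64 addr,
					size_t size,
					enum dma_data_direction direction);
	u64		(*map_page)(struct ib_device *dev, struct page *page,
				    unsigned long offset, size_t size,
				    enum dma_data_direction direction);
	void		(*unmap_page)(struct ib_device *dev, u64 addr,
				      size_t size,
				      enum dma_data_direction direction);
	int		(*map_sg)(struct ib_device *dev,
				  struct scatterlist *sg, int nents,
				  enum dma_data_direction direction);
	void		(*unmap_sg)(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction);
	u64		(*dma_address)(struct ib_device *dev,
				       struct scatterlist *sg);
	unsigned int	(*dma_len)(struct ib_device *dev,
				   struct scatterlist *sg);
	void		(*sync_single_for_cpu)(struct ib_device *dev, u64 addr,
					       size_t size,
					       enum dma_data_direction dir);
	void		(*sync_single_for_device)(struct ib_device *dev,
						  u64 addr, size_t size,
						  enum dma_data_direction dir);
	void		*(*alloc_coherent)(struct ib_device *dev, size_t size,
					   u64 *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct ib_device *dev, size_t size,
					 void *cpu_addr, dma_addr_t dma_handle);
};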
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 1aeddb48e35..ae7f21a0cdc 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1825,8 +1825,6 @@ void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
*/
void ipath_shutdown_device(struct ipath_devdata *dd)
{
- u64 val;
-
ipath_dbg("Shutting down the device\n");
dd->ipath_flags |= IPATH_LINKUNK;
@@ -1849,7 +1847,7 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
*/
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
/* flush it */
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
/*
* enough for anything that's going to trickle out to have actually
* done so.
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 340f27e3ebf..b932bcb67a5 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -699,7 +699,6 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
int start_stop)
{
struct ipath_devdata *dd = pd->port_dd;
- u64 tval;
ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n",
start_stop ? "en" : "dis", dd->ipath_unit,
@@ -729,7 +728,7 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl);
/* now be sure chip saw it before we return */
- tval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
if (start_stop) {
/*
* And try to be sure that tail reg update has happened too.
@@ -738,7 +737,7 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
* in memory copy, since we could overwrite an update by the
* chip if we did.
*/
- tval = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
+ ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
}
/* always; new head should be equal to new tail; see above */
bail:
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index e57c7a351cb..7468477ba83 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -1447,7 +1447,7 @@ static void ipath_ht_tidtemplate(struct ipath_devdata *dd)
static int ipath_ht_early_init(struct ipath_devdata *dd)
{
u32 __iomem *piobuf;
- u32 pioincr, val32, egrsize;
+ u32 pioincr, val32;
int i;
/*
@@ -1467,7 +1467,6 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
* errors interrupts if we ever see one).
*/
dd->ipath_rcvegrbufsize = dd->ipath_piosize2k;
- egrsize = dd->ipath_rcvegrbufsize;
/*
* the min() check here is currently a nop, but it may not
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index 6af89683f71..ae8bf9950c6 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -602,7 +602,7 @@ static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
*/
static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
{
- u64 val, tmp, config1, prev_val;
+ u64 val, config1, prev_val;
int ret = 0;
ipath_dbg("Trying to bringup serdes\n");
@@ -633,7 +633,7 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
| INFINIPATH_SERDC0_L1PWR_DN;
ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
/* be sure chip saw it */
- tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
udelay(5); /* need pll reset set at least for a bit */
/*
* after PLL is reset, set the per-lane Resets and TxIdle and
@@ -647,7 +647,7 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
"and txidle (%llx)\n", (unsigned long long) val);
ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
/* be sure chip saw it */
- tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
/* need PLL reset clear for at least 11 usec before lane
* resets cleared; give it a few more to be sure */
udelay(15);
@@ -851,12 +851,12 @@ static int ipath_setup_pe_config(struct ipath_devdata *dd,
int pos, ret;
dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
- dd->ipath_irq = pdev->irq;
ret = pci_enable_msi(dd->pcidev);
if (ret)
ipath_dev_err(dd, "pci_enable_msi failed: %d, "
"interrupts may not work\n", ret);
/* continue even if it fails, we may still be OK... */
+ dd->ipath_irq = pdev->irq;
if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
u16 control;
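
The two-line reorder in ipath_setup_pe_config() is subtle but real: pci_enable_msi() reassigns pdev->irq to the newly allocated MSI vector, so sampling it before the call latched the legacy INTx number. In isolation, the intended order is:

	ret = pci_enable_msi(dd->pcidev);
	if (ret)
		ipath_dev_err(dd, "pci_enable_msi failed: %d, "
			      "interrupts may not work\n", ret);
	/* pdev->irq is now the MSI vector on success, INTx otherwise */
	dd->ipath_irq = pdev->irq;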
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index d819cca524c..d4f6b5239ef 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -347,10 +347,9 @@ done:
static int init_chip_reset(struct ipath_devdata *dd,
struct ipath_portdata **pdp)
{
- struct ipath_portdata *pd;
u32 rtmp;
- *pdp = pd = dd->ipath_pd[0];
+ *pdp = dd->ipath_pd[0];
/* ensure chip does no sends or receives while we re-initialize */
dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0);
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 5652a550d44..72b9e279d19 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -598,10 +598,9 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
* on close
*/
if (errs & INFINIPATH_E_RRCVHDRFULL) {
- int any;
u32 hd, tl;
ipath_stats.sps_hdrqfull++;
- for (any = i = 0; i < dd->ipath_cfgports; i++) {
+ for (i = 0; i < dd->ipath_cfgports; i++) {
struct ipath_portdata *pd = dd->ipath_pd[i];
if (i == 0) {
hd = dd->ipath_port0head;
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index 9a6cbd05adc..851763d7d2d 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -134,7 +134,7 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
*/
if (sge->lkey == 0) {
isge->mr = NULL;
- isge->vaddr = bus_to_virt(sge->addr);
+ isge->vaddr = (void *) sge->addr;
isge->length = sge->length;
isge->sge_length = sge->length;
ret = 1;
@@ -202,12 +202,12 @@ int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
int ret;
/*
- * We use RKEY == zero for physical addresses
- * (see ipath_get_dma_mr).
+ * We use RKEY == zero for kernel virtual addresses
+ * (see ipath_get_dma_mr and ipath_dma.c).
*/
if (rkey == 0) {
sge->mr = NULL;
- sge->vaddr = phys_to_virt(vaddr);
+ sge->vaddr = (void *) vaddr;
sge->length = len;
sge->sge_length = len;
ss->sg_list = NULL;
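
The ipath_keys.c change completes the round trip with ipath_dma.c: the "DMA address" a ULP stores in an SGE is now a kernel virtual address, so the reserved lkey/rkey 0 paths above can recover the pointer with a plain cast instead of bus_to_virt()/phys_to_virt(). A caller-side sketch (buffer ownership and the device pointer are assumed):

#include <rdma/ib_verbs.h>

static void ipath_sge_sketch(struct ib_device *dev, void *buf, u32 len,
			     struct ib_sge *sge)
{
	/* Under ipath's dma_ops this returns (u64) buf, not a bus address. */
	sge->addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	sge->length = len;
	sge->lkey = 0;	/* reserved lkey, handled by ipath_lkey_ok() above */
}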
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index a0673c1eef7..8cc8598d6c6 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -54,6 +54,8 @@ static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
* @acc: access flags
*
* Returns the memory region on success, otherwise returns an errno.
+ * Note that all DMA addresses should be created via the
+ * struct ib_dma_mapping_ops functions (see ipath_dma.c).
*/
struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
{
@@ -149,8 +151,7 @@ struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
m = 0;
n = 0;
for (i = 0; i < num_phys_buf; i++) {
- mr->mr.map[m]->segs[n].vaddr =
- phys_to_virt(buffer_list[i].addr);
+ mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
mr->mr.map[m]->segs[n].length = buffer_list[i].size;
mr->mr.length += buffer_list[i].size;
n++;
@@ -347,7 +348,7 @@ int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
n = 0;
ps = 1 << fmr->page_shift;
for (i = 0; i < list_len; i++) {
- fmr->mr.map[m]->segs[n].vaddr = phys_to_virt(page_list[i]);
+ fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
fmr->mr.map[m]->segs[n].length = ps;
if (++n == IPATH_SEGSZ) {
m++;
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index 182de34f9f4..ffa6318ad0c 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -215,7 +215,6 @@ static ssize_t store_mlid(struct device *dev,
size_t count)
{
struct ipath_devdata *dd = dev_get_drvdata(dev);
- int unit;
u16 mlid;
int ret;
@@ -223,8 +222,6 @@ static ssize_t store_mlid(struct device *dev,
if (ret < 0 || mlid < IPATH_MULTICAST_LID_BASE)
goto invalid;
- unit = dd->ipath_unit;
-
dd->ipath_mlid = mlid;
goto bail;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index acdee33ee1f..2aaacdb7e52 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1599,6 +1599,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
dev->detach_mcast = ipath_multicast_detach;
dev->process_mad = ipath_process_mad;
dev->mmap = ipath_mmap;
+ dev->dma_ops = &ipath_dma_mapping_ops;
snprintf(dev->node_desc, sizeof(dev->node_desc),
IPATH_IDSTR " %s", init_utsname()->nodename);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 8039f6e5f0c..c0c8d5b24a7 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -812,4 +812,6 @@ extern unsigned int ib_ipath_max_srq_wrs;
extern const u32 ib_ipath_rnr_table[];
+extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;
+
#endif /* IPATH_VERBS_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 99547996aba..07deee8f81c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -105,12 +105,12 @@ struct ipoib_mcast;
struct ipoib_rx_buf {
struct sk_buff *skb;
- dma_addr_t mapping;
+ u64 mapping;
};
struct ipoib_tx_buf {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(mapping)
+ u64 mapping;
};
/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f10fba5d326..59d9594ed6d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -109,9 +109,8 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
ret = ib_post_recv(priv->qp, &param, &bad_wr);
if (unlikely(ret)) {
ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
- dma_unmap_single(priv->ca->dma_device,
- priv->rx_ring[id].mapping,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
+ IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(priv->rx_ring[id].skb);
priv->rx_ring[id].skb = NULL;
}
@@ -123,7 +122,7 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
- dma_addr_t addr;
+ u64 addr;
skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
if (!skb)
@@ -136,10 +135,9 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
*/
skb_reserve(skb, 4);
- addr = dma_map_single(priv->ca->dma_device,
- skb->data, IPOIB_BUF_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(addr))) {
+ addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
dev_kfree_skb_any(skb);
return -EIO;
}
@@ -174,7 +172,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
struct ipoib_dev_priv *priv = netdev_priv(dev);
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
struct sk_buff *skb;
- dma_addr_t addr;
+ u64 addr;
ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n",
wr_id, wc->opcode, wc->status);
@@ -193,8 +191,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
ipoib_warn(priv, "failed recv event "
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
- dma_unmap_single(priv->ca->dma_device, addr,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ib_dma_unmap_single(priv->ca, addr,
+ IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
priv->rx_ring[wr_id].skb = NULL;
return;
@@ -212,8 +210,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
wc->byte_len, wc->slid);
- dma_unmap_single(priv->ca->dma_device, addr,
- IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
skb_put(skb, wc->byte_len);
skb_pull(skb, IB_GRH_BYTES);
@@ -261,10 +258,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
tx_req = &priv->tx_ring[wr_id];
- dma_unmap_single(priv->ca->dma_device,
- pci_unmap_addr(tx_req, mapping),
- tx_req->skb->len,
- DMA_TO_DEVICE);
+ ib_dma_unmap_single(priv->ca, tx_req->mapping,
+ tx_req->skb->len, DMA_TO_DEVICE);
++priv->stats.tx_packets;
priv->stats.tx_bytes += tx_req->skb->len;
@@ -311,7 +306,7 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
static inline int post_send(struct ipoib_dev_priv *priv,
unsigned int wr_id,
struct ib_ah *address, u32 qpn,
- dma_addr_t addr, int len)
+ u64 addr, int len)
{
struct ib_send_wr *bad_wr;
@@ -330,7 +325,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_tx_buf *tx_req;
- dma_addr_t addr;
+ u64 addr;
if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -353,21 +348,20 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
*/
tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
tx_req->skb = skb;
- addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(addr))) {
+ addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
++priv->stats.tx_errors;
dev_kfree_skb_any(skb);
return;
}
- pci_unmap_addr_set(tx_req, mapping, addr);
+ tx_req->mapping = addr;
if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
address->ah, qpn, addr, skb->len))) {
ipoib_warn(priv, "post_send failed\n");
++priv->stats.tx_errors;
- dma_unmap_single(priv->ca->dma_device, addr, skb->len,
- DMA_TO_DEVICE);
+ ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
} else {
dev->trans_start = jiffies;
@@ -538,24 +532,27 @@ int ipoib_ib_dev_stop(struct net_device *dev)
while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
tx_req = &priv->tx_ring[priv->tx_tail &
(ipoib_sendq_size - 1)];
- dma_unmap_single(priv->ca->dma_device,
- pci_unmap_addr(tx_req, mapping),
- tx_req->skb->len,
- DMA_TO_DEVICE);
+ ib_dma_unmap_single(priv->ca,
+ tx_req->mapping,
+ tx_req->skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
}
- for (i = 0; i < ipoib_recvq_size; ++i)
- if (priv->rx_ring[i].skb) {
- dma_unmap_single(priv->ca->dma_device,
- pci_unmap_addr(&priv->rx_ring[i],
- mapping),
- IPOIB_BUF_SIZE,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(priv->rx_ring[i].skb);
- priv->rx_ring[i].skb = NULL;
- }
+ for (i = 0; i < ipoib_recvq_size; ++i) {
+ struct ipoib_rx_buf *rx_req;
+
+ rx_req = &priv->rx_ring[i];
+ if (!rx_req->skb)
+ continue;
+ ib_dma_unmap_single(priv->ca,
+ rx_req->mapping,
+ IPOIB_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(rx_req->skb);
+ rx_req->skb = NULL;
+ }
goto timeout;
}
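
ipoib's RX/TX paths also switch their failure checks to the wrapped form. The mapping-error test dispatches the same way as the map/unmap calls (a sketch; in 2.6-era kernels dma_mapping_error() took only the address):

static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dma_addr);
}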
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index c0928024372..705eb1d0e55 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -497,8 +497,6 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
return;
}
- skb_queue_head_init(&neigh->queue);
-
/*
* We can only be called from ipoib_start_xmit, so we're
* inside tx_lock -- no need to save/restore flags.
@@ -806,6 +804,7 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
neigh->neighbour = neighbour;
*to_ipoib_neigh(neighbour) = neigh;
+ skb_queue_head_init(&neigh->queue);
return neigh;
}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 234e5b061a7..cae8c96a55f 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -182,7 +182,7 @@ struct iser_regd_buf {
struct iser_mem_reg reg; /* memory registration info */
void *virt_addr;
struct iser_device *device; /* device->device for dma_unmap */
- dma_addr_t dma_addr; /* if non zero, addr for dma_unmap */
+ u64 dma_addr; /* if non zero, addr for dma_unmap */
enum dma_data_direction direction; /* direction for dma_unmap */
unsigned int data_size;
atomic_t ref_count; /* refcount, freed when dec to 0 */
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 9b3d79c796c..e73c87b9be4 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -487,10 +487,8 @@ int iser_send_control(struct iscsi_conn *conn,
struct iscsi_iser_conn *iser_conn = conn->dd_data;
struct iser_desc *mdesc = mtask->dd_data;
struct iser_dto *send_dto = NULL;
- unsigned int itt;
unsigned long data_seg_len;
int err = 0;
- unsigned char opcode;
struct iser_regd_buf *regd_buf;
struct iser_device *device;
@@ -512,8 +510,6 @@ int iser_send_control(struct iscsi_conn *conn,
iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
- itt = ntohl(mtask->hdr->itt);
- opcode = mtask->hdr->opcode & ISCSI_OPCODE_MASK;
data_seg_len = ntoh24(mtask->hdr->dlength);
if (data_seg_len > 0) {
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 3aedd59b8a8..fc9f1fd0ae5 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -52,7 +52,7 @@
*/
int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
{
- struct device *dma_device;
+ struct ib_device *dev;
if ((atomic_read(&regd_buf->ref_count) == 0) ||
atomic_dec_and_test(&regd_buf->ref_count)) {
@@ -61,8 +61,8 @@ int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
iser_unreg_mem(&regd_buf->reg);
if (regd_buf->dma_addr) {
- dma_device = regd_buf->device->ib_device->dma_device;
- dma_unmap_single(dma_device,
+ dev = regd_buf->device->ib_device;
+ ib_dma_unmap_single(dev,
regd_buf->dma_addr,
regd_buf->data_size,
regd_buf->direction);
@@ -84,12 +84,12 @@ void iser_reg_single(struct iser_device *device,
struct iser_regd_buf *regd_buf,
enum dma_data_direction direction)
{
- dma_addr_t dma_addr;
+ u64 dma_addr;
- dma_addr = dma_map_single(device->ib_device->dma_device,
- regd_buf->virt_addr,
- regd_buf->data_size, direction);
- BUG_ON(dma_mapping_error(dma_addr));
+ dma_addr = ib_dma_map_single(device->ib_device,
+ regd_buf->virt_addr,
+ regd_buf->data_size, direction);
+ BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));
regd_buf->reg.lkey = device->mr->lkey;
regd_buf->reg.len = regd_buf->data_size;
@@ -107,7 +107,7 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
enum iser_data_dir cmd_dir)
{
int dma_nents;
- struct device *dma_device;
+ struct ib_device *dev;
char *mem = NULL;
struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
unsigned long cmd_data_len = data->data_len;
@@ -147,17 +147,12 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
iser_ctask->data_copy[cmd_dir].copy_buf = mem;
- dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
-
- if (cmd_dir == ISER_DIR_OUT)
- dma_nents = dma_map_sg(dma_device,
- &iser_ctask->data_copy[cmd_dir].sg_single,
- 1, DMA_TO_DEVICE);
- else
- dma_nents = dma_map_sg(dma_device,
- &iser_ctask->data_copy[cmd_dir].sg_single,
- 1, DMA_FROM_DEVICE);
-
+ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ dma_nents = ib_dma_map_sg(dev,
+ &iser_ctask->data_copy[cmd_dir].sg_single,
+ 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
BUG_ON(dma_nents == 0);
iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
@@ -170,19 +165,16 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
enum iser_data_dir cmd_dir)
{
- struct device *dma_device;
+ struct ib_device *dev;
struct iser_data_buf *mem_copy;
unsigned long cmd_data_len;
- dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
- mem_copy = &iser_ctask->data_copy[cmd_dir];
+ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ mem_copy = &iser_ctask->data_copy[cmd_dir];
- if (cmd_dir == ISER_DIR_OUT)
- dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
- DMA_TO_DEVICE);
- else
- dma_unmap_sg(dma_device, &mem_copy->sg_single, 1,
- DMA_FROM_DEVICE);
+ ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (cmd_dir == ISER_DIR_IN) {
char *mem;
@@ -231,11 +223,12 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
* consecutive elements. Also, it handles one entry SG.
*/
static int iser_sg_to_page_vec(struct iser_data_buf *data,
- struct iser_page_vec *page_vec)
+ struct iser_page_vec *page_vec,
+ struct ib_device *ibdev)
{
struct scatterlist *sg = (struct scatterlist *)data->buf;
- dma_addr_t first_addr, last_addr, page;
- int start_aligned, end_aligned;
+ u64 first_addr, last_addr, page;
+ int end_aligned;
unsigned int cur_page = 0;
unsigned long total_sz = 0;
int i;
@@ -244,19 +237,21 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
for (i = 0; i < data->dma_nents; i++) {
- total_sz += sg_dma_len(&sg[i]);
+ unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+
+ total_sz += dma_len;
- first_addr = sg_dma_address(&sg[i]);
- last_addr = first_addr + sg_dma_len(&sg[i]);
+ first_addr = ib_sg_dma_address(ibdev, &sg[i]);
+ last_addr = first_addr + dma_len;
- start_aligned = !(first_addr & ~MASK_4K);
end_aligned = !(last_addr & ~MASK_4K);
/* continue to collect page fragments till aligned or SG ends */
while (!end_aligned && (i + 1 < data->dma_nents)) {
i++;
- total_sz += sg_dma_len(&sg[i]);
- last_addr = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]);
+ dma_len = ib_sg_dma_len(ibdev, &sg[i]);
+ total_sz += dma_len;
+ last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len;
end_aligned = !(last_addr & ~MASK_4K);
}
@@ -288,10 +283,11 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
* the number of entries which are aligned correctly. Supports the case where
* consecutive SG elements are actually fragments of the same physical page.
*/
-static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
+static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
+ struct ib_device *ibdev)
{
struct scatterlist *sg;
- dma_addr_t end_addr, next_addr;
+ u64 end_addr, next_addr;
int i, cnt;
unsigned int ret_len = 0;
@@ -303,12 +299,12 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
(unsigned long)page_to_phys(sg[i].page),
(unsigned long)sg[i].offset,
(unsigned long)sg[i].length); */
- end_addr = sg_dma_address(&sg[i]) +
- sg_dma_len(&sg[i]);
+ end_addr = ib_sg_dma_address(ibdev, &sg[i]) +
+ ib_sg_dma_len(ibdev, &sg[i]);
/* iser_dbg("Checking sg iobuf end address "
"0x%08lX\n", end_addr); */
if (i + 1 < data->dma_nents) {
- next_addr = sg_dma_address(&sg[i+1]);
+ next_addr = ib_sg_dma_address(ibdev, &sg[i+1]);
/* are i, i+1 fragments of the same page? */
if (end_addr == next_addr)
continue;
@@ -325,7 +321,8 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data)
return ret_len;
}
-static void iser_data_buf_dump(struct iser_data_buf *data)
+static void iser_data_buf_dump(struct iser_data_buf *data,
+ struct ib_device *ibdev)
{
struct scatterlist *sg = (struct scatterlist *)data->buf;
int i;
@@ -333,9 +330,9 @@ static void iser_data_buf_dump(struct iser_data_buf *data)
for (i = 0; i < data->dma_nents; i++)
iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
"off:0x%x sz:0x%x dma_len:0x%x\n",
- i, (unsigned long)sg_dma_address(&sg[i]),
+ i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]),
sg[i].page, sg[i].offset,
- sg[i].length,sg_dma_len(&sg[i]));
+ sg[i].length, ib_sg_dma_len(ibdev, &sg[i]));
}
static void iser_dump_page_vec(struct iser_page_vec *page_vec)
@@ -349,7 +346,8 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec)
}
static void iser_page_vec_build(struct iser_data_buf *data,
- struct iser_page_vec *page_vec)
+ struct iser_page_vec *page_vec,
+ struct ib_device *ibdev)
{
int page_vec_len = 0;
@@ -357,14 +355,14 @@ static void iser_page_vec_build(struct iser_data_buf *data,
page_vec->offset = 0;
iser_dbg("Translating sg sz: %d\n", data->dma_nents);
- page_vec_len = iser_sg_to_page_vec(data,page_vec);
+ page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len);
page_vec->length = page_vec_len;
if (page_vec_len * SIZE_4K < page_vec->data_size) {
iser_err("page_vec too short to hold this SG\n");
- iser_data_buf_dump(data);
+ iser_data_buf_dump(data, ibdev);
iser_dump_page_vec(page_vec);
BUG();
}
@@ -375,13 +373,12 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
enum iser_data_dir iser_dir,
enum dma_data_direction dma_dir)
{
- struct device *dma_device;
+ struct ib_device *dev;
iser_ctask->dir[iser_dir] = 1;
- dma_device =
- iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
- data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir);
+ data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
if (data->dma_nents == 0) {
iser_err("dma_map_sg failed!!!\n");
return -EINVAL;
@@ -391,20 +388,19 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
{
- struct device *dma_device;
+ struct ib_device *dev;
struct iser_data_buf *data;
- dma_device =
- iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device;
+ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
if (iser_ctask->dir[ISER_DIR_IN]) {
data = &iser_ctask->data[ISER_DIR_IN];
- dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE);
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
}
if (iser_ctask->dir[ISER_DIR_OUT]) {
data = &iser_ctask->data[ISER_DIR_OUT];
- dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE);
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
}
}
@@ -419,6 +415,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
{
struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
+ struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
struct iser_regd_buf *regd_buf;
int aligned_len;
@@ -428,11 +425,11 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
regd_buf = &iser_ctask->rdma_regd[cmd_dir];
- aligned_len = iser_data_buf_aligned_len(mem);
+ aligned_len = iser_data_buf_aligned_len(mem, ibdev);
if (aligned_len != mem->dma_nents) {
iser_err("rdma alignment violation %d/%d aligned\n",
aligned_len, mem->size);
- iser_data_buf_dump(mem);
+ iser_data_buf_dump(mem, ibdev);
/* unmap the command data before accessing it */
iser_dma_unmap_task_data(iser_ctask);
@@ -450,8 +447,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
regd_buf->reg.lkey = device->mr->lkey;
regd_buf->reg.rkey = device->mr->rkey;
- regd_buf->reg.len = sg_dma_len(&sg[0]);
- regd_buf->reg.va = sg_dma_address(&sg[0]);
+ regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
+ regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
regd_buf->reg.is_fmr = 0;
iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
@@ -461,10 +458,10 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
(unsigned long)regd_buf->reg.va,
(unsigned long)regd_buf->reg.len);
} else { /* use FMR for multiple dma entries */
- iser_page_vec_build(mem, ib_conn->page_vec);
+ iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
if (err) {
- iser_data_buf_dump(mem);
+ iser_data_buf_dump(mem, ibdev);
iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
ntoh24(iser_ctask->desc.iscsi_header.dlength));
iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index a6289595557..e9b6a6f07dd 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -122,9 +122,8 @@ static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
if (!iu->buf)
goto out_free_iu;
- iu->dma = dma_map_single(host->dev->dev->dma_device,
- iu->buf, size, direction);
- if (dma_mapping_error(iu->dma))
+ iu->dma = ib_dma_map_single(host->dev->dev, iu->buf, size, direction);
+ if (ib_dma_mapping_error(host->dev->dev, iu->dma))
goto out_free_buf;
iu->size = size;
@@ -145,8 +144,7 @@ static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
if (!iu)
return;
- dma_unmap_single(host->dev->dev->dma_device,
- iu->dma, iu->size, iu->direction);
+ ib_dma_unmap_single(host->dev->dev, iu->dma, iu->size, iu->direction);
kfree(iu->buf);
kfree(iu);
}
@@ -482,8 +480,8 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
scat = &req->fake_sg;
}
- dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents,
- scmnd->sc_data_direction);
+ ib_dma_unmap_sg(target->srp_host->dev->dev, scat, nents,
+ scmnd->sc_data_direction);
}
static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
@@ -595,23 +593,26 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
int i, j;
int ret;
struct srp_device *dev = target->srp_host->dev;
+ struct ib_device *ibdev = dev->dev;
if (!dev->fmr_pool)
return -ENODEV;
- if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) &&
+ if ((ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask) &&
mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3))
return -EINVAL;
len = page_cnt = 0;
for (i = 0; i < sg_cnt; ++i) {
- if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) {
+ unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+
+ if (ib_sg_dma_address(ibdev, &scat[i]) & ~dev->fmr_page_mask) {
if (i > 0)
return -EINVAL;
else
++page_cnt;
}
- if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) &
+ if ((ib_sg_dma_address(ibdev, &scat[i]) + dma_len) &
~dev->fmr_page_mask) {
if (i < sg_cnt - 1)
return -EINVAL;
@@ -619,7 +620,7 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
++page_cnt;
}
- len += sg_dma_len(&scat[i]);
+ len += dma_len;
}
page_cnt += len >> dev->fmr_page_shift;
@@ -631,10 +632,14 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
return -ENOMEM;
page_cnt = 0;
- for (i = 0; i < sg_cnt; ++i)
- for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size)
+ for (i = 0; i < sg_cnt; ++i) {
+ unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+
+ for (j = 0; j < dma_len; j += dev->fmr_page_size)
dma_pages[page_cnt++] =
- (sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j;
+ (ib_sg_dma_address(ibdev, &scat[i]) &
+ dev->fmr_page_mask) + j;
+ }
req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
dma_pages, page_cnt, io_addr);
@@ -644,7 +649,8 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
goto out;
}
- buf->va = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask);
+ buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
+ ~dev->fmr_page_mask);
buf->key = cpu_to_be32(req->fmr->fmr->rkey);
buf->len = cpu_to_be32(len);
@@ -663,6 +669,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
struct srp_cmd *cmd = req->cmd->buf;
int len, nents, count;
u8 fmt = SRP_DATA_DESC_DIRECT;
+ struct srp_device *dev;
+ struct ib_device *ibdev;
if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
return sizeof (struct srp_cmd);
@@ -687,8 +695,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
}
- count = dma_map_sg(target->srp_host->dev->dev->dma_device,
- scat, nents, scmnd->sc_data_direction);
+ dev = target->srp_host->dev;
+ ibdev = dev->dev;
+
+ count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
fmt = SRP_DATA_DESC_DIRECT;
len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
@@ -702,9 +712,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
*/
struct srp_direct_buf *buf = (void *) cmd->add_data;
- buf->va = cpu_to_be64(sg_dma_address(scat));
- buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey);
- buf->len = cpu_to_be32(sg_dma_len(scat));
+ buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
+ buf->key = cpu_to_be32(dev->mr->rkey);
+ buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
} else if (srp_map_fmr(target, scat, count, req,
(void *) cmd->add_data)) {
/*
@@ -722,13 +732,14 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
count * sizeof (struct srp_direct_buf);
for (i = 0; i < count; ++i) {
+ unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+
buf->desc_list[i].va =
- cpu_to_be64(sg_dma_address(&scat[i]));
+ cpu_to_be64(ib_sg_dma_address(ibdev, &scat[i]));
buf->desc_list[i].key =
- cpu_to_be32(target->srp_host->dev->mr->rkey);
- buf->desc_list[i].len =
- cpu_to_be32(sg_dma_len(&scat[i]));
- datalen += sg_dma_len(&scat[i]);
+ cpu_to_be32(dev->mr->rkey);
+ buf->desc_list[i].len = cpu_to_be32(dma_len);
+ datalen += dma_len;
}
if (scmnd->sc_data_direction == DMA_TO_DEVICE)
@@ -808,13 +819,15 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
+ struct ib_device *dev;
struct srp_iu *iu;
u8 opcode;
iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
- dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
- target->max_ti_iu_len, DMA_FROM_DEVICE);
+ dev = target->srp_host->dev->dev;
+ ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
+ DMA_FROM_DEVICE);
opcode = *(u8 *) iu->buf;
@@ -850,8 +863,8 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
break;
}
- dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
- target->max_ti_iu_len, DMA_FROM_DEVICE);
+ ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
+ DMA_FROM_DEVICE);
}
static void srp_completion(struct ib_cq *cq, void *target_ptr)
@@ -969,6 +982,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
struct srp_request *req;
struct srp_iu *iu;
struct srp_cmd *cmd;
+ struct ib_device *dev;
int len;
if (target->state == SRP_TARGET_CONNECTING)
@@ -985,8 +999,9 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
if (!iu)
goto err;
- dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
- srp_max_iu_len, DMA_TO_DEVICE);
+ dev = target->srp_host->dev->dev;
+ ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
+ DMA_TO_DEVICE);
req = list_entry(target->free_reqs.next, struct srp_request, list);
@@ -1018,8 +1033,8 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
goto err_unmap;
}
- dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
- srp_max_iu_len, DMA_TO_DEVICE);
+ ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
+ DMA_TO_DEVICE);
if (__srp_post_send(target, iu, len)) {
printk(KERN_ERR PFX "Send failed\n");
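
srp_map_fmr() can only cover the mapped scatterlist with a single FMR when
every interior element is aligned on both ends: only the first element may
start inside an FMR page, and only the last may end inside one. A hedged
restatement of that rule, with hypothetical names (page_mask stands in for
dev->fmr_page_mask):

#include <rdma/ib_verbs.h>

static int example_fmr_coverable(struct ib_device *ibdev,
				 struct scatterlist *scat, int sg_cnt,
				 u64 page_mask)
{
	int i;

	for (i = 0; i < sg_cnt; ++i) {
		u64 addr = ib_sg_dma_address(ibdev, &scat[i]);
		unsigned int len = ib_sg_dma_len(ibdev, &scat[i]);

		/* a misaligned start is tolerable only on the first entry */
		if ((addr & ~page_mask) && i > 0)
			return 0;
		/* a misaligned end is tolerable only on the last entry */
		if (((addr + len) & ~page_mask) && i < sg_cnt - 1)
			return 0;
	}
	return 1;
}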
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index d4e35ef5137..868a540ef7c 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -161,7 +161,7 @@ struct srp_target_port {
};
struct srp_iu {
- dma_addr_t dma;
+ u64 dma;
void *buf;
size_t size;
enum dma_data_direction direction;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 176142c6149..7399ba79111 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -12,7 +12,7 @@ config NEW_LEDS
config LEDS_CLASS
tristate "LED Class Support"
- depends NEW_LEDS
+ depends on NEW_LEDS
help
This option enables the led sysfs class in /sys/class/leds. You'll
need this to do anything useful with LEDs. If unsure, say N.
@@ -21,28 +21,28 @@ comment "LED drivers"
config LEDS_CORGI
tristate "LED Support for the Sharp SL-C7x0 series"
- depends LEDS_CLASS && PXA_SHARP_C7xx
+ depends on LEDS_CLASS && PXA_SHARP_C7xx
help
This option enables support for the LEDs on Sharp Zaurus
SL-C7x0 series (C700, C750, C760, C860).
config LEDS_LOCOMO
tristate "LED Support for Locomo device"
- depends LEDS_CLASS && SHARP_LOCOMO
+ depends on LEDS_CLASS && SHARP_LOCOMO
help
This option enables support for the LEDs on Sharp Locomo.
Zaurus models SL-5500 and SL-5600.
config LEDS_SPITZ
tristate "LED Support for the Sharp SL-Cxx00 series"
- depends LEDS_CLASS && PXA_SHARP_Cxx00
+ depends on LEDS_CLASS && PXA_SHARP_Cxx00
help
This option enables support for the LEDs on Sharp Zaurus
SL-Cxx00 series (C1000, C3000, C3100).
config LEDS_IXP4XX
tristate "LED Support for GPIO connected LEDs on IXP4XX processors"
- depends LEDS_CLASS && ARCH_IXP4XX
+ depends on LEDS_CLASS && ARCH_IXP4XX
help
This option enables support for the LEDs connected to GPIO
outputs of the Intel IXP4XX processors. To be useful the
@@ -51,7 +51,7 @@ config LEDS_IXP4XX
config LEDS_TOSA
tristate "LED Support for the Sharp SL-6000 series"
- depends LEDS_CLASS && PXA_SHARPSL
+ depends on LEDS_CLASS && PXA_SHARPSL
help
This option enables support for the LEDs on Sharp Zaurus
SL-6000 series.
@@ -65,7 +65,7 @@ config LEDS_S3C24XX
config LEDS_AMS_DELTA
tristate "LED Support for the Amstrad Delta (E3)"
- depends LEDS_CLASS && MACH_AMS_DELTA
+ depends on LEDS_CLASS && MACH_AMS_DELTA
help
This option enables support for the LEDs on Amstrad Delta (E3).
@@ -86,7 +86,7 @@ comment "LED Triggers"
config LEDS_TRIGGERS
bool "LED Trigger support"
- depends NEW_LEDS
+ depends on NEW_LEDS
help
This option enables trigger support for the leds class.
These triggers allow kernel events to drive the LEDs and can
@@ -94,21 +94,21 @@ config LEDS_TRIGGERS
config LEDS_TRIGGER_TIMER
tristate "LED Timer Trigger"
- depends LEDS_TRIGGERS
+ depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by a programmable timer
via sysfs. If unsure, say Y.
config LEDS_TRIGGER_IDE_DISK
bool "LED IDE Disk Trigger"
- depends LEDS_TRIGGERS && BLK_DEV_IDEDISK
+ depends on LEDS_TRIGGERS && BLK_DEV_IDEDISK
help
This allows LEDs to be controlled by IDE disk activity.
If unsure, say Y.
config LEDS_TRIGGER_HEARTBEAT
tristate "LED Heartbeat Trigger"
- depends LEDS_TRIGGERS
+ depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by a CPU load average.
The flash frequency is a hyperbolic function of the 1-minute
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index 89dd18c3c5c..5ed0adc4ca2 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -18,7 +18,7 @@
#include <linux/i2c.h>
-MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net.");
+MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
MODULE_DESCRIPTION("A low-level driver for OmniVision ov7670 sensors");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 03bf164f9e8..c2ae2a24629 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1930,9 +1930,8 @@ static int e100_rx_alloc_list(struct nic *nic)
nic->rx_to_use = nic->rx_to_clean = NULL;
nic->ru_running = RU_UNINITIALIZED;
- if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC)))
+ if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
return -ENOMEM;
- memset(nic->rxs, 0, sizeof(struct rx) * count);
for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
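
The e100 hunk above is the standard kmalloc()+memset() to kcalloc()
conversion; besides zeroing the buffer in one call, kcalloc() returns NULL if
count * size would overflow, a check the open-coded pair never made. A small
sketch with an illustrative struct:

#include <linux/slab.h>

struct example_rx { struct example_rx *next; };

static struct example_rx *example_alloc_rxs(unsigned int count)
{
	/* zeroed allocation with an implicit n * size overflow check */
	return kcalloc(count, sizeof(struct example_rx), GFP_ATOMIC);
}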
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 0b36dd5cdac..2978c09860e 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -663,7 +663,7 @@ config V850E_UART
config V850E_UARTB
bool
- depends V850E_UART && V850E_ME2
+ depends on V850E_UART && V850E_ME2
default y
config V850E_UART_CONSOLE
@@ -909,7 +909,7 @@ config SERIAL_M32R_PLDSIO
config SERIAL_TXX9
bool "TMPTX39XX/49XX SIO support"
- depends HAS_TXX9_SERIAL
+ depends on HAS_TXX9_SERIAL
select SERIAL_CORE
default y
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 7d623003e65..71e6a24d8c2 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -1510,7 +1510,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
}
if ( (retval = pci_request_regions(dev, "icom"))) {
- dev_err(&dev->dev, "pci_request_region FAILED\n");
+ dev_err(&dev->dev, "pci_request_regions FAILED\n");
pci_disable_device(dev);
return retval;
}
diff --git a/fs/Kconfig b/fs/Kconfig
index b3b5aa0edff..276ff3baaaf 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -12,9 +12,7 @@ config EXT2_FS
Ext2 is a standard Linux file system for hard disks.
To compile this file system support as a module, choose M here: the
- module will be called ext2. Be aware however that the file system
- of your root partition (the one containing the directory /) cannot
- be compiled as a module, and so this could be dangerous.
+ module will be called ext2.
If unsure, say Y.
@@ -98,9 +96,7 @@ config EXT3_FS
(available at <http://sourceforge.net/projects/e2fsprogs/>).
To compile this file system support as a module, choose M here: the
- module will be called ext3. Be aware however that the file system
- of your root partition (the one containing the directory /) cannot
- be compiled as a module, and so this may be dangerous.
+ module will be called ext3.
config EXT3_FS_XATTR
bool "Ext3 extended attributes"
@@ -163,9 +159,7 @@ config EXT4DEV_FS
features will be added to ext4dev gradually.
To compile this file system support as a module, choose M here. The
- module will be called ext4dev. Be aware, however, that the filesystem
- of your root partition (the one containing the directory /) cannot
- be compiled as a module, and so this could be dangerous.
+ module will be called ext4dev.
If unsure, say N.
@@ -1008,7 +1002,7 @@ config TMPFS_POSIX_ACL
config HUGETLBFS
bool "HugeTLB file system support"
- depends X86 || IA64 || PPC64 || SPARC64 || SUPERH || BROKEN
+ depends on X86 || IA64 || PPC64 || SPARC64 || SUPERH || BROKEN
help
hugetlbfs is a filesystem backing for HugeTLB pages, based on
ramfs. For architectures that support it, say Y here and read
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 76f06f6bc2f..6e6d4568d54 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -706,12 +706,11 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params,
return -ELIBBAD;
size = sizeof(*loadmap) + nloads * sizeof(*seg);
- loadmap = kmalloc(size, GFP_KERNEL);
+ loadmap = kzalloc(size, GFP_KERNEL);
if (!loadmap)
return -ENOMEM;
params->loadmap = loadmap;
- memset(loadmap, 0, size);
loadmap->version = ELF32_FDPIC_LOADMAP_VERSION;
loadmap->nsegs = nloads;
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 5679d499307..609a3899475 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -100,6 +100,8 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
#define MSR_P6_PERFCTR0 0xc1
#define MSR_P6_PERFCTR1 0xc2
+#define MSR_FSB_FREQ 0xcd
+
#define MSR_IA32_BBL_CR_CTL 0x119
@@ -130,6 +132,9 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
#define MSR_IA32_PERF_STATUS 0x198
#define MSR_IA32_PERF_CTL 0x199
+#define MSR_IA32_MPERF 0xE7
+#define MSR_IA32_APERF 0xE8
+
#define MSR_IA32_THERM_CONTROL 0x19a
#define MSR_IA32_THERM_INTERRUPT 0x19b
#define MSR_IA32_THERM_STATUS 0x19c
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 952783d35c7..3227bc93d69 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -189,6 +189,7 @@ static inline unsigned int cpuid_edx(unsigned int op)
#define MSR_IA32_PERFCTR0 0xc1
#define MSR_IA32_PERFCTR1 0xc2
+#define MSR_FSB_FREQ 0xcd
#define MSR_MTRRcap 0x0fe
#define MSR_IA32_BBL_CR_CTL 0x119
@@ -311,6 +312,9 @@ static inline unsigned int cpuid_edx(unsigned int op)
#define MSR_IA32_PERF_STATUS 0x198
#define MSR_IA32_PERF_CTL 0x199
+#define MSR_IA32_MPERF 0xE7
+#define MSR_IA32_APERF 0xE8
+
#define MSR_IA32_THERM_CONTROL 0x19a
#define MSR_IA32_THERM_INTERRUPT 0x19b
#define MSR_IA32_THERM_STATUS 0x19c
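
MSR_IA32_MPERF and MSR_IA32_APERF count while the CPU is in C0, the former at
a fixed reference rate and the latter at the actual clock rate, so the ratio
of their deltas scaled by the maximum frequency gives the average effective
frequency over a sampling interval; the acpi-cpufreq rework elsewhere in this
patch uses them for its getavg hook. A hedged sketch (assumes a 64-bit build
so plain u64 division is fine, and that both reads run on the CPU being
measured):

#include <asm/msr.h>

static unsigned int example_avg_khz(unsigned int max_khz)
{
	u64 aperf0, mperf0, aperf1, mperf1;

	rdmsrl(MSR_IA32_MPERF, mperf0);
	rdmsrl(MSR_IA32_APERF, aperf0);

	/* ... let the workload run for the sampling interval ... */

	rdmsrl(MSR_IA32_MPERF, mperf1);
	rdmsrl(MSR_IA32_APERF, aperf1);

	return (unsigned int)(max_khz * (aperf1 - aperf0) /
			      (mperf1 - mperf0));
}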
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 538423d4a86..aca66984aaf 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -40,7 +40,7 @@ extern void __chk_io_ptr(void __iomem *);
#error no compiler-gcc.h file for this gcc version
#elif __GNUC__ == 4
# include <linux/compiler-gcc4.h>
-#elif __GNUC__ == 3
+#elif __GNUC__ == 3 && __GNUC_MINOR__ >= 2
# include <linux/compiler-gcc3.h>
#else
# error Sorry, your compiler is too old/not recognized.
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index a7f01502753..fef6f3d0a4a 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -160,31 +160,6 @@ struct configfs_group_operations {
void (*drop_item)(struct config_group *group, struct config_item *item);
};
-
-
-/**
- * Use these macros to make defining attributes easier. See include/linux/device.h
- * for examples..
- */
-
-#if 0
-#define __ATTR(_name,_mode,_show,_store) { \
- .attr = {.ca_name = __stringify(_name), .ca_mode = _mode, .ca_owner = THIS_MODULE }, \
- .show = _show, \
- .store = _store, \
-}
-
-#define __ATTR_RO(_name) { \
- .attr = { .ca_name = __stringify(_name), .ca_mode = 0444, .ca_owner = THIS_MODULE }, \
- .show = _name##_show, \
-}
-
-#define __ATTR_NULL { .attr = { .name = NULL } }
-
-#define attr_name(_attr) (_attr).attr.name
-#endif
-
-
struct configfs_subsystem {
struct config_group su_group;
struct semaphore su_sem;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 4ea39fee99c..7f008f6bfdc 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -172,6 +172,8 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int relation);
+extern int cpufreq_driver_getavg(struct cpufreq_policy *policy);
+
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
@@ -204,6 +206,7 @@ struct cpufreq_driver {
unsigned int (*get) (unsigned int cpu);
/* optional */
+ unsigned int (*getavg) (unsigned int cpu);
int (*exit) (struct cpufreq_policy *policy);
int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg);
int (*resume) (struct cpufreq_policy *policy);
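
A driver advertises the new optional hook simply by filling in the .getavg
member; cpufreq_driver_getavg(), declared in the same hunk, presumably lets
governors such as ondemand consult the measured rather than the requested
frequency. A hedged sketch with illustrative names:

#include <linux/cpufreq.h>

static unsigned int example_getavg(unsigned int cpu)
{
	/* would return the measured average frequency of 'cpu' in kHz,
	 * e.g. derived from the APERF/MPERF MSRs added above */
	return 0;
}

static struct cpufreq_driver example_driver = {
	.name	= "example",
	.getavg	= example_getavg,
	/* .init, .verify, .target, .get, ... filled in as usual */
};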
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 6d8846e7be6..81480e61346 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -11,7 +11,7 @@
** the sysctl() binary interface. Do *NOT* change the
** numbering of any existing values here, and do not change
** any numbers within any one set of values. If you have to
- ** have to redefine an existing interface, use a new number for it.
+ ** redefine an existing interface, use a new number for it.
** The kernel will then return -ENOTDIR to any application using
** the old binary interface.
**
diff --git a/include/rdma/ib_marshall.h b/include/rdma/ib_marshall.h
index 66bf4d7d0df..db037205c9e 100644
--- a/include/rdma/ib_marshall.h
+++ b/include/rdma/ib_marshall.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -41,6 +41,9 @@
void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
struct ib_qp_attr *src);
+void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
+ struct ib_ah_attr *src);
+
void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
struct ib_sa_path_rec *src);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 8eacc351099..fd2353fa7e1 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -43,6 +43,8 @@
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
#include <asm/atomic.h>
#include <asm/scatterlist.h>
@@ -848,6 +850,49 @@ struct ib_cache {
u8 *lmc_cache;
};
+struct ib_dma_mapping_ops {
+ int (*mapping_error)(struct ib_device *dev,
+ u64 dma_addr);
+ u64 (*map_single)(struct ib_device *dev,
+ void *ptr, size_t size,
+ enum dma_data_direction direction);
+ void (*unmap_single)(struct ib_device *dev,
+ u64 addr, size_t size,
+ enum dma_data_direction direction);
+ u64 (*map_page)(struct ib_device *dev,
+ struct page *page, unsigned long offset,
+ size_t size,
+ enum dma_data_direction direction);
+ void (*unmap_page)(struct ib_device *dev,
+ u64 addr, size_t size,
+ enum dma_data_direction direction);
+ int (*map_sg)(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction);
+ void (*unmap_sg)(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction);
+ u64 (*dma_address)(struct ib_device *dev,
+ struct scatterlist *sg);
+ unsigned int (*dma_len)(struct ib_device *dev,
+ struct scatterlist *sg);
+ void (*sync_single_for_cpu)(struct ib_device *dev,
+ u64 dma_handle,
+ size_t size,
+ enum dma_data_direction dir);
+ void (*sync_single_for_device)(struct ib_device *dev,
+ u64 dma_handle,
+ size_t size,
+ enum dma_data_direction dir);
+ void *(*alloc_coherent)(struct ib_device *dev,
+ size_t size,
+ u64 *dma_handle,
+ gfp_t flag);
+ void (*free_coherent)(struct ib_device *dev,
+ size_t size, void *cpu_addr,
+ u64 dma_handle);
+};
+
struct iw_cm_verbs;
struct ib_device {
@@ -992,6 +1037,8 @@ struct ib_device {
struct ib_mad *in_mad,
struct ib_mad *out_mad);
+ struct ib_dma_mapping_ops *dma_ops;
+
struct module *owner;
struct class_device class_dev;
struct kobject ports_parent;
@@ -1395,10 +1442,216 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
* usable for DMA.
* @pd: The protection domain associated with the memory region.
* @mr_access_flags: Specifies the memory access rights.
+ *
+ * Note that the ib_dma_*() functions defined below must be used
+ * to create/destroy addresses used with the Lkey or Rkey returned
+ * by ib_get_dma_mr().
*/
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
/**
+ * ib_dma_mapping_error - check a DMA addr for error
+ * @dev: The device for which the dma_addr was created
+ * @dma_addr: The DMA address to check
+ */
+static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+ return dev->dma_ops ?
+ dev->dma_ops->mapping_error(dev, dma_addr) :
+ dma_mapping_error(dma_addr);
+}
+
+/**
+ * ib_dma_map_single - Map a kernel virtual address to DMA address
+ * @dev: The device for which the dma_addr is to be created
+ * @cpu_addr: The kernel virtual address
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline u64 ib_dma_map_single(struct ib_device *dev,
+ void *cpu_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ return dev->dma_ops ?
+ dev->dma_ops->map_single(dev, cpu_addr, size, direction) :
+ dma_map_single(dev->dma_device, cpu_addr, size, direction);
+}
+
+/**
+ * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline void ib_dma_unmap_single(struct ib_device *dev,
+ u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ dev->dma_ops ?
+ dev->dma_ops->unmap_single(dev, addr, size, direction) :
+ dma_unmap_single(dev->dma_device, addr, size, direction);
+}
+
+/**
+ * ib_dma_map_page - Map a physical page to DMA address
+ * @dev: The device for which the dma_addr is to be created
+ * @page: The page to be mapped
+ * @offset: The offset within the page
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline u64 ib_dma_map_page(struct ib_device *dev,
+ struct page *page,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ return dev->dma_ops ?
+ dev->dma_ops->map_page(dev, page, offset, size, direction) :
+ dma_map_page(dev->dma_device, page, offset, size, direction);
+}
+
+/**
+ * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline void ib_dma_unmap_page(struct ib_device *dev,
+ u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ dev->dma_ops ?
+ dev->dma_ops->unmap_page(dev, addr, size, direction) :
+ dma_unmap_page(dev->dma_device, addr, size, direction);
+}
+
+/**
+ * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
+ * @dev: The device for which the DMA addresses are to be created
+ * @sg: The array of scatter/gather entries
+ * @nents: The number of scatter/gather entries
+ * @direction: The direction of the DMA
+ */
+static inline int ib_dma_map_sg(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ return dev->dma_ops ?
+ dev->dma_ops->map_sg(dev, sg, nents, direction) :
+ dma_map_sg(dev->dma_device, sg, nents, direction);
+}
+
+/**
+ * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
+ * @dev: The device for which the DMA addresses were created
+ * @sg: The array of scatter/gather entries
+ * @nents: The number of scatter/gather entries
+ * @direction: The direction of the DMA
+ */
+static inline void ib_dma_unmap_sg(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ dev->dma_ops ?
+ dev->dma_ops->unmap_sg(dev, sg, nents, direction) :
+ dma_unmap_sg(dev->dma_device, sg, nents, direction);
+}
+
+/**
+ * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
+ * @dev: The device for which the DMA addresses were created
+ * @sg: The scatter/gather entry
+ */
+static inline u64 ib_sg_dma_address(struct ib_device *dev,
+ struct scatterlist *sg)
+{
+ return dev->dma_ops ?
+ dev->dma_ops->dma_address(dev, sg) : sg_dma_address(sg);
+}
+
+/**
+ * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
+ * @dev: The device for which the DMA addresses were created
+ * @sg: The scatter/gather entry
+ */
+static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
+ struct scatterlist *sg)
+{
+ return dev->dma_ops ?
+ dev->dma_ops->dma_len(dev, sg) : sg_dma_len(sg);
+}
+
+/**
+ * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @dir: The direction of the DMA
+ */
+static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
+ u64 addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ dev->dma_ops ?
+ dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir) :
+ dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+}
+
+/**
+ * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @dir: The direction of the DMA
+ */
+static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
+ u64 addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ dev->dma_ops ?
+ dev->dma_ops->sync_single_for_device(dev, addr, size, dir) :
+ dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+}
+
+/**
+ * ib_dma_alloc_coherent - Allocate memory and map it for DMA
+ * @dev: The device for which the DMA address is requested
+ * @size: The size of the region to allocate in bytes
+ * @dma_handle: A pointer for returning the DMA address of the region
+ * @flag: memory allocator flags
+ */
+static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
+ size_t size,
+ u64 *dma_handle,
+ gfp_t flag)
+{
+ return dev->dma_ops ?
+ dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag) :
+ dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
+}
+
+/**
+ * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
+ * @dev: The device for which the DMA addresses were allocated
+ * @size: The size of the region
+ * @cpu_addr: the address returned by ib_dma_alloc_coherent()
+ * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
+ */
+static inline void ib_dma_free_coherent(struct ib_device *dev,
+ size_t size, void *cpu_addr,
+ u64 dma_handle)
+{
+ dev->dma_ops ?
+ dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle) :
+ dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
+}
+
+/**
* ib_reg_phys_mr - Prepares a virtually addressed memory region for use
* by an HCA.
 * @pd: The protection domain assigned to the registered region.
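
On the provider side, a device that does not perform real DMA opts out of the
generic path by publishing its own ops table before registration (the ipath
driver is converted to do exactly this elsewhere in the patch); every
ib_dma_*() wrapper above then dispatches through dma_ops instead of
dev->dma_device. An illustrative sketch:

#include <rdma/ib_verbs.h>

static int example_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_addr == 0;
}

static struct ib_dma_mapping_ops example_dma_ops = {
	.mapping_error	= example_mapping_error,
	/* .map_single, .unmap_single, .map_sg, .unmap_sg,
	 * .dma_address, .dma_len, ... supplied likewise */
};

static void example_setup(struct ib_device *ibdev)
{
	ibdev->dma_ops = &example_dma_ops;	/* before ib_register_device() */
}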
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index deb5a0a4cee..36cd8a8526a 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -77,11 +77,34 @@ struct rdma_route {
int num_paths;
};
+struct rdma_conn_param {
+ const void *private_data;
+ u8 private_data_len;
+ u8 responder_resources;
+ u8 initiator_depth;
+ u8 flow_control;
+ u8 retry_count; /* ignored when accepting */
+ u8 rnr_retry_count;
+ /* Fields below ignored if a QP is created on the rdma_cm_id. */
+ u8 srq;
+ u32 qp_num;
+};
+
+struct rdma_ud_param {
+ const void *private_data;
+ u8 private_data_len;
+ struct ib_ah_attr ah_attr;
+ u32 qp_num;
+ u32 qkey;
+};
+
struct rdma_cm_event {
enum rdma_cm_event_type event;
int status;
- void *private_data;
- u8 private_data_len;
+ union {
+ struct rdma_conn_param conn;
+ struct rdma_ud_param ud;
+ } param;
};
struct rdma_cm_id;
@@ -204,25 +227,17 @@ void rdma_destroy_qp(struct rdma_cm_id *id);
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
int *qp_attr_mask);
-struct rdma_conn_param {
- const void *private_data;
- u8 private_data_len;
- u8 responder_resources;
- u8 initiator_depth;
- u8 flow_control;
- u8 retry_count; /* ignored when accepting */
- u8 rnr_retry_count;
- /* Fields below ignored if a QP is created on the rdma_cm_id. */
- u8 srq;
- u32 qp_num;
- enum ib_qp_type qp_type;
-};
-
/**
* rdma_connect - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
*
* Users must have resolved a route for the rdma_cm_id to connect with
* by having called rdma_resolve_route before calling this routine.
+ *
+ * This call will either connect to a remote QP or obtain remote QP
+ * information for unconnected rdma_cm_ids. The actual operation is
+ * based on the rdma_cm_id's port space.
*/
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
@@ -253,6 +268,21 @@ int rdma_listen(struct rdma_cm_id *id, int backlog);
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
/**
+ * rdma_notify - Notifies the RDMA CM of an asynchronous event that has
+ * occurred on the connection.
+ * @id: Connection identifier to transition to established.
+ * @event: Asynchronous event.
+ *
+ * This routine should be invoked by users to notify the CM of relevant
+ * communication events. Events that should be reported to the CM and
+ * when to report them are:
+ *
+ * IB_EVENT_COMM_EST - Used when a message is received on a connected
+ * QP before an RTU has been received.
+ */
+int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event);
+
+/**
* rdma_reject - Called to reject a connection request or response.
*/
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
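
With private_data folded into the new param union, consumers read
event->param.conn for connected port spaces and event->param.ud for
unreliable datagram ones. A hedged sketch of a passive-side handler (error
handling omitted, names illustrative):

#include <linux/string.h>
#include <rdma/rdma_cm.h>

static int example_cma_handler(struct rdma_cm_id *id,
			       struct rdma_cm_event *event)
{
	struct rdma_conn_param conn_param;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		/* the peer's offer is in event->param.conn */
		memset(&conn_param, 0, sizeof conn_param);
		conn_param.responder_resources = 1;
		conn_param.initiator_depth = 1;
		return rdma_accept(id, &conn_param);
	case RDMA_CM_EVENT_ESTABLISHED:
		/* for UD port spaces, event->param.ud carries the
		 * remote ah_attr, qp_num and qkey instead */
		return 0;
	default:
		return 0;
	}
}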
diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h
index e8c3af1804d..9b176df1d66 100644
--- a/include/rdma/rdma_cm_ib.h
+++ b/include/rdma/rdma_cm_ib.h
@@ -44,4 +44,7 @@
int rdma_set_ib_paths(struct rdma_cm_id *id,
struct ib_sa_path_rec *path_rec, int num_paths);
+/* Global qkey for UD QPs and multicast groups. */
+#define RDMA_UD_QKEY 0x01234567
+
#endif /* RDMA_CM_IB_H */
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h
new file mode 100644
index 00000000000..9572ab8eeac
--- /dev/null
+++ b/include/rdma/rdma_user_cm.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RDMA_USER_CM_H
+#define RDMA_USER_CM_H
+
+#include <linux/types.h>
+#include <linux/in6.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_user_sa.h>
+
+#define RDMA_USER_CM_ABI_VERSION 3
+
+#define RDMA_MAX_PRIVATE_DATA 256
+
+enum {
+ RDMA_USER_CM_CMD_CREATE_ID,
+ RDMA_USER_CM_CMD_DESTROY_ID,
+ RDMA_USER_CM_CMD_BIND_ADDR,
+ RDMA_USER_CM_CMD_RESOLVE_ADDR,
+ RDMA_USER_CM_CMD_RESOLVE_ROUTE,
+ RDMA_USER_CM_CMD_QUERY_ROUTE,
+ RDMA_USER_CM_CMD_CONNECT,
+ RDMA_USER_CM_CMD_LISTEN,
+ RDMA_USER_CM_CMD_ACCEPT,
+ RDMA_USER_CM_CMD_REJECT,
+ RDMA_USER_CM_CMD_DISCONNECT,
+ RDMA_USER_CM_CMD_INIT_QP_ATTR,
+ RDMA_USER_CM_CMD_GET_EVENT,
+ RDMA_USER_CM_CMD_GET_OPTION,
+ RDMA_USER_CM_CMD_SET_OPTION,
+ RDMA_USER_CM_CMD_NOTIFY
+};
+
+/*
+ * command ABI structures.
+ */
+struct rdma_ucm_cmd_hdr {
+ __u32 cmd;
+ __u16 in;
+ __u16 out;
+};
+
+struct rdma_ucm_create_id {
+ __u64 uid;
+ __u64 response;
+ __u16 ps;
+ __u8 reserved[6];
+};
+
+struct rdma_ucm_create_id_resp {
+ __u32 id;
+};
+
+struct rdma_ucm_destroy_id {
+ __u64 response;
+ __u32 id;
+ __u32 reserved;
+};
+
+struct rdma_ucm_destroy_id_resp {
+ __u32 events_reported;
+};
+
+struct rdma_ucm_bind_addr {
+ __u64 response;
+ struct sockaddr_in6 addr;
+ __u32 id;
+};
+
+struct rdma_ucm_resolve_addr {
+ struct sockaddr_in6 src_addr;
+ struct sockaddr_in6 dst_addr;
+ __u32 id;
+ __u32 timeout_ms;
+};
+
+struct rdma_ucm_resolve_route {
+ __u32 id;
+ __u32 timeout_ms;
+};
+
+struct rdma_ucm_query_route {
+ __u64 response;
+ __u32 id;
+ __u32 reserved;
+};
+
+struct rdma_ucm_query_route_resp {
+ __u64 node_guid;
+ struct ib_user_path_rec ib_route[2];
+ struct sockaddr_in6 src_addr;
+ struct sockaddr_in6 dst_addr;
+ __u32 num_paths;
+ __u8 port_num;
+ __u8 reserved[3];
+};
+
+struct rdma_ucm_conn_param {
+ __u32 qp_num;
+ __u32 reserved;
+ __u8 private_data[RDMA_MAX_PRIVATE_DATA];
+ __u8 private_data_len;
+ __u8 srq;
+ __u8 responder_resources;
+ __u8 initiator_depth;
+ __u8 flow_control;
+ __u8 retry_count;
+ __u8 rnr_retry_count;
+ __u8 valid;
+};
+
+struct rdma_ucm_ud_param {
+ __u32 qp_num;
+ __u32 qkey;
+ struct ib_uverbs_ah_attr ah_attr;
+ __u8 private_data[RDMA_MAX_PRIVATE_DATA];
+ __u8 private_data_len;
+ __u8 reserved[7];
+};
+
+struct rdma_ucm_connect {
+ struct rdma_ucm_conn_param conn_param;
+ __u32 id;
+ __u32 reserved;
+};
+
+struct rdma_ucm_listen {
+ __u32 id;
+ __u32 backlog;
+};
+
+struct rdma_ucm_accept {
+ __u64 uid;
+ struct rdma_ucm_conn_param conn_param;
+ __u32 id;
+ __u32 reserved;
+};
+
+struct rdma_ucm_reject {
+ __u32 id;
+ __u8 private_data_len;
+ __u8 reserved[3];
+ __u8 private_data[RDMA_MAX_PRIVATE_DATA];
+};
+
+struct rdma_ucm_disconnect {
+ __u32 id;
+};
+
+struct rdma_ucm_init_qp_attr {
+ __u64 response;
+ __u32 id;
+ __u32 qp_state;
+};
+
+struct rdma_ucm_notify {
+ __u32 id;
+ __u32 event;
+};
+
+struct rdma_ucm_get_event {
+ __u64 response;
+};
+
+struct rdma_ucm_event_resp {
+ __u64 uid;
+ __u32 id;
+ __u32 event;
+ __u32 status;
+ union {
+ struct rdma_ucm_conn_param conn;
+ struct rdma_ucm_ud_param ud;
+ } param;
+};
+
+#endif /* RDMA_USER_CM_H */
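
Userspace drives this ABI by write()ing an rdma_ucm_cmd_hdr immediately
followed by the command struct to the rdma_cm character device, with hdr.in
and hdr.out giving the payload and response sizes and the response copied
back through the user pointer embedded in the command. A hedged userspace
sketch of CREATE_ID (the device path, typically /dev/infiniband/rdma_cm, and
the success-returns-length convention are assumptions about the ucma driver,
not spelled out in this header):

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <rdma/rdma_user_cm.h>

/* 'ps' is an rdma_port_space value as defined in <rdma/rdma_cm.h> */
static int example_create_id(int fd, uint16_t ps)
{
	struct {
		struct rdma_ucm_cmd_hdr hdr;
		struct rdma_ucm_create_id cmd;
	} msg;
	struct rdma_ucm_create_id_resp resp;

	memset(&msg, 0, sizeof msg);
	msg.hdr.cmd = RDMA_USER_CM_CMD_CREATE_ID;
	msg.hdr.in = sizeof msg.cmd;
	msg.hdr.out = sizeof resp;
	msg.cmd.uid = 0;			/* caller's private cookie */
	msg.cmd.response = (uintptr_t) &resp;	/* kernel writes resp here */
	msg.cmd.ps = ps;

	if (write(fd, &msg, sizeof msg) != sizeof msg)
		return -1;
	return resp.id;	/* handle used by all later commands */
}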
diff --git a/init/Kconfig b/init/Kconfig
index 9edf103b3ec..f000edb3bb7 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -348,7 +348,7 @@ config SYSCTL_SYSCALL
If unsure say Y here.
config KALLSYMS
- bool "Load all symbols for debugging/kksymoops" if EMBEDDED
+ bool "Load all symbols for debugging/ksymoops" if EMBEDDED
default y
help
Say Y here to let the kernel print out symbolic crash information and
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0701ddda1df..818e4589f71 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -437,7 +437,7 @@ config FAIL_PAGE_ALLOC
Provide fault-injection capability for alloc_pages().
config FAIL_MAKE_REQUEST
- bool "Fault-injection capabilitiy for disk IO"
+ bool "Fault-injection capability for disk IO"
depends on FAULT_INJECTION
help
Provide fault-injection capability for disk IO.
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 74046efdf87..8ce00d3703d 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -565,7 +565,7 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
} else {
struct sk_buff *free_it = next;
- /* Old fragmnet is completely overridden with
+ /* Old fragment is completely overridden with
* new one drop it.
*/
next = next->next;
diff --git a/sound/aoa/fabrics/Kconfig b/sound/aoa/fabrics/Kconfig
index c3bc7705c86..50d7021ff67 100644
--- a/sound/aoa/fabrics/Kconfig
+++ b/sound/aoa/fabrics/Kconfig
@@ -1,6 +1,6 @@
config SND_AOA_FABRIC_LAYOUT
tristate "layout-id fabric"
- depends SND_AOA
+ depends on SND_AOA
select SND_AOA_SOUNDBUS
select SND_AOA_SOUNDBUS_I2S
---help---