Merge branch 'bugfixes' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6
author		Linus Torvalds <torvalds@linux-foundation.org>	Thu, 29 Apr 2010 17:23:44 +0000 (10:23 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>	Thu, 29 Apr 2010 17:23:44 +0000 (10:23 -0700)
* 'bugfixes' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6:
  nfs: fix memory leak in nfs_get_sb with CONFIG_NFS_V4
  nfs: fix some issues in nfs41_proc_reclaim_complete()
  NFS: Ensure that nfs_wb_page() waits for Pg_writeback to clear
  NFS: Fix an unstable write data integrity race
  nfs: testing for null instead of ERR_PTR()
  NFS: rsize and wsize settings ignored on v4 mounts
  NFSv4: Don't attempt an atomic open if the file is a mountpoint
  SUNRPC: Fix a bug in rpcauth_prune_expired

245 files changed:
Documentation/HOWTO
Documentation/cgroups/cgroups.txt
Documentation/kernel-parameters.txt
Documentation/stable_kernel_rules.txt
MAINTAINERS
arch/arm/configs/n8x0_defconfig
arch/arm/configs/omap_zoom2_defconfig
arch/arm/configs/omap_zoom3_defconfig
arch/arm/configs/rx51_defconfig
arch/arm/mach-omap1/timer32k.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/board-3630sdp.c
arch/arm/mach-omap2/board-am3517evm.c
arch/arm/mach-omap2/board-devkit8000.c
arch/arm/mach-omap2/board-igep0020.c
arch/arm/mach-omap2/board-n8x0.c
arch/arm/mach-omap2/board-sdp-flash.c
arch/arm/mach-omap2/board-zoom-debugboard.c
arch/arm/mach-omap2/board-zoom-peripherals.c
arch/arm/mach-omap2/clock3xxx_data.c
arch/arm/mach-omap2/clock44xx_data.c
arch/arm/mach-omap2/clockdomain.c
arch/arm/mach-omap2/devices.c
arch/arm/mach-omap2/gpmc-nand.c
arch/arm/mach-omap2/include/mach/entry-macro.S
arch/arm/mach-omap2/omap-headsmp.S
arch/arm/mach-omap2/omap44xx-smc.S
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/powerdomain.c
arch/arm/mach-omap2/prcm.c
arch/arm/mach-omap2/serial.c
arch/arm/plat-omap/common.c
arch/arm/plat-omap/dma.c
arch/arm/plat-omap/gpio.c
arch/arm/plat-omap/include/plat/irqs.h
arch/arm/plat-omap/include/plat/mcbsp.h
arch/arm/plat-omap/include/plat/nand.h
arch/arm/plat-omap/include/plat/omap44xx.h
arch/arm/plat-omap/include/plat/omap_hwmod.h
arch/avr32/kernel/ptrace.c
arch/um/drivers/line.c
arch/um/os-Linux/helper.c
arch/x86/kernel/apb_timer.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/vmware.c
arch/x86/kernel/process_64.c
arch/x86/pci/acpi.c
arch/x86/pci/i386.c
block/blk-timeout.c
crypto/authenc.c
drivers/ata/libata-eh.c
drivers/ata/pata_pcmcia.c
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_receiver.c
drivers/block/pktcdvd.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_conservative.c
drivers/firewire/core-iso.c
drivers/firewire/ohci.c
drivers/gpio/pca953x.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_opregion.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r300_cmdbuf.c
drivers/gpu/drm/radeon/r420.c
drivers/gpu/drm/radeon/radeon_cp.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/hwmon/asus_atk0110.c
drivers/hwmon/hp_accel.c
drivers/ide/ide-cs.c
drivers/md/raid5.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/vmware_balloon.c [new file with mode: 0644]
drivers/mtd/Makefile
drivers/mtd/internal.h
drivers/mtd/mtdbdi.c
drivers/mtd/mtdcore.c
drivers/mtd/mtdsuper.c
drivers/mtd/nand/orion_nand.c
drivers/net/8139too.c
drivers/net/Makefile
drivers/net/bnx2.c
drivers/net/can/usb/ems_usb.c
drivers/net/cxgb3/ael1002.c
drivers/net/cxgb3/cxgb3_main.c
drivers/net/e100.c
drivers/net/e1000e/82571.c
drivers/net/e1000e/e1000.h
drivers/net/e1000e/netdev.c
drivers/net/fsl_pq_mdio.c
drivers/net/gianfar.c
drivers/net/ixgbe/ixgbe_82599.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/ixgbe/ixgbe_type.h
drivers/net/ks8851.c
drivers/net/pcmcia/3c574_cs.c
drivers/net/pcmcia/smc91c92_cs.c
drivers/net/r8169.c
drivers/net/sfc/efx.c
drivers/net/sfc/falcon.c
drivers/net/sfc/falcon_boards.c
drivers/net/sfc/nic.h
drivers/net/sfc/siena.c
drivers/net/tg3.c
drivers/net/usb/Kconfig
drivers/net/usb/Makefile
drivers/net/usb/cdc_ether.c
drivers/net/usb/ipheth.c [new file with mode: 0644]
drivers/net/usb/kaweth.c
drivers/net/usb/sierra_net.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl-dev.h
drivers/net/wireless/iwlwifi/iwl-eeprom.h
drivers/net/wireless/iwlwifi/iwl-scan.c
drivers/pci/pci.c
drivers/pci/pcie/aer/aerdrv.c
drivers/pci/probe.c
drivers/pcmcia/ds.c
drivers/platform/x86/Kconfig
drivers/platform/x86/asus-laptop.c
drivers/platform/x86/dell-wmi.c
drivers/platform/x86/eeepc-laptop.c
drivers/platform/x86/eeepc-wmi.c
drivers/regulator/max8925-regulator.c
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/usb/core/driver.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/ehci-mem.c
drivers/usb/host/ehci-omap.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/ehci.h
drivers/usb/host/ohci-da8xx.c
drivers/usb/misc/usbsevseg.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/serial/qcaux.c
drivers/usb/serial/sierra.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/serial/ti_usb_3410_5052.h
drivers/usb/wusbcore/devconnect.c
drivers/video/efifb.c
drivers/w1/masters/omap_hdq.c
drivers/w1/slaves/w1_therm.c
drivers/watchdog/booke_wdt.c
drivers/watchdog/sb_wdog.c
drivers/watchdog/sbc_fitpc2_wdt.c
fs/9p/v9fs.c
fs/9p/v9fs.h
fs/9p/vfs_super.c
fs/afs/internal.h
fs/afs/super.c
fs/afs/volume.c
fs/binfmt_elf_fdpic.c
fs/block_dev.c
fs/btrfs/disk-io.c
fs/cifs/cifs_fs_sb.h
fs/cifs/cifsfs.c
fs/coda/inode.c
fs/compat_ioctl.c
fs/ecryptfs/ecryptfs_kernel.h
fs/ecryptfs/main.c
fs/ecryptfs/super.c
fs/exofs/exofs.h
fs/exofs/super.c
fs/ext4/extents.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ioctl.c
fs/ncpfs/inode.c
fs/nfs/dir.c
fs/nfsd/nfs4xdr.c
fs/proc/base.c
fs/reiserfs/dir.c
fs/reiserfs/xattr.c
fs/smbfs/inode.c
fs/squashfs/block.c
fs/squashfs/super.c
fs/squashfs/zlib_wrapper.c
fs/super.c
fs/sync.c
fs/xfs/xfs_dfrag.c
include/linux/backing-dev.h
include/linux/coda_psdev.h
include/linux/firewire-cdev.h
include/linux/firewire-constants.h
include/linux/fs.h
include/linux/ncp_fs_sb.h
include/linux/poison.h
include/linux/smb_fs_sb.h
include/net/sctp/command.h
include/net/sctp/sctp.h
include/pcmcia/ds.h
include/pcmcia/ss.h
init/initramfs.c
kernel/sys.c
lib/decompress_unlzo.c
lib/flex_array.c
lib/vsprintf.c
mm/backing-dev.c
mm/hugetlb.c
mm/ksm.c
mm/memcontrol.c
mm/mmap.c
mm/rmap.c
net/bluetooth/l2cap.c
net/bridge/br_multicast.c
net/core/dev.c
net/core/rtnetlink.c
net/ieee802154/af_ieee802154.c
net/ipv6/ip6_output.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/xfrm6_policy.c
net/mac80211/agg-tx.c
net/mac80211/mlme.c
net/rds/rdma_transport.c
net/sctp/associola.c
net/sctp/endpointola.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/x25/af_x25.c
security/keys/keyring.c
security/keys/request_key.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_analog.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/maestro3.c

diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index f5395af..40ada93 100644
@@ -234,7 +234,7 @@ process is as follows:
     Linus, usually the patches that have already been included in the
     -next kernel for a few weeks.  The preferred way to submit big changes
     is using git (the kernel's source management tool, more information
-    can be found at http://git.or.cz/) but plain patches are also just
+    can be found at http://git-scm.com/) but plain patches are also just
     fine.
   - After two weeks a -rc1 kernel is released it is now possible to push
     only patches that do not include new features that could affect the
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index fd588ff..a1ca592 100644
@@ -235,8 +235,7 @@ containing the following files describing that cgroup:
  - cgroup.procs: list of tgids in the cgroup.  This list is not
    guaranteed to be sorted or free of duplicate tgids, and userspace
    should sort/uniquify the list if this property is required.
-   Writing a tgid into this file moves all threads with that tgid into
-   this cgroup.
+   This is a read-only file, for now.
  - notify_on_release flag: run the release agent on exit?
  - release_agent: the path to use for release notifications (this file
    exists in the top cgroup only)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index e2202e9..839b21b 100644
@@ -1194,7 +1194,7 @@ and is between 256 and 4096 characters. It is defined in the file
 
        libata.force=   [LIBATA] Force configurations.  The format is comma
                        separated list of "[ID:]VAL" where ID is
-                       PORT[:DEVICE].  PORT and DEVICE are decimal numbers
+                       PORT[.DEVICE].  PORT and DEVICE are decimal numbers
                        matching port, link or device.  Basically, it matches
                        the ATA ID string printed on console by libata.  If
                        the whole ID part is omitted, the last PORT and DEVICE
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index 5effa5b..e213f45 100644
@@ -18,16 +18,15 @@ Rules on what kind of patches are accepted, and which ones are not, into the
  - It cannot contain any "trivial" fixes in it (spelling changes,
    whitespace cleanups, etc).
  - It must follow the Documentation/SubmittingPatches rules.
- - It or an equivalent fix must already exist in Linus' tree.  Quote the
-   respective commit ID in Linus' tree in your patch submission to -stable.
+ - It or an equivalent fix must already exist in Linus' tree (upstream).
 
 
 Procedure for submitting patches to the -stable tree:
 
  - Send the patch, after verifying that it follows the above rules, to
-   stable@kernel.org.
- - To have the patch automatically included in the stable tree, add the
-   the tag
+   stable@kernel.org.  You must note the upstream commit ID in the changelog
+   of your submission.
+ - To have the patch automatically included in the stable tree, add the tag
      Cc: stable@kernel.org
    in the sign-off area. Once the patch is merged it will be applied to
    the stable tree without anything else needing to be done by the author
diff --git a/MAINTAINERS b/MAINTAINERS
index a2d9254..1838875 100644
@@ -1960,7 +1960,7 @@ F:        lib/kobj*
 
 DRM DRIVERS
 M:     David Airlie <airlied@linux.ie>
-L:     dri-devel@lists.sourceforge.net
+L:     dri-devel@lists.freedesktop.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
 S:     Maintained
 F:     drivers/gpu/drm/
diff --git a/arch/arm/configs/n8x0_defconfig b/arch/arm/configs/n8x0_defconfig
index 216ad00..9405e32 100644
@@ -1058,7 +1058,6 @@ CONFIG_JFFS2_CMODE_PRIORITY=y
 # CONFIG_ROMFS_FS is not set
 # CONFIG_SYSV_FS is not set
 # CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
 CONFIG_NETWORK_FILESYSTEMS=y
 # CONFIG_NFS_FS is not set
 # CONFIG_NFSD is not set
diff --git a/arch/arm/configs/omap_zoom2_defconfig b/arch/arm/configs/omap_zoom2_defconfig
index f5c6e11..881faea 100644
@@ -661,7 +661,7 @@ CONFIG_DEVKMEM=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=32
-CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
 CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
 CONFIG_SERIAL_8250_SHARE_IRQ=y
diff --git a/arch/arm/configs/omap_zoom3_defconfig b/arch/arm/configs/omap_zoom3_defconfig
index ea9a501..5e55b55 100644
@@ -680,7 +680,7 @@ CONFIG_DEVKMEM=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=32
-CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
 CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
 CONFIG_SERIAL_8250_SHARE_IRQ=y
diff --git a/arch/arm/configs/rx51_defconfig b/arch/arm/configs/rx51_defconfig
index 45135ff..473f9e1 100644
@@ -59,8 +59,6 @@ CONFIG_FAIR_GROUP_SCHED=y
 CONFIG_USER_SCHED=y
 # CONFIG_CGROUP_SCHED is not set
 # CONFIG_CGROUPS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_RELAY is not set
 # CONFIG_NAMESPACES is not set
 CONFIG_BLK_DEV_INITRD=y
@@ -480,7 +478,6 @@ CONFIG_BT_HIDP=m
 # CONFIG_BT_HCIBFUSB is not set
 # CONFIG_BT_HCIVHCI is not set
 # CONFIG_AF_RXRPC is not set
-# CONFIG_PHONET is not set
 CONFIG_WIRELESS=y
 CONFIG_CFG80211=y
 # CONFIG_CFG80211_REG_DEBUG is not set
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 9ad1185..20cfbcc 100644
@@ -68,12 +68,6 @@ struct sys_timer omap_timer;
  * ---------------------------------------------------------------------------
  */
 
-#if defined(CONFIG_ARCH_OMAP16XX)
-#define TIMER_32K_SYNCHRONIZED         0xfffbc410
-#else
-#error OMAP 32KHz timer does not currently work on 15XX!
-#endif
-
 /* 16xx specific defines */
 #define OMAP1_32K_TIMER_BASE           0xfffb9000
 #define OMAP1_32K_TIMER_CR             0x08
@@ -150,15 +144,6 @@ static struct clock_event_device clockevent_32k_timer = {
        .set_mode       = omap_32k_timer_set_mode,
 };
 
-/*
- * The 32KHz synchronized timer is an additional timer on 16xx.
- * It is always running.
- */
-static inline unsigned long omap_32k_sync_timer_read(void)
-{
-       return omap_readl(TIMER_32K_SYNCHRONIZED);
-}
-
 static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id)
 {
        struct clock_event_device *evt = &clockevent_32k_timer;
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index a8a3d1e..2455dcc 100644
@@ -59,8 +59,10 @@ config MACH_OMAP3_BEAGLE
        select OMAP_PACKAGE_CBB
 
 config MACH_DEVKIT8000
-        bool "DEVKIT8000 board"
-        depends on ARCH_OMAP3
+       bool "DEVKIT8000 board"
+       depends on ARCH_OMAP3
+       select OMAP_PACKAGE_CUS
+       select OMAP_MUX
 
 config MACH_OMAP_LDP
        bool "OMAP3 LDP board"
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index a0a2a11..504d2bd 100644
@@ -96,6 +96,7 @@ static struct omap_board_mux board_mux[] __initdata = {
 static void __init omap_sdp_init(void)
 {
        omap3_mux_init(board_mux, OMAP_PACKAGE_CBP);
+       omap_serial_init();
        zoom_peripherals_init();
        board_smc91x_init();
        enable_board_wakeup_source();
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index 6ae8805..c1c4389 100644
@@ -294,9 +294,9 @@ static struct omap_board_mux board_mux[] __initdata = {
 
 static void __init am3517_evm_init(void)
 {
-       am3517_evm_i2c_init();
-
        omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
+
+       am3517_evm_i2c_init();
        platform_add_devices(am3517_evm_devices,
                                ARRAY_SIZE(am3517_evm_devices));
 
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 5bfc13b..47e3af2 100644
@@ -50,7 +50,6 @@
 #include <linux/input/matrix_keypad.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
-#include <linux/usb/otg.h>
 #include <linux/dm9000.h>
 #include <linux/interrupt.h>
 
@@ -269,20 +268,6 @@ static int devkit8000_twl_gpio_setup(struct device *dev,
        devkit8000_vmmc1_supply.dev = mmc[0].dev;
        devkit8000_vsim_supply.dev = mmc[0].dev;
 
-       /* REVISIT: need ehci-omap hooks for external VBUS
-        * power switch and overcurrent detect
-        */
-
-       gpio_request(gpio + 1, "EHCI_nOC");
-       gpio_direction_input(gpio + 1);
-
-       /* TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, active low) */
-       gpio_request(gpio + TWL4030_GPIO_MAX, "nEN_USB_PWR");
-       gpio_direction_output(gpio + TWL4030_GPIO_MAX, 1);
-
-       /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
-       gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
-
        return 0;
 }
 
@@ -303,7 +288,7 @@ static struct regulator_consumer_supply devkit8000_vpll2_supplies[] = {
        .dev            = &devkit8000_lcd_device.dev,
        },
        {
-       .supply         = "vdss_dsi",
+       .supply         = "vdds_dsi",
        .dev            = &devkit8000_dss_device.dev,
        }
 };
@@ -639,17 +624,21 @@ static struct omap_musb_board_data musb_board_data = {
 static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
 
        .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
-       .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
+       .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN,
        .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
 
        .phy_reset  = true,
        .reset_gpio_port[0]  = -EINVAL,
-       .reset_gpio_port[1]  = 147,
+       .reset_gpio_port[1]  = -EINVAL,
        .reset_gpio_port[2]  = -EINVAL
 };
 
 static void __init devkit8000_init(void)
 {
+       omap_serial_init();
+
+       omap_dm9000_init();
+
        devkit8000_i2c_init();
        platform_add_devices(devkit8000_devices,
                        ARRAY_SIZE(devkit8000_devices));
@@ -659,25 +648,15 @@ static void __init devkit8000_init(void)
        spi_register_board_info(devkit8000_spi_board_info,
        ARRAY_SIZE(devkit8000_spi_board_info));
 
-       omap_serial_init();
-
-       omap_dm9000_init();
-
        devkit8000_ads7846_init();
 
-       omap_mux_init_gpio(170, OMAP_PIN_INPUT);
-
-       gpio_request(170, "DVI_nPD");
-       /* REVISIT leave DVI powered down until it's needed ... */
-       gpio_direction_output(170, true);
-
        usb_musb_init(&musb_board_data);
        usb_ehci_init(&ehci_pdata);
        devkit8000_flash_init();
 
        /* Ensure SDRC pins are mux'd for self-refresh */
-       omap_mux_init_signal("sdr_cke0", OMAP_PIN_OUTPUT);
-       omap_mux_init_signal("sdr_cke1", OMAP_PIN_OUTPUT);
+       omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
+       omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
 }
 
 static void __init devkit8000_map_io(void)
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 3c7789d..d55c57b 100644
@@ -458,13 +458,13 @@ static struct omap_musb_board_data musb_board_data = {
 };
 
 static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
-       .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN,
-       .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
+       .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
+       .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN,
        .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
 
        .phy_reset = true,
-       .reset_gpio_port[0] = -EINVAL,
-       .reset_gpio_port[1] = IGEP2_GPIO_USBH_NRESET,
+       .reset_gpio_port[0] = IGEP2_GPIO_USBH_NRESET,
+       .reset_gpio_port[1] = -EINVAL,
        .reset_gpio_port[2] = -EINVAL,
 };
 
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index da9bcb8..3ccc34e 100644
@@ -216,7 +216,7 @@ static void __init n8x0_onenand_init(void) {}
  */
 #define N8X0_SLOT_SWITCH_GPIO  96
 #define N810_EMMC_VSD_GPIO     23
-#define NN810_EMMC_VIO_GPIO    9
+#define N810_EMMC_VIO_GPIO     9
 
 static int n8x0_mmc_switch_slot(struct device *dev, int slot)
 {
@@ -304,10 +304,10 @@ static void n810_set_power_emmc(struct device *dev,
        if (power_on) {
                gpio_set_value(N810_EMMC_VSD_GPIO, 1);
                msleep(1);
-               gpio_set_value(NN810_EMMC_VIO_GPIO, 1);
+               gpio_set_value(N810_EMMC_VIO_GPIO, 1);
                msleep(1);
        } else {
-               gpio_set_value(NN810_EMMC_VIO_GPIO, 0);
+               gpio_set_value(N810_EMMC_VIO_GPIO, 0);
                msleep(50);
                gpio_set_value(N810_EMMC_VSD_GPIO, 0);
                msleep(50);
@@ -468,7 +468,7 @@ static void n8x0_mmc_cleanup(struct device *dev)
 
        if (machine_is_nokia_n810()) {
                gpio_free(N810_EMMC_VSD_GPIO);
-               gpio_free(NN810_EMMC_VIO_GPIO);
+               gpio_free(N810_EMMC_VIO_GPIO);
        }
 }
 
@@ -529,7 +529,7 @@ void __init n8x0_mmc_init(void)
 
        err = gpio_request(N8X0_SLOT_SWITCH_GPIO, "MMC slot switch");
        if (err)
-               return err;
+               return;
 
        gpio_direction_output(N8X0_SLOT_SWITCH_GPIO, 0);
 
@@ -537,17 +537,17 @@ void __init n8x0_mmc_init(void)
                err = gpio_request(N810_EMMC_VSD_GPIO, "MMC slot 2 Vddf");
                if (err) {
                        gpio_free(N8X0_SLOT_SWITCH_GPIO);
-                       return err;
+                       return;
                }
                gpio_direction_output(N810_EMMC_VSD_GPIO, 0);
 
-               err = gpio_request(NN810_EMMC_VIO_GPIO, "MMC slot 2 Vdd");
+               err = gpio_request(N810_EMMC_VIO_GPIO, "MMC slot 2 Vdd");
                if (err) {
                        gpio_free(N8X0_SLOT_SWITCH_GPIO);
                        gpio_free(N810_EMMC_VSD_GPIO);
-                       return err;
+                       return;
                }
-               gpio_direction_output(NN810_EMMC_VIO_GPIO, 0);
+               gpio_direction_output(N810_EMMC_VIO_GPIO, 0);
        }
 
        mmc_data[0] = &mmc1_data;
diff --git a/arch/arm/mach-omap2/board-sdp-flash.c b/arch/arm/mach-omap2/board-sdp-flash.c
index b1b88de..2d02632 100644
@@ -253,20 +253,20 @@ void __init sdp_flash_init(struct flash_partitions sdp_partition_info[])
        }
 
        if (norcs > GPMC_CS_NUM)
-               printk(KERN_INFO "OneNAND: Unable to find configuration "
-                               " in GPMC\n ");
+               printk(KERN_INFO "NOR: Unable to find configuration "
+                               "in GPMC\n");
        else
                board_nor_init(sdp_partition_info[0], norcs);
 
        if (onenandcs > GPMC_CS_NUM)
                printk(KERN_INFO "OneNAND: Unable to find configuration "
-                               " in GPMC\n ");
+                               "in GPMC\n");
        else
                board_onenand_init(sdp_partition_info[1], onenandcs);
 
        if (nandcs > GPMC_CS_NUM)
                printk(KERN_INFO "NAND: Unable to find configuration "
-                               " in GPMC\n ");
+                               "in GPMC\n");
        else
                board_nand_init(sdp_partition_info[2], nandcs);
 }
diff --git a/arch/arm/mach-omap2/board-zoom-debugboard.c b/arch/arm/mach-omap2/board-zoom-debugboard.c
index bb4018b..e15d2e8 100644
@@ -96,7 +96,7 @@ static struct plat_serial8250_port serial_platform_data[] = {
 
 static struct platform_device zoom_debugboard_serial_device = {
        .name                   = "serial8250",
-       .id                     = 3,
+       .id                     = PLAT8250_DEV_PLATFORM,
        .dev                    = {
                .platform_data  = serial_platform_data,
        },
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index ca95d8d..6b39849 100644
@@ -280,7 +280,6 @@ static void enable_board_wakeup_source(void)
 void __init zoom_peripherals_init(void)
 {
        omap_i2c_init();
-       omap_serial_init();
        usb_musb_init(&musb_board_data);
        enable_board_wakeup_source();
 }
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index d5153b6..9cba556 100644
@@ -895,7 +895,7 @@ static struct clk dpll4_m4x2_ck = {
        .ops            = &clkops_omap2_dflt_wait,
        .parent         = &dpll4_m4_ck,
        .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_CAM_SHIFT,
+       .enable_bit     = OMAP3430_PWRDN_DSS1_SHIFT,
        .flags          = INVERT_ENABLE,
        .clkdm_name     = "dpll4_clkdm",
        .recalc         = &omap3_clkoutx2_recalc,
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index 28b1079..a5c0c9c 100644
@@ -2671,10 +2671,10 @@ static struct omap_clk omap44xx_clks[] = {
        CLK("omap-mcbsp.2",     "ick",                          &dummy_ck,      CK_443X),
        CLK("omap-mcbsp.3",     "ick",                          &dummy_ck,      CK_443X),
        CLK("omap-mcbsp.4",     "ick",                          &dummy_ck,      CK_443X),
-       CLK("omap-mcspi.1",     "ick",                          &dummy_ck,      CK_443X),
-       CLK("omap-mcspi.2",     "ick",                          &dummy_ck,      CK_443X),
-       CLK("omap-mcspi.3",     "ick",                          &dummy_ck,      CK_443X),
-       CLK("omap-mcspi.4",     "ick",                          &dummy_ck,      CK_443X),
+       CLK("omap2_mcspi.1",    "ick",                  &dummy_ck,      CK_443X),
+       CLK("omap2_mcspi.2",    "ick",                  &dummy_ck,      CK_443X),
+       CLK("omap2_mcspi.3",    "ick",                  &dummy_ck,      CK_443X),
+       CLK("omap2_mcspi.4",    "ick",                  &dummy_ck,      CK_443X),
        CLK(NULL,       "uart1_ick",                    &dummy_ck,      CK_443X),
        CLK(NULL,       "uart2_ick",                    &dummy_ck,      CK_443X),
        CLK(NULL,       "uart3_ick",                    &dummy_ck,      CK_443X),
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index b87ad66..6e568ec 100644
@@ -240,7 +240,7 @@ static void _omap2_clkdm_set_hwsup(struct clockdomain *clkdm, int enable)
                        bits = OMAP24XX_CLKSTCTRL_ENABLE_AUTO;
                else
                        bits = OMAP24XX_CLKSTCTRL_DISABLE_AUTO;
-       } else if (cpu_is_omap34xx() | cpu_is_omap44xx()) {
+       } else if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
                if (enable)
                        bits = OMAP34XX_CLKSTCTRL_ENABLE_AUTO;
                else
@@ -812,7 +812,7 @@ int omap2_clkdm_sleep(struct clockdomain *clkdm)
                cm_set_mod_reg_bits(OMAP24XX_FORCESTATE,
                            clkdm->pwrdm.ptr->prcm_offs, OMAP2_PM_PWSTCTRL);
 
-       } else if (cpu_is_omap34xx() | cpu_is_omap44xx()) {
+       } else if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
 
                u32 bits = (OMAP34XX_CLKSTCTRL_FORCE_SLEEP <<
                         __ffs(clkdm->clktrctrl_mask));
@@ -856,7 +856,7 @@ int omap2_clkdm_wakeup(struct clockdomain *clkdm)
                cm_clear_mod_reg_bits(OMAP24XX_FORCESTATE,
                              clkdm->pwrdm.ptr->prcm_offs, OMAP2_PM_PWSTCTRL);
 
-       } else if (cpu_is_omap34xx() | cpu_is_omap44xx()) {
+       } else if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
 
                u32 bits = (OMAP34XX_CLKSTCTRL_FORCE_WAKEUP <<
                         __ffs(clkdm->clktrctrl_mask));
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 23e4d77..2271b9b 100644
@@ -726,7 +726,7 @@ void __init omap2_init_mmc(struct omap_mmc_platform_data **mmc_data,
                        if (!cpu_is_omap44xx())
                                return;
                        base = OMAP4_MMC5_BASE + OMAP4_MMC_REG_OFFSET;
-                       irq = OMAP44XX_IRQ_MMC4;
+                       irq = OMAP44XX_IRQ_MMC5;
                        break;
                default:
                        continue;
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
index 64d74f0..e57fb29 100644
@@ -39,6 +39,9 @@ static int omap2_nand_gpmc_retime(void)
        struct gpmc_timings t;
        int err;
 
+       if (!gpmc_nand_data->gpmc_t)
+               return 0;
+
        memset(&t, 0, sizeof(t));
        t.sync_clk = gpmc_round_ns_to_ticks(gpmc_nand_data->gpmc_t->sync_clk);
        t.cs_on = gpmc_round_ns_to_ticks(gpmc_nand_data->gpmc_t->cs_on);
diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S
index ff25c7e..50fd749 100644
@@ -52,7 +52,7 @@ omap_irq_base:        .word   0
 
                mrc     p15, 0, \tmp, c0, c0, 0 @ get processor revision
                and     \tmp, \tmp, #0x000f0000 @ only check architecture
-               cmp     \tmp, #0x00060000       @ is v6?
+               cmp     \tmp, #0x00070000       @ is v6?
                beq     2400f                   @ found v6 so it's omap24xx
                mrc     p15, 0, \tmp, c0, c0, 0 @ get processor revision
                and     \tmp, \tmp, #0x000000f0 @ check cortex 8 or 9
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index aa3f65c..ef0e7a0 100644
@@ -33,7 +33,7 @@
 ENTRY(omap_secondary_startup)
 hold:  ldr     r12,=0x103
        dsb
-       smc                             @ read from AuxCoreBoot0
+       smc     #0                      @ read from AuxCoreBoot0
        mov     r0, r0, lsr #9
        mrc     p15, 0, r4, c0, c0, 5
        and     r4, r4, #0x0f
@@ -52,7 +52,7 @@ ENTRY(omap_modify_auxcoreboot0)
        stmfd   sp!, {r1-r12, lr}
        ldr     r12, =0x104
        dsb
-       smc
+       smc     #0
        ldmfd   sp!, {r1-r12, pc}
 END(omap_modify_auxcoreboot0)
 
@@ -60,6 +60,6 @@ ENTRY(omap_auxcoreboot_addr)
        stmfd   sp!, {r2-r12, lr}
        ldr     r12, =0x105
        dsb
-       smc
+       smc     #0
        ldmfd   sp!, {r2-r12, pc}
 END(omap_auxcoreboot_addr)
diff --git a/arch/arm/mach-omap2/omap44xx-smc.S b/arch/arm/mach-omap2/omap44xx-smc.S
index 89bb2b1..f61c777 100644
@@ -27,6 +27,6 @@ ENTRY(omap_smc1)
        mov     r12, r0
        mov     r0, r1
        dsb
-       smc
+       smc     #0
        ldmfd   sp!, {r2-r12, pc}
 END(omap_smc1)
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index c664947..e436dcb 100644
@@ -1511,6 +1511,9 @@ struct powerdomain *omap_hwmod_get_pwrdm(struct omap_hwmod *oh)
                c = oh->slaves[oh->_mpu_port_index]->_clk;
        }
 
+       if (!c->clkdm)
+               return NULL;
+
        return c->clkdm->pwrdm.ptr;
 
 }
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 9a0fb38..ebfce7d 100644
@@ -222,7 +222,7 @@ void pwrdm_init(struct powerdomain **pwrdm_list)
 {
        struct powerdomain **p = NULL;
 
-       if (cpu_is_omap24xx() | cpu_is_omap34xx()) {
+       if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
                pwrstctrl_reg_offs = OMAP2_PM_PWSTCTRL;
                pwrstst_reg_offs = OMAP2_PM_PWSTST;
        } else if (cpu_is_omap44xx()) {
diff --git a/arch/arm/mach-omap2/prcm.c b/arch/arm/mach-omap2/prcm.c
index 9537f6f..07a60f1 100644
@@ -123,7 +123,7 @@ struct omap3_prcm_regs prcm_context;
 u32 omap_prcm_get_reset_sources(void)
 {
        /* XXX This presumably needs modification for 34XX */
-       if (cpu_is_omap24xx() | cpu_is_omap34xx())
+       if (cpu_is_omap24xx() || cpu_is_omap34xx())
                return prm_read_mod_reg(WKUP_MOD, OMAP2_RM_RSTST) & 0x7f;
        if (cpu_is_omap44xx())
                return prm_read_mod_reg(WKUP_MOD, OMAP4_RM_RSTST) & 0x7f;
@@ -157,7 +157,7 @@ void omap_prcm_arch_reset(char mode, const char *cmd)
        else
                WARN_ON(1);
 
-       if (cpu_is_omap24xx() | cpu_is_omap34xx())
+       if (cpu_is_omap24xx() || cpu_is_omap34xx())
                prm_set_mod_reg_bits(OMAP_RST_DPLL3, prcm_offs,
                                                 OMAP2_RM_RSTCTRL);
        if (cpu_is_omap44xx())
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index da77930..3771254 100644
@@ -115,7 +115,6 @@ static struct plat_serial8250_port serial_platform_data2[] = {
        }
 };
 
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
 static struct plat_serial8250_port serial_platform_data3[] = {
        {
                .irq            = 70,
@@ -128,23 +127,12 @@ static struct plat_serial8250_port serial_platform_data3[] = {
        }
 };
 
-static inline void omap2_set_globals_uart4(struct omap_globals *omap2_globals)
-{
-       serial_platform_data3[0].mapbase = omap2_globals->uart4_phys;
-}
-#else
-static inline void omap2_set_globals_uart4(struct omap_globals *omap2_globals)
-{
-}
-#endif
-
 void __init omap2_set_globals_uart(struct omap_globals *omap2_globals)
 {
        serial_platform_data0[0].mapbase = omap2_globals->uart1_phys;
        serial_platform_data1[0].mapbase = omap2_globals->uart2_phys;
        serial_platform_data2[0].mapbase = omap2_globals->uart3_phys;
-       if (cpu_is_omap3630() || cpu_is_omap44xx())
-               omap2_set_globals_uart4(omap2_globals);
+       serial_platform_data3[0].mapbase = omap2_globals->uart4_phys;
 }
 
 static inline unsigned int __serial_read_reg(struct uart_port *up,
@@ -550,7 +538,7 @@ static ssize_t sleep_timeout_store(struct device *dev,
        unsigned int value;
 
        if (sscanf(buf, "%u", &value) != 1) {
-               printk(KERN_ERR "sleep_timeout_store: Invalid value\n");
+               dev_err(dev, "sleep_timeout_store: Invalid value\n");
                return -EINVAL;
        }
 
@@ -664,27 +652,33 @@ void __init omap_serial_early_init(void)
                struct device *dev = &pdev->dev;
                struct plat_serial8250_port *p = dev->platform_data;
 
+               /* Don't map zero-based physical address */
+               if (p->mapbase == 0) {
+                       dev_warn(dev, "no physical address for uart#%d,"
+                                " so skipping early_init...\n", i);
+                       continue;
+               }
                /*
                 * Module 4KB + L4 interconnect 4KB
                 * Static mapping, never released
                 */
                p->membase = ioremap(p->mapbase, SZ_8K);
                if (!p->membase) {
-                       printk(KERN_ERR "ioremap failed for uart%i\n", i + 1);
+                       dev_err(dev, "ioremap failed for uart%i\n", i + 1);
                        continue;
                }
 
                sprintf(name, "uart%d_ick", i + 1);
                uart->ick = clk_get(NULL, name);
                if (IS_ERR(uart->ick)) {
-                       printk(KERN_ERR "Could not get uart%d_ick\n", i + 1);
+                       dev_err(dev, "Could not get uart%d_ick\n", i + 1);
                        uart->ick = NULL;
                }
 
                sprintf(name, "uart%d_fck", i+1);
                uart->fck = clk_get(NULL, name);
                if (IS_ERR(uart->fck)) {
-                       printk(KERN_ERR "Could not get uart%d_fck\n", i + 1);
+                       dev_err(dev, "Could not get uart%d_fck\n", i + 1);
                        uart->fck = NULL;
                }
 
@@ -727,6 +721,13 @@ void __init omap_serial_init_port(int port)
        pdev = &uart->pdev;
        dev = &pdev->dev;
 
+       /* Don't proceed if there's no clocks available */
+       if (unlikely(!uart->ick || !uart->fck)) {
+               WARN(1, "%s: can't init uart%d, no clocks available\n",
+                    kobject_name(&dev->kobj), port);
+               return;
+       }
+
        omap_uart_enable_clocks(uart);
 
        omap_uart_reset(uart);
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index 088c1a0..f12f0e3 100644
@@ -44,9 +44,6 @@
 
 #define NO_LENGTH_CHECK 0xffffffff
 
-unsigned char omap_bootloader_tag[512];
-int omap_bootloader_tag_len;
-
 struct omap_board_config_kernel *omap_board_config;
 int omap_board_config_size;
 
@@ -100,10 +97,17 @@ EXPORT_SYMBOL(omap_get_var_config);
 
 #include <linux/clocksource.h>
 
+/*
+ * offset_32k holds the init time counter value. It is then subtracted
+ * from every counter read to achieve a counter that counts time from the
+ * kernel boot (needed for sched_clock()).
+ */
+static u32 offset_32k __read_mostly;
+
 #ifdef CONFIG_ARCH_OMAP16XX
 static cycle_t omap16xx_32k_read(struct clocksource *cs)
 {
-       return omap_readl(OMAP16XX_TIMER_32K_SYNCHRONIZED);
+       return omap_readl(OMAP16XX_TIMER_32K_SYNCHRONIZED) - offset_32k;
 }
 #else
 #define omap16xx_32k_read      NULL
@@ -112,7 +116,7 @@ static cycle_t omap16xx_32k_read(struct clocksource *cs)
 #ifdef CONFIG_ARCH_OMAP2420
 static cycle_t omap2420_32k_read(struct clocksource *cs)
 {
-       return omap_readl(OMAP2420_32KSYNCT_BASE + 0x10);
+       return omap_readl(OMAP2420_32KSYNCT_BASE + 0x10) - offset_32k;
 }
 #else
 #define omap2420_32k_read      NULL
@@ -121,7 +125,7 @@ static cycle_t omap2420_32k_read(struct clocksource *cs)
 #ifdef CONFIG_ARCH_OMAP2430
 static cycle_t omap2430_32k_read(struct clocksource *cs)
 {
-       return omap_readl(OMAP2430_32KSYNCT_BASE + 0x10);
+       return omap_readl(OMAP2430_32KSYNCT_BASE + 0x10) - offset_32k;
 }
 #else
 #define omap2430_32k_read      NULL
@@ -130,7 +134,7 @@ static cycle_t omap2430_32k_read(struct clocksource *cs)
 #ifdef CONFIG_ARCH_OMAP3
 static cycle_t omap34xx_32k_read(struct clocksource *cs)
 {
-       return omap_readl(OMAP3430_32KSYNCT_BASE + 0x10);
+       return omap_readl(OMAP3430_32KSYNCT_BASE + 0x10) - offset_32k;
 }
 #else
 #define omap34xx_32k_read      NULL
@@ -139,7 +143,7 @@ static cycle_t omap34xx_32k_read(struct clocksource *cs)
 #ifdef CONFIG_ARCH_OMAP4
 static cycle_t omap44xx_32k_read(struct clocksource *cs)
 {
-       return omap_readl(OMAP4430_32KSYNCT_BASE + 0x10);
+       return omap_readl(OMAP4430_32KSYNCT_BASE + 0x10) - offset_32k;
 }
 #else
 #define omap44xx_32k_read      NULL
@@ -227,6 +231,8 @@ static int __init omap_init_clocksource_32k(void)
                clocksource_32k.mult = clocksource_hz2mult(32768,
                                            clocksource_32k.shift);
 
+               offset_32k = clocksource_32k.read(&clocksource_32k);
+
                if (clocksource_register(&clocksource_32k))
                        printk(err, clocksource_32k.name);
        }
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 5c6c342..1d95996 100644
@@ -937,6 +937,15 @@ void omap_start_dma(int lch)
 {
        u32 l;
 
+       /*
+        * The CPC/CDAC register needs to be initialized to zero
+        * before starting dma transfer.
+        */
+       if (cpu_is_omap15xx())
+               dma_write(0, CPC(lch));
+       else
+               dma_write(0, CDAC(lch));
+
        if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
                int next_lch, cur_lch;
                char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 76a347b..45a225d 100644
@@ -798,7 +798,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
        case METHOD_MPUIO:
                reg += OMAP_MPUIO_GPIO_INT_EDGE;
                l = __raw_readl(reg);
-               if (trigger & IRQ_TYPE_EDGE_BOTH)
+               if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
                        bank->toggle_mask |= 1 << gpio;
                if (trigger & IRQ_TYPE_EDGE_RISING)
                        l |= 1 << gpio;
@@ -812,7 +812,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
        case METHOD_GPIO_1510:
                reg += OMAP1510_GPIO_INT_CONTROL;
                l = __raw_readl(reg);
-               if (trigger & IRQ_TYPE_EDGE_BOTH)
+               if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
                        bank->toggle_mask |= 1 << gpio;
                if (trigger & IRQ_TYPE_EDGE_RISING)
                        l |= 1 << gpio;
@@ -846,7 +846,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
        case METHOD_GPIO_7XX:
                reg += OMAP7XX_GPIO_INT_CONTROL;
                l = __raw_readl(reg);
-               if (trigger & IRQ_TYPE_EDGE_BOTH)
+               if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
                        bank->toggle_mask |= 1 << gpio;
                if (trigger & IRQ_TYPE_EDGE_RISING)
                        l |= 1 << gpio;
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index b65088a..4017019 100644
 #define INT_34XX_MMC3_IRQ      94
 #define INT_34XX_GPT12_IRQ     95
 
-#define        INT_34XX_BENCH_MPU_EMUL 3
-
 #define INT_35XX_HECC0_IRQ             24
 #define INT_35XX_HECC1_IRQ             28
 #define INT_35XX_EMAC_C0_RXTHRESH_IRQ  67
diff --git a/arch/arm/plat-omap/include/plat/mcbsp.h b/arch/arm/plat-omap/include/plat/mcbsp.h
index 3974835..7de903d 100644
@@ -59,7 +59,7 @@
 #define OMAP44XX_MCBSP1_BASE   0x49022000
 #define OMAP44XX_MCBSP2_BASE   0x49024000
 #define OMAP44XX_MCBSP3_BASE   0x49026000
-#define OMAP44XX_MCBSP4_BASE   0x48074000
+#define OMAP44XX_MCBSP4_BASE   0x48096000
 
 #if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
 
diff --git a/arch/arm/plat-omap/include/plat/nand.h b/arch/arm/plat-omap/include/plat/nand.h
index 6ba88d2..f8efd54 100644
@@ -29,4 +29,11 @@ struct omap_nand_platform_data {
 /* size (4 KiB) for IO mapping */
 #define        NAND_IO_SIZE    SZ_4K
 
+#if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE)
 extern int gpmc_nand_init(struct omap_nand_platform_data *d);
+#else
+static inline int gpmc_nand_init(struct omap_nand_platform_data *d)
+{
+       return 0;
+}
+#endif
diff --git a/arch/arm/plat-omap/include/plat/omap44xx.h b/arch/arm/plat-omap/include/plat/omap44xx.h
index 2302474..b3ef1a7 100644
@@ -32,7 +32,7 @@
 #define OMAP4430_PRM_BASE              0x4a306000
 #define OMAP44XX_GPMC_BASE             0x50000000
 #define OMAP443X_SCM_BASE              0x4a002000
-#define OMAP443X_CTRL_BASE             OMAP443X_SCM_BASE
+#define OMAP443X_CTRL_BASE             0x4a100000
 #define OMAP44XX_IC_BASE               0x48200000
 #define OMAP44XX_IVA_INTC_BASE         0x40000000
 #define IRQ_SIR_IRQ                    0x0040
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 440b416..36d6ea5 100644
@@ -294,8 +294,8 @@ struct omap_hwmod_class_sysconfig {
        u16 rev_offs;
        u16 sysc_offs;
        u16 syss_offs;
+       u16 sysc_flags;
        u8 idlemodes;
-       u8 sysc_flags;
        u8 clockact;
        struct omap_hwmod_sysc_fields *sysc_fields;
 };
diff --git a/arch/avr32/kernel/ptrace.c b/arch/avr32/kernel/ptrace.c
index dd5b882..5e73c25 100644
@@ -28,7 +28,7 @@ static struct pt_regs *get_user_regs(struct task_struct *tsk)
                                  THREAD_SIZE - sizeof(struct pt_regs));
 }
 
-static void user_enable_single_step(struct task_struct *tsk)
+void user_enable_single_step(struct task_struct *tsk)
 {
        pr_debug("user_enable_single_step: pid=%u, PC=0x%08lx, SR=0x%08lx\n",
                 tsk->pid, task_pt_regs(tsk)->pc, task_pt_regs(tsk)->sr);
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 64cda95..7a656bd 100644
@@ -6,6 +6,7 @@
 #include "linux/irqreturn.h"
 #include "linux/kd.h"
 #include "linux/sched.h"
+#include "linux/slab.h"
 #include "chan_kern.h"
 #include "irq_kern.h"
 #include "irq_user.h"
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
index 06d6ccf..b6b1096 100644
@@ -8,7 +8,6 @@
 #include <errno.h>
 #include <sched.h>
 #include <linux/limits.h>
-#include <linux/slab.h>
 #include <sys/socket.h>
 #include <sys/wait.h>
 #include "kern_constants.h"
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index ff469e4..a353475 100644
@@ -429,7 +429,7 @@ static int apbt_cpuhp_notify(struct notifier_block *n,
 
 static __init int apbt_late_init(void)
 {
-       if (disable_apbt_percpu)
+       if (disable_apbt_percpu || !apb_timer_block_enabled)
                return 0;
        /* This notifier should be called after workqueue is ready */
        hotcpu_notifier(apbt_cpuhp_notify, -20);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 7e1cca1..1366c7c 100644
@@ -47,6 +47,27 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
                (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 
+       /*
+        * Atom erratum AAE44/AAF40/AAG38/AAH41:
+        *
+        * A race condition between speculative fetches and invalidating
+        * a large page.  This is worked around in microcode, but we
+        * need the microcode to have already been loaded... so if it is
+        * not, recommend a BIOS update and disable large pages.
+        */
+       if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2) {
+               u32 ucode, junk;
+
+               wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+               sync_core();
+               rdmsr(MSR_IA32_UCODE_REV, junk, ucode);
+
+               if (ucode < 0x20e) {
+                       printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
+                       clear_cpu_cap(c, X86_FEATURE_PSE);
+               }
+       }
+
 #ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSENTER32);
 #else
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 1cbed97..dfdb4db 100644
@@ -22,6 +22,7 @@
  */
 
 #include <linux/dmi.h>
+#include <linux/module.h>
 #include <asm/div64.h>
 #include <asm/vmware.h>
 #include <asm/x86_init.h>
@@ -101,6 +102,7 @@ int vmware_platform(void)
 
        return 0;
 }
+EXPORT_SYMBOL(vmware_platform);
 
 /*
  * VMware hypervisor takes care of exporting a reliable TSC to the guest.
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index dc9690b..17cb329 100644
@@ -276,12 +276,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
        set_tsk_thread_flag(p, TIF_FORK);
 
-       p->thread.fs = me->thread.fs;
-       p->thread.gs = me->thread.gs;
        p->thread.io_bitmap_ptr = NULL;
 
        savesegment(gs, p->thread.gsindex);
+       p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
        savesegment(fs, p->thread.fsindex);
+       p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);
 
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index c7b1ebf..31930fd 100644
@@ -66,14 +66,44 @@ resource_to_addr(struct acpi_resource *resource,
                        struct acpi_resource_address64 *addr)
 {
        acpi_status status;
-
-       status = acpi_resource_to_address64(resource, addr);
-       if (ACPI_SUCCESS(status) &&
-           (addr->resource_type == ACPI_MEMORY_RANGE ||
-           addr->resource_type == ACPI_IO_RANGE) &&
-           addr->address_length > 0 &&
-           addr->producer_consumer == ACPI_PRODUCER) {
+       struct acpi_resource_memory24 *memory24;
+       struct acpi_resource_memory32 *memory32;
+       struct acpi_resource_fixed_memory32 *fixed_memory32;
+
+       memset(addr, 0, sizeof(*addr));
+       switch (resource->type) {
+       case ACPI_RESOURCE_TYPE_MEMORY24:
+               memory24 = &resource->data.memory24;
+               addr->resource_type = ACPI_MEMORY_RANGE;
+               addr->minimum = memory24->minimum;
+               addr->address_length = memory24->address_length;
+               addr->maximum = addr->minimum + addr->address_length - 1;
+               return AE_OK;
+       case ACPI_RESOURCE_TYPE_MEMORY32:
+               memory32 = &resource->data.memory32;
+               addr->resource_type = ACPI_MEMORY_RANGE;
+               addr->minimum = memory32->minimum;
+               addr->address_length = memory32->address_length;
+               addr->maximum = addr->minimum + addr->address_length - 1;
                return AE_OK;
+       case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+               fixed_memory32 = &resource->data.fixed_memory32;
+               addr->resource_type = ACPI_MEMORY_RANGE;
+               addr->minimum = fixed_memory32->address;
+               addr->address_length = fixed_memory32->address_length;
+               addr->maximum = addr->minimum + addr->address_length - 1;
+               return AE_OK;
+       case ACPI_RESOURCE_TYPE_ADDRESS16:
+       case ACPI_RESOURCE_TYPE_ADDRESS32:
+       case ACPI_RESOURCE_TYPE_ADDRESS64:
+               status = acpi_resource_to_address64(resource, addr);
+               if (ACPI_SUCCESS(status) &&
+                   (addr->resource_type == ACPI_MEMORY_RANGE ||
+                   addr->resource_type == ACPI_IO_RANGE) &&
+                   addr->address_length > 0) {
+                       return AE_OK;
+               }
+               break;
        }
        return AE_ERROR;
 }
@@ -91,30 +121,6 @@ count_resource(struct acpi_resource *acpi_res, void *data)
        return AE_OK;
 }
 
-static void
-align_resource(struct acpi_device *bridge, struct resource *res)
-{
-       int align = (res->flags & IORESOURCE_MEM) ? 16 : 4;
-
-       /*
-        * Host bridge windows are not BARs, but the decoders on the PCI side
-        * that claim this address space have starting alignment and length
-        * constraints, so fix any obvious BIOS goofs.
-        */
-       if (!IS_ALIGNED(res->start, align)) {
-               dev_printk(KERN_DEBUG, &bridge->dev,
-                          "host bridge window %pR invalid; "
-                          "aligning start to %d-byte boundary\n", res, align);
-               res->start &= ~(align - 1);
-       }
-       if (!IS_ALIGNED(res->end + 1, align)) {
-               dev_printk(KERN_DEBUG, &bridge->dev,
-                          "host bridge window %pR invalid; "
-                          "aligning end to %d-byte boundary\n", res, align);
-               res->end = ALIGN(res->end, align) - 1;
-       }
-}
-
 static acpi_status
 setup_resource(struct acpi_resource *acpi_res, void *data)
 {
@@ -124,7 +130,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
        acpi_status status;
        unsigned long flags;
        struct resource *root, *conflict;
-       u64 start, end, max_len;
+       u64 start, end;
 
        status = resource_to_addr(acpi_res, &addr);
        if (!ACPI_SUCCESS(status))
@@ -141,19 +147,8 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
        } else
                return AE_OK;
 
-       max_len = addr.maximum - addr.minimum + 1;
-       if (addr.address_length > max_len) {
-               dev_printk(KERN_DEBUG, &info->bridge->dev,
-                          "host bridge window length %#llx doesn't fit in "
-                          "%#llx-%#llx, trimming\n",
-                          (unsigned long long) addr.address_length,
-                          (unsigned long long) addr.minimum,
-                          (unsigned long long) addr.maximum);
-               addr.address_length = max_len;
-       }
-
        start = addr.minimum + addr.translation_offset;
-       end = start + addr.address_length - 1;
+       end = addr.maximum + addr.translation_offset;
 
        res = &info->res[info->res_num];
        res->name = info->name;
@@ -161,7 +156,6 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
        res->start = start;
        res->end = end;
        res->child = NULL;
-       align_resource(info->bridge, res);
 
        if (!pci_use_crs) {
                dev_printk(KERN_DEBUG, &info->bridge->dev,
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 46fd43f..97da2ba 100644
@@ -72,6 +72,9 @@ pcibios_align_resource(void *data, const struct resource *res,
                        return start;
                if (start & 0x300)
                        start = (start + 0x3ff) & ~0x3ff;
+       } else if (res->flags & IORESOURCE_MEM) {
+               if (start < BIOS_END)
+                       start = BIOS_END;
        }
        return start;
 }
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 1ba7e0a..4f0c06c 100644
@@ -109,6 +109,7 @@ void blk_rq_timed_out_timer(unsigned long data)
        struct request_queue *q = (struct request_queue *) data;
        unsigned long flags, next = 0;
        struct request *rq, *tmp;
+       int next_set = 0;
 
        spin_lock_irqsave(q->queue_lock, flags);
 
@@ -122,16 +123,13 @@ void blk_rq_timed_out_timer(unsigned long data)
                        if (blk_mark_rq_complete(rq))
                                continue;
                        blk_rq_timed_out(rq);
-               } else if (!next || time_after(next, rq->deadline))
+               } else if (!next_set || time_after(next, rq->deadline)) {
                        next = rq->deadline;
+                       next_set = 1;
+               }
        }
 
-       /*
-        * next can never be 0 here with the list non-empty, since we always
-        * bump ->deadline to 1 so we can detect if the timer was ever added
-        * or not. See comment in blk_add_timer()
-        */
-       if (next)
+       if (next_set)
                mod_timer(&q->timeout, round_jiffies_up(next));
 
        spin_unlock_irqrestore(q->queue_lock, flags);
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 2bb7348..05eb32e 100644
@@ -46,6 +46,12 @@ struct authenc_request_ctx {
        char tail[];
 };
 
+static void authenc_request_complete(struct aead_request *req, int err)
+{
+       if (err != -EINPROGRESS)
+               aead_request_complete(req, err);
+}
+
 static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
                                 unsigned int keylen)
 {
@@ -142,7 +148,7 @@ static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
                                 crypto_aead_authsize(authenc), 1);
 
 out:
-       aead_request_complete(req, err);
+       authenc_request_complete(req, err);
 }
 
 static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
@@ -208,7 +214,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
        err = crypto_ablkcipher_decrypt(abreq);
 
 out:
-       aead_request_complete(req, err);
+       authenc_request_complete(req, err);
 }
 
 static void authenc_verify_ahash_done(struct crypto_async_request *areq,
@@ -245,7 +251,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
        err = crypto_ablkcipher_decrypt(abreq);
 
 out:
-       aead_request_complete(req, err);
+       authenc_request_complete(req, err);
 }
 
 static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags)
@@ -379,7 +385,7 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
                err = crypto_authenc_genicv(areq, iv, 0);
        }
 
-       aead_request_complete(areq, err);
+       authenc_request_complete(areq, err);
 }
 
 static int crypto_authenc_encrypt(struct aead_request *req)
@@ -420,7 +426,7 @@ static void crypto_authenc_givencrypt_done(struct crypto_async_request *req,
                err = crypto_authenc_genicv(areq, greq->giv, 0);
        }
 
-       aead_request_complete(areq, err);
+       authenc_request_complete(areq, err);
 }
 
 static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 9f6cfac..228740f 100644
@@ -879,6 +879,8 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
+       struct request_queue *q = qc->scsicmd->device->request_queue;
+       unsigned long flags;
 
        WARN_ON(!ap->ops->error_handler);
 
@@ -890,7 +892,9 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
         * Note that ATA_QCFLAG_FAILED is unconditionally set after
         * this function completes.
         */
+       spin_lock_irqsave(q->queue_lock, flags);
        blk_abort_request(qc->scsicmd->request);
+       spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 /**
@@ -1624,6 +1628,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
        }
 
        /* okay, this error is ours */
+       memset(&tf, 0, sizeof(tf));
        rc = ata_eh_read_log_10h(dev, &tag, &tf);
        if (rc) {
                ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 3c3172d..4164dd2 100644
@@ -424,6 +424,8 @@ static struct pcmcia_device_id pcmcia_devices[] = {
        PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
        PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
        PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
+       PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x3e520e17),
+       PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
        PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
        PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2      ", 0x547e66dc, 0x8671043b),
        PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
@@ -444,6 +446,8 @@ static struct pcmcia_device_id pcmcia_devices[] = {
        PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
        PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
        PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
+       PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x9351e59d),
+       PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
        PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
        PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
        PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 67e0fc5..93d1f9b 100644
@@ -1695,6 +1695,7 @@ int drbd_send_protocol(struct drbd_conf *mdev)
                        cf |= CF_DRY_RUN;
                else {
                        dev_err(DEV, "--dry-run is not supported by peer");
+                       kfree(p);
                        return 0;
                }
        }
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index ed9f1de..3f096e7 100644
@@ -899,7 +899,8 @@ retry:
 
        drbd_thread_start(&mdev->asender);
 
-       drbd_send_protocol(mdev);
+       if (!drbd_send_protocol(mdev))
+               return -1;
        drbd_send_sync_param(mdev, &mdev->sync_conf);
        drbd_send_sizes(mdev, 0);
        drbd_send_uuids(mdev);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ddf1942..8a549db 100644
@@ -48,6 +48,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/compat.h>
 #include <linux/kthread.h>
 #include <linux/errno.h>
 #include <linux/spinlock.h>
@@ -2984,7 +2985,7 @@ static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
        mutex_unlock(&ctl_mutex);
 }
 
-static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        void __user *argp = (void __user *)arg;
        struct pkt_ctrl_command ctrl_cmd;
@@ -3021,10 +3022,20 @@ static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cm
        return ret;
 }
 
+#ifdef CONFIG_COMPAT
+static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
 
 static const struct file_operations pkt_ctl_fops = {
-       .ioctl   = pkt_ctl_ioctl,
-       .owner   = THIS_MODULE,
+       .open           = nonseekable_open,
+       .unlocked_ioctl = pkt_ctl_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = pkt_ctl_compat_ioctl,
+#endif
+       .owner          = THIS_MODULE,
 };
 
 static struct miscdevice pkt_misc = {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2d5d575..75d293e 100644
@@ -1113,6 +1113,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
        unsigned int cpu = sys_dev->id;
        unsigned long flags;
        struct cpufreq_policy *data;
+       struct kobject *kobj;
+       struct completion *cmp;
 #ifdef CONFIG_SMP
        struct sys_device *cpu_sys_dev;
        unsigned int j;
@@ -1141,10 +1143,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
                dprintk("removing link\n");
                cpumask_clear_cpu(cpu, data->cpus);
                spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-               sysfs_remove_link(&sys_dev->kobj, "cpufreq");
+               kobj = &sys_dev->kobj;
                cpufreq_cpu_put(data);
                cpufreq_debug_enable_ratelimit();
                unlock_policy_rwsem_write(cpu);
+               sysfs_remove_link(kobj, "cpufreq");
                return 0;
        }
 #endif
@@ -1181,7 +1184,10 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
                                data->governor->name, CPUFREQ_NAME_LEN);
 #endif
                        cpu_sys_dev = get_cpu_sysdev(j);
-                       sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
+                       kobj = &cpu_sys_dev->kobj;
+                       unlock_policy_rwsem_write(cpu);
+                       sysfs_remove_link(kobj, "cpufreq");
+                       lock_policy_rwsem_write(cpu);
                        cpufreq_cpu_put(data);
                }
        }
@@ -1192,19 +1198,22 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
        if (cpufreq_driver->target)
                __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 
-       kobject_put(&data->kobj);
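+       /* cache the pointers and drop the policy rwsem before releasing the kobject and waiting for it */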
+       kobj = &data->kobj;
+       cmp = &data->kobj_unregister;
+       unlock_policy_rwsem_write(cpu);
+       kobject_put(kobj);
 
        /* we need to make sure that the underlying kobj is actually
         * not referenced anymore by anybody before we proceed with
         * unloading.
         */
        dprintk("waiting for dropping of refcount\n");
-       wait_for_completion(&data->kobj_unregister);
+       wait_for_completion(cmp);
        dprintk("wait complete\n");
 
+       lock_policy_rwsem_write(cpu);
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(data);
-
        unlock_policy_rwsem_write(cpu);
 
        free_cpumask_var(data->related_cpus);
index 599a40b..3a14787 100644 (file)
@@ -444,6 +444,7 @@ static struct attribute_group dbs_attr_group_old = {
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
        unsigned int load = 0;
+       unsigned int max_load = 0;
        unsigned int freq_target;
 
        struct cpufreq_policy *policy;
@@ -501,6 +502,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
                        continue;
 
                load = 100 * (wall_time - idle_time) / wall_time;
+
+               if (load > max_load)
+                       max_load = load;
        }
 
        /*
@@ -511,7 +515,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
                return;
 
        /* Check for frequency increase */
-       if (load > dbs_tuners_ins.up_threshold) {
+       if (max_load > dbs_tuners_ins.up_threshold) {
                this_dbs_info->down_skip = 0;
 
                /* if we are already at full speed then break out early */
@@ -538,7 +542,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
         * can support the current CPU usage without triggering the up
         * policy. To be safe, we focus 10 points under the threshold.
         */
-       if (load < (dbs_tuners_ins.down_threshold - 10)) {
+       if (max_load < (dbs_tuners_ins.down_threshold - 10)) {
                freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
 
                this_dbs_info->requested_freq -= freq_target;
index 3784a47..8f5aebf 100644 (file)
@@ -190,7 +190,7 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
        for (try = 0; try < 5; try++) {
                new = allocate ? old - bandwidth : old + bandwidth;
                if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
-                       break;
+                       return -EBUSY;
 
                data[0] = cpu_to_be32(old);
                data[1] = cpu_to_be32(new);
@@ -218,7 +218,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
                u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
 {
        __be32 c, all, old;
-       int i, retry = 5;
+       int i, ret = -EIO, retry = 5;
 
        old = all = allocate ? cpu_to_be32(~0) : 0;
 
@@ -226,6 +226,8 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
                if (!(channels_mask & 1 << i))
                        continue;
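+               /* at least one requested channel exists, so default to -EBUSY rather than -EIO */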
 
+               ret = -EBUSY;
+
                c = cpu_to_be32(1 << (31 - i));
                if ((old & c) != (all & c))
                        continue;
@@ -251,12 +253,16 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 
                        /* 1394-1995 IRM, fall through to retry. */
                default:
-                       if (retry--)
+                       if (retry) {
+                               retry--;
                                i--;
+                       } else {
+                               ret = -EIO;
+                       }
                }
        }
 
-       return -EIO;
+       return ret;
 }
 
 static void deallocate_channel(struct fw_card *card, int irm_id,
index 0cf4d7f..94b16e0 100644 (file)
@@ -1158,7 +1158,7 @@ static void handle_local_lock(struct fw_ohci *ohci,
                              struct fw_packet *packet, u32 csr)
 {
        struct fw_packet response;
-       int tcode, length, ext_tcode, sel;
+       int tcode, length, ext_tcode, sel, try;
        __be32 *payload, lock_old;
        u32 lock_arg, lock_data;
 
@@ -1185,21 +1185,26 @@ static void handle_local_lock(struct fw_ohci *ohci,
        reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
        reg_write(ohci, OHCI1394_CSRControl, sel);
 
-       if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
-               lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
-       else
-               fw_notify("swap not done yet\n");
+       for (try = 0; try < 20; try++)
+               if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
+                       lock_old = cpu_to_be32(reg_read(ohci,
+                                                       OHCI1394_CSRData));
+                       fw_fill_response(&response, packet->header,
+                                        RCODE_COMPLETE,
+                                        &lock_old, sizeof(lock_old));
+                       goto out;
+               }
+
+       fw_error("swap not done (CSR lock timeout)\n");
+       fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
 
-       fw_fill_response(&response, packet->header,
-                        RCODE_COMPLETE, &lock_old, sizeof(lock_old));
  out:
        fw_core_handle_response(&ohci->card, &response);
 }
 
 static void handle_local_request(struct context *ctx, struct fw_packet *packet)
 {
-       u64 offset;
-       u32 csr;
+       u64 offset, csr;
 
        if (ctx == &ctx->ohci->at_request_ctx) {
                packet->ack = ACK_PENDING;
index 7d521e1..b827c97 100644 (file)
@@ -252,6 +252,18 @@ static void pca953x_irq_bus_lock(unsigned int irq)
 static void pca953x_irq_bus_sync_unlock(unsigned int irq)
 {
        struct pca953x_chip *chip = get_irq_chip_data(irq);
+       uint16_t new_irqs;
+       uint16_t level;
+
+       /* Look for any newly setup interrupt */
+       new_irqs = chip->irq_trig_fall | chip->irq_trig_raise;
+       new_irqs &= ~chip->reg_direction;
+
+       while (new_irqs) {
+               level = __ffs(new_irqs);
+               pca953x_gpio_direction_input(&chip->gpio_chip, level);
+               new_irqs &= ~(1 << level);
+       }
 
        mutex_unlock(&chip->irq_lock);
 }
@@ -278,7 +290,7 @@ static int pca953x_irq_set_type(unsigned int irq, unsigned int type)
        else
                chip->irq_trig_raise &= ~mask;
 
-       return pca953x_gpio_direction_input(&chip->gpio_chip, level);
+       return 0;
 }
 
 static struct irq_chip pca953x_irq_chip = {
index 3bd8727..a263b70 100644 (file)
@@ -476,6 +476,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev->vbl_lock, irqflags);
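+       /* turn the interrupt off in the driver before recording the final counter value below */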
+       dev->driver->disable_vblank(dev, crtc);
        DRM_WAKEUP(&dev->vbl_queue[crtc]);
        dev->vblank_enabled[crtc] = 0;
        dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
index 2dc9393..c3cfafc 100644 (file)
@@ -1357,6 +1357,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 
        dev_priv->cfb_size = size;
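+       /* stash the allocation so i915_cleanup_compression() can free it on unload */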
 
+       dev_priv->compressed_fb = compressed_fb;
+
        if (IS_GM45(dev)) {
                g4x_disable_fbc(dev);
                I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1364,12 +1366,22 @@ static void i915_setup_compression(struct drm_device *dev, int size)
                i8xx_disable_fbc(dev);
                I915_WRITE(FBC_CFB_BASE, cfb_base);
                I915_WRITE(FBC_LL_BASE, ll_base);
+               dev_priv->compressed_llb = compressed_llb;
        }
 
        DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
                  ll_base, size >> 20);
 }
 
+static void i915_cleanup_compression(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       drm_mm_put_block(dev_priv->compressed_fb);
+       if (!IS_GM45(dev))
+               drm_mm_put_block(dev_priv->compressed_llb);
+}
+
 /* true = enable decode, false = disable decoder */
 static unsigned int i915_vga_set_decode(void *cookie, bool state)
 {
@@ -1787,6 +1799,8 @@ int i915_driver_unload(struct drm_device *dev)
                mutex_lock(&dev->struct_mutex);
                i915_gem_cleanup_ringbuffer(dev);
                mutex_unlock(&dev->struct_mutex);
+               if (I915_HAS_FBC(dev) && i915_powersave)
+                       i915_cleanup_compression(dev);
                drm_mm_takedown(&dev_priv->vram);
                i915_gem_lastclose(dev);
 
index 0af3dcc..cc03537 100644 (file)
@@ -69,7 +69,8 @@ const static struct intel_device_info intel_845g_info = {
 };
 
 const static struct intel_device_info intel_i85x_info = {
-       .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+       .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+       .cursor_needs_physical = 1,
 };
 
 const static struct intel_device_info intel_i865g_info = {
@@ -151,7 +152,7 @@ const static struct pci_device_id pciidlist[] = {
        INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
        INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
        INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
-       INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
+       INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
        INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
        INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
        INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
index 6960849..6e47900 100644 (file)
@@ -195,6 +195,7 @@ struct intel_overlay;
 struct intel_device_info {
        u8 is_mobile : 1;
        u8 is_i8xx : 1;
+       u8 is_i85x : 1;
        u8 is_i915g : 1;
        u8 is_i9xx : 1;
        u8 is_i945gm : 1;
@@ -235,11 +236,14 @@ typedef struct drm_i915_private {
 
        drm_dma_handle_t *status_page_dmah;
        void *hw_status_page;
+       void *seqno_page;
        dma_addr_t dma_status_page;
        uint32_t counter;
        unsigned int status_gfx_addr;
+       unsigned int seqno_gfx_addr;
        drm_local_map_t hws_map;
        struct drm_gem_object *hws_obj;
+       struct drm_gem_object *seqno_obj;
        struct drm_gem_object *pwrctx;
 
        struct resource mch_res;
@@ -630,6 +634,9 @@ typedef struct drm_i915_private {
        u8 max_delay;
 
        enum no_fbc_reason no_fbc_reason;
+
+       struct drm_mm_node *compressed_fb;
+       struct drm_mm_node *compressed_llb;
 } drm_i915_private_t;
 
 /** driver private structure attached to each drm_gem_object */
@@ -1070,7 +1077,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 #define IS_I830(dev)           ((dev)->pci_device == 0x3577)
 #define IS_845G(dev)           ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev)           ((dev)->pci_device == 0x3582)
+#define IS_I85X(dev)           (INTEL_INFO(dev)->is_i85x)
 #define IS_I865G(dev)          ((dev)->pci_device == 0x2572)
 #define IS_GEN2(dev)           (INTEL_INFO(dev)->is_i8xx)
 #define IS_I915G(dev)          (INTEL_INFO(dev)->is_i915g)
@@ -1135,6 +1142,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) ||        \
                            IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
index 80871c6..ef3d91d 100644 (file)
@@ -1588,6 +1588,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
        }
 }
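+/* Emit a PIPE_CONTROL with a depth stall and a qword write to the given global-GTT address. */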
 
+#define PIPE_CONTROL_FLUSH(addr)                                       \
+       OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |          \
+                PIPE_CONTROL_DEPTH_STALL);                             \
+       OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);                       \
+       OUT_RING(0);                                                    \
+       OUT_RING(0);                                                    \
+
 /**
  * Creates a new sequence number, emitting a write of it to the status page
  * plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1622,13 +1629,47 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
        if (dev_priv->mm.next_gem_seqno == 0)
                dev_priv->mm.next_gem_seqno++;
 
-       BEGIN_LP_RING(4);
-       OUT_RING(MI_STORE_DWORD_INDEX);
-       OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       OUT_RING(seqno);
+       if (HAS_PIPE_CONTROL(dev)) {
+               u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
 
-       OUT_RING(MI_USER_INTERRUPT);
-       ADVANCE_LP_RING();
+               /*
+                * Work around qword write incoherence by flushing the
+                * PIPE_NOTIFY buffers out to memory before requesting
+                * an interrupt.
+                */
+               BEGIN_LP_RING(32);
+               OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+                        PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+               OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+               OUT_RING(seqno);
+               OUT_RING(0);
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128; /* write to separate cachelines */
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128;
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128;
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128;
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               scratch_addr += 128;
+               PIPE_CONTROL_FLUSH(scratch_addr);
+               OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+                        PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+                        PIPE_CONTROL_NOTIFY);
+               OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+               OUT_RING(seqno);
+               OUT_RING(0);
+               ADVANCE_LP_RING();
+       } else {
+               BEGIN_LP_RING(4);
+               OUT_RING(MI_STORE_DWORD_INDEX);
+               OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+               OUT_RING(seqno);
+
+               OUT_RING(MI_USER_INTERRUPT);
+               ADVANCE_LP_RING();
+       }
 
        DRM_DEBUG_DRIVER("%d\n", seqno);
 
@@ -1752,7 +1793,10 @@ i915_get_gem_seqno(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
 
-       return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+       if (HAS_PIPE_CONTROL(dev))
+               return ((volatile u32 *)(dev_priv->seqno_page))[0];
+       else
+               return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
 }
 
 /**
@@ -2362,6 +2406,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
        pitch_val = obj_priv->stride / tile_width;
        pitch_val = ffs(pitch_val) - 1;
 
+       if (obj_priv->tiling_mode == I915_TILING_Y &&
+           HAS_128_BYTE_Y_TILING(dev))
+               WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
+       else
+               WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
+
        val = obj_priv->gtt_offset;
        if (obj_priv->tiling_mode == I915_TILING_Y)
                val |= 1 << I830_FENCE_TILING_Y_SHIFT;
@@ -4546,6 +4596,49 @@ i915_gem_idle(struct drm_device *dev)
        return 0;
 }
 
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+static int
+i915_gem_init_pipe_control(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+       int ret;
+
+       obj = drm_gem_object_alloc(dev, 4096);
+       if (obj == NULL) {
+               DRM_ERROR("Failed to allocate seqno page\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+       obj_priv = to_intel_bo(obj);
+       obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+       ret = i915_gem_object_pin(obj, 4096);
+       if (ret)
+               goto err_unref;
+
+       dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
+       dev_priv->seqno_page =  kmap(obj_priv->pages[0]);
+       if (dev_priv->seqno_page == NULL)
+               goto err_unpin;
+
+       dev_priv->seqno_obj = obj;
+       memset(dev_priv->seqno_page, 0, PAGE_SIZE);
+
+       return 0;
+
+err_unpin:
+       i915_gem_object_unpin(obj);
+err_unref:
+       drm_gem_object_unreference(obj);
+err:
+       return ret;
+}
+
 static int
 i915_gem_init_hws(struct drm_device *dev)
 {
@@ -4563,7 +4656,8 @@ i915_gem_init_hws(struct drm_device *dev)
        obj = drm_gem_object_alloc(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate status page\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto err;
        }
        obj_priv = to_intel_bo(obj);
        obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
@@ -4571,7 +4665,7 @@ i915_gem_init_hws(struct drm_device *dev)
        ret = i915_gem_object_pin(obj, 4096);
        if (ret != 0) {
                drm_gem_object_unreference(obj);
-               return ret;
+               goto err_unref;
        }
 
        dev_priv->status_gfx_addr = obj_priv->gtt_offset;
@@ -4580,10 +4674,16 @@ i915_gem_init_hws(struct drm_device *dev)
        if (dev_priv->hw_status_page == NULL) {
                DRM_ERROR("Failed to map status page.\n");
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-               i915_gem_object_unpin(obj);
-               drm_gem_object_unreference(obj);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_unpin;
        }
+
+       if (HAS_PIPE_CONTROL(dev)) {
+               ret = i915_gem_init_pipe_control(dev);
+               if (ret)
+                       goto err_unpin;
+       }
+
        dev_priv->hws_obj = obj;
        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
        if (IS_GEN6(dev)) {
@@ -4596,6 +4696,30 @@ i915_gem_init_hws(struct drm_device *dev)
        DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
 
        return 0;
+
+err_unpin:
+       i915_gem_object_unpin(obj);
+err_unref:
+       drm_gem_object_unreference(obj);
+err:
+       return ret;
+}
+
+static void
+i915_gem_cleanup_pipe_control(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+
+       obj = dev_priv->seqno_obj;
+       obj_priv = to_intel_bo(obj);
+       kunmap(obj_priv->pages[0]);
+       i915_gem_object_unpin(obj);
+       drm_gem_object_unreference(obj);
+       dev_priv->seqno_obj = NULL;
+
+       dev_priv->seqno_page = NULL;
 }
 
 static void
@@ -4619,6 +4743,9 @@ i915_gem_cleanup_hws(struct drm_device *dev)
        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
        dev_priv->hw_status_page = NULL;
 
+       if (HAS_PIPE_CONTROL(dev))
+               i915_gem_cleanup_pipe_control(dev);
+
        /* Write high address into HWS_PGA when disabling. */
        I915_WRITE(HWS_PGA, 0x1ffff000);
 }
index 449157f..4bdccef 100644 (file)
@@ -202,21 +202,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
                 * reg, so don't bother to check the size */
                if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
                        return false;
-       } else if (IS_I9XX(dev)) {
-               uint32_t pitch_val = ffs(stride / tile_width) - 1;
-
-               /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
-                * instead of 4 (2KB) on 945s.
-                */
-               if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
-                   size > (I830_FENCE_MAX_SIZE_VAL << 20))
+       } else if (IS_GEN3(dev) || IS_GEN2(dev)) {
+               if (stride > 8192)
                        return false;
-       } else {
-               uint32_t pitch_val = ffs(stride / tile_width) - 1;
 
-               if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
-                   size > (I830_FENCE_MAX_SIZE_VAL << 19))
-                       return false;
+               if (IS_GEN3(dev)) {
+                       if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+                               return false;
+               } else {
+                       if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+                               return false;
+               }
        }
 
        /* 965+ just needs multiples of tile width */
index 6421481..2b8b969 100644 (file)
@@ -349,7 +349,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
                                READ_BREADCRUMB(dev_priv);
        }
 
-       if (gt_iir & GT_USER_INTERRUPT) {
+       if (gt_iir & GT_PIPE_NOTIFY) {
                u32 seqno = i915_get_gem_seqno(dev);
                dev_priv->mm.irq_gem_seqno = seqno;
                trace_i915_gem_request_complete(dev, seqno);
@@ -1005,7 +1005,7 @@ void i915_user_irq_get(struct drm_device *dev)
        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
                if (HAS_PCH_SPLIT(dev))
-                       ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+                       ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
                else
                        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
        }
@@ -1021,7 +1021,7 @@ void i915_user_irq_put(struct drm_device *dev)
        BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
        if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
                if (HAS_PCH_SPLIT(dev))
-                       ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+                       ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
                else
                        i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
        }
@@ -1305,7 +1305,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
        /* enable kind of interrupts always enabled */
        u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
                           DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
-       u32 render_mask = GT_USER_INTERRUPT;
+       u32 render_mask = GT_PIPE_NOTIFY;
        u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
                           SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 
index 7cc8410..8fcc75c 100644 (file)
@@ -382,8 +382,57 @@ static void intel_didl_outputs(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_opregion *opregion = &dev_priv->opregion;
        struct drm_connector *connector;
+       acpi_handle handle;
+       struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
+       unsigned long long device_id;
+       acpi_status status;
        int i = 0;
 
+       handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+       if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
+               return;
+
+       if (acpi_is_video_device(acpi_dev))
+               acpi_video_bus = acpi_dev;
+       else {
+               list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
+                       if (acpi_is_video_device(acpi_cdev)) {
+                               acpi_video_bus = acpi_cdev;
+                               break;
+                       }
+               }
+       }
+
+       if (!acpi_video_bus) {
+               printk(KERN_WARNING "No ACPI video bus found\n");
+               return;
+       }
+
+       list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
+               if (i >= 8) {
+                       dev_printk (KERN_ERR, &dev->pdev->dev,
+                                   "More than 8 outputs detected\n");
+                       return;
+               }
+               status =
+                       acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+                                               NULL, &device_id);
+               if (ACPI_SUCCESS(status)) {
+                       if (!device_id)
+                               goto blind_set;
+                       opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
+                       i++;
+               }
+       }
+
+end:
+       /* If fewer than 8 outputs, the list must be null terminated */
+       if (i < 8)
+               opregion->acpi->didl[i] = 0;
+       return;
+
+blind_set:
+       i = 0;
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                int output_type = ACPI_OTHER_OUTPUT;
                if (i >= 8) {
@@ -416,10 +465,7 @@ static void intel_didl_outputs(struct drm_device *dev)
                opregion->acpi->didl[i] |= (1<<31) | output_type | i;
                i++;
        }
-
-       /* If fewer than 8 outputs, the list must be null terminated */
-       if (i < 8)
-               opregion->acpi->didl[i] = 0;
+       goto end;
 }
 
 int intel_opregion_init(struct drm_device *dev, int resume)
index cbbf59f..4cbc521 100644 (file)
 #define   ASYNC_FLIP                (1<<22)
 #define   DISPLAY_PLANE_A           (0<<20)
 #define   DISPLAY_PLANE_B           (1<<20)
+#define GFX_OP_PIPE_CONTROL    ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
+#define   PIPE_CONTROL_QW_WRITE        (1<<14)
+#define   PIPE_CONTROL_DEPTH_STALL (1<<13)
+#define   PIPE_CONTROL_WC_FLUSH        (1<<12)
+#define   PIPE_CONTROL_IS_FLUSH        (1<<11) /* MBZ on Ironlake */
+#define   PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
+#define   PIPE_CONTROL_ISP_DIS (1<<9)
+#define   PIPE_CONTROL_NOTIFY  (1<<8)
+#define   PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+#define   PIPE_CONTROL_STALL_EN        (1<<1) /* in addr word, Ironlake+ only */
 
 /*
  * Fence registers
 #define   I830_FENCE_SIZE_BITS(size)   ((ffs((size) >> 19) - 1) << 8)
 #define   I830_FENCE_PITCH_SHIFT       4
 #define   I830_FENCE_REG_VALID         (1<<0)
-#define   I915_FENCE_MAX_PITCH_VAL     0x10
+#define   I915_FENCE_MAX_PITCH_VAL     4
 #define   I830_FENCE_MAX_PITCH_VAL     6
 #define   I830_FENCE_MAX_SIZE_VAL      (1<<8)
 
 #define DEIER   0x4400c
 
 /* GT interrupt */
+#define GT_PIPE_NOTIFY         (1 << 4)
 #define GT_SYNC_STATUS          (1 << 2)
 #define GT_USER_INTERRUPT       (1 << 0)
 
index e7356fb..c7502b6 100644 (file)
@@ -4853,17 +4853,18 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.update_wm = g4x_update_wm;
        else if (IS_I965G(dev))
                dev_priv->display.update_wm = i965_update_wm;
-       else if (IS_I9XX(dev) || IS_MOBILE(dev)) {
+       else if (IS_I9XX(dev)) {
                dev_priv->display.update_wm = i9xx_update_wm;
                dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+       } else if (IS_I85X(dev)) {
+               dev_priv->display.update_wm = i9xx_update_wm;
+               dev_priv->display.get_fifo_size = i85x_get_fifo_size;
        } else {
-               if (IS_I85X(dev))
-                       dev_priv->display.get_fifo_size = i85x_get_fifo_size;
-               else if (IS_845G(dev))
+               dev_priv->display.update_wm = i830_update_wm;
+               if (IS_845G(dev))
                        dev_priv->display.get_fifo_size = i845_get_fifo_size;
                else
                        dev_priv->display.get_fifo_size = i830_get_fifo_size;
-               dev_priv->display.update_wm = i830_update_wm;
        }
 }
 
index bd75f99..eaf1f6b 100644 (file)
@@ -324,13 +324,12 @@ void r300_gpu_init(struct radeon_device *rdev)
        uint32_t gb_tile_config, tmp;
 
        r100_hdp_reset(rdev);
-       /* FIXME: rv380 one pipes ? */
        if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
-           (rdev->family == CHIP_R350)) {
+           (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
                /* r300,r350 */
                rdev->num_gb_pipes = 2;
        } else {
-               /* rv350,rv370,rv380,r300 AD */
+               /* rv350,rv370,rv380,r300 AD, r350 AH */
                rdev->num_gb_pipes = 1;
        }
        rdev->num_z_pipes = 1;
index ea46d55..c5c2742 100644 (file)
@@ -921,7 +921,7 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
 
        ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
                        sizeof(stack_ptr_addr), &stack_ptr_addr);
-       ref_age_base = (u32 *)(unsigned long)*ptr_addr;
+       ref_age_base = (u32 *)(unsigned long)get_unaligned(ptr_addr);
 
        for (i=0; i < header.scratch.n_bufs; i++) {
                buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
index 3dc968c..c2bda4a 100644 (file)
@@ -59,6 +59,12 @@ void r420_pipes_init(struct radeon_device *rdev)
        /* get max number of pipes */
        gb_pipe_select = RREG32(0x402C);
        num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
+
+       /* SE chips have 1 pipe */
+       if ((rdev->pdev->device == 0x5e4c) ||
+           (rdev->pdev->device == 0x5e4f))
+               num_pipes = 1;
+
        rdev->num_gb_pipes = num_pipes;
        tmp = 0;
        switch (num_pipes) {
index 419630d..2f042a3 100644 (file)
@@ -435,14 +435,19 @@ static void radeon_init_pipes(struct drm_device *dev)
        if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
                gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
                dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
+               /* SE cards have 1 pipe */
+               if ((dev->pdev->device == 0x5e4c) ||
+                   (dev->pdev->device == 0x5e4f))
+                       dev_priv->num_gb_pipes = 1;
        } else {
                /* R3xx */
                if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
                     dev->pdev->device != 0x4144) ||
-                   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
+                   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350 &&
+                    dev->pdev->device != 0x4148)) {
                        dev_priv->num_gb_pipes = 2;
                } else {
-                       /* RV3xx/R300 AD */
+                       /* RV3xx/R300 AD/R350 AH */
                        dev_priv->num_gb_pipes = 1;
                }
        }
index b8d6728..bb1c122 100644 (file)
@@ -86,12 +86,12 @@ static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
        WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
        WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
 
-       WREG32(EVERGREEN_DC_LUT_RW_MODE, radeon_crtc->crtc_id);
-       WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK, 0x00000007);
+       WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
+       WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
 
-       WREG32(EVERGREEN_DC_LUT_RW_INDEX, 0);
+       WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
        for (i = 0; i < 256; i++) {
-               WREG32(EVERGREEN_DC_LUT_30_COLOR,
+               WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
                       (radeon_crtc->lut_r[i] << 20) |
                       (radeon_crtc->lut_g[i] << 10) |
                       (radeon_crtc->lut_b[i] << 0));
index 30293be..fed7b80 100644 (file)
@@ -1326,7 +1326,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 
        radeon_encoder->pixel_clock = adjusted_mode->clock;
 
-       if (ASIC_IS_AVIVO(rdev)) {
+       if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
                if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
                        atombios_yuv_setup(encoder, true);
                else
index d3657dc..c633319 100644 (file)
@@ -165,7 +165,7 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
 {
        struct radeon_device *rdev = dev->dev_private;
 
-       if (crtc < 0 || crtc > 1) {
+       if (crtc < 0 || crtc >= rdev->num_crtc) {
                DRM_ERROR("Invalid crtc %d\n", crtc);
                return -EINVAL;
        }
@@ -177,7 +177,7 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
 {
        struct radeon_device *rdev = dev->dev_private;
 
-       if (crtc < 0 || crtc > 1) {
+       if (crtc < 0 || crtc >= rdev->num_crtc) {
                DRM_ERROR("Invalid crtc %d\n", crtc);
                return -EINVAL;
        }
@@ -191,7 +191,7 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
 {
        struct radeon_device *rdev = dev->dev_private;
 
-       if (crtc < 0 || crtc > 1) {
+       if (crtc < 0 || crtc >= rdev->num_crtc) {
                DRM_ERROR("Invalid crtc %d\n", crtc);
                return;
        }
index 75f3fa5..16c4202 100644 (file)
@@ -1169,15 +1169,19 @@ static int atk_create_files(struct atk_data *data)
        int err;
 
        list_for_each_entry(s, &data->sensor_list, list) {
+               sysfs_attr_init(&s->input_attr.attr);
                err = device_create_file(data->hwmon_dev, &s->input_attr);
                if (err)
                        return err;
+               sysfs_attr_init(&s->label_attr.attr);
                err = device_create_file(data->hwmon_dev, &s->label_attr);
                if (err)
                        return err;
+               sysfs_attr_init(&s->limit1_attr.attr);
                err = device_create_file(data->hwmon_dev, &s->limit1_attr);
                if (err)
                        return err;
+               sysfs_attr_init(&s->limit2_attr.attr);
                err = device_create_file(data->hwmon_dev, &s->limit2_attr);
                if (err)
                        return err;
index be475e8..c8ab505 100644 (file)
@@ -217,6 +217,10 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
        AXIS_DMI_MATCH("DV7", "HP Pavilion dv7", x_inverted),
        AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
        AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
+       AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
+       AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
+       AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
+       AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
        { NULL, }
 /* Laptop models without axis info (yet):
  * "NC6910" "HP Compaq 6910"
index ab87e4f..defce28 100644 (file)
@@ -409,6 +409,8 @@ static struct pcmcia_device_id ide_ids[] = {
        PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
        PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
        PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
+       PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x3e520e17),
+       PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
        PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
        PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2      ", 0x547e66dc, 0x8671043b),
        PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
@@ -429,6 +431,8 @@ static struct pcmcia_device_id ide_ids[] = {
        PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
        PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
        PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
+       PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x9351e59d),
+       PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
        PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
        PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
        PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
index 20e4840..58ea0ec 100644 (file)
@@ -1650,7 +1650,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                                     int previous, int *dd_idx,
                                     struct stripe_head *sh)
 {
-       sector_t stripe;
+       sector_t stripe, stripe2;
        sector_t chunk_number;
        unsigned int chunk_offset;
        int pd_idx, qd_idx;
@@ -1677,7 +1677,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
         */
        stripe = chunk_number;
        *dd_idx = sector_div(stripe, data_disks);
-
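+       /* sector_div() modifies its dividend, so keep a scratch copy for the layout math below */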
+       stripe2 = stripe;
        /*
         * Select the parity disk based on the user selected algorithm.
         */
@@ -1689,21 +1689,21 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
        case 5:
                switch (algorithm) {
                case ALGORITHM_LEFT_ASYMMETRIC:
-                       pd_idx = data_disks - stripe % raid_disks;
+                       pd_idx = data_disks - sector_div(stripe2, raid_disks);
                        if (*dd_idx >= pd_idx)
                                (*dd_idx)++;
                        break;
                case ALGORITHM_RIGHT_ASYMMETRIC:
-                       pd_idx = stripe % raid_disks;
+                       pd_idx = sector_div(stripe2, raid_disks);
                        if (*dd_idx >= pd_idx)
                                (*dd_idx)++;
                        break;
                case ALGORITHM_LEFT_SYMMETRIC:
-                       pd_idx = data_disks - stripe % raid_disks;
+                       pd_idx = data_disks - sector_div(stripe2, raid_disks);
                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
                        break;
                case ALGORITHM_RIGHT_SYMMETRIC:
-                       pd_idx = stripe % raid_disks;
+                       pd_idx = sector_div(stripe2, raid_disks);
                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
                        break;
                case ALGORITHM_PARITY_0:
@@ -1723,7 +1723,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 
                switch (algorithm) {
                case ALGORITHM_LEFT_ASYMMETRIC:
-                       pd_idx = raid_disks - 1 - (stripe % raid_disks);
+                       pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
                        qd_idx = pd_idx + 1;
                        if (pd_idx == raid_disks-1) {
                                (*dd_idx)++;    /* Q D D D P */
@@ -1732,7 +1732,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                                (*dd_idx) += 2; /* D D P Q D */
                        break;
                case ALGORITHM_RIGHT_ASYMMETRIC:
-                       pd_idx = stripe % raid_disks;
+                       pd_idx = sector_div(stripe2, raid_disks);
                        qd_idx = pd_idx + 1;
                        if (pd_idx == raid_disks-1) {
                                (*dd_idx)++;    /* Q D D D P */
@@ -1741,12 +1741,12 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                                (*dd_idx) += 2; /* D D P Q D */
                        break;
                case ALGORITHM_LEFT_SYMMETRIC:
-                       pd_idx = raid_disks - 1 - (stripe % raid_disks);
+                       pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
                        qd_idx = (pd_idx + 1) % raid_disks;
                        *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
                        break;
                case ALGORITHM_RIGHT_SYMMETRIC:
-                       pd_idx = stripe % raid_disks;
+                       pd_idx = sector_div(stripe2, raid_disks);
                        qd_idx = (pd_idx + 1) % raid_disks;
                        *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
                        break;
@@ -1765,7 +1765,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                        /* Exactly the same as RIGHT_ASYMMETRIC, but order
                         * of blocks for computing Q is different.
                         */
-                       pd_idx = stripe % raid_disks;
+                       pd_idx = sector_div(stripe2, raid_disks);
                        qd_idx = pd_idx + 1;
                        if (pd_idx == raid_disks-1) {
                                (*dd_idx)++;    /* Q D D D P */
@@ -1780,7 +1780,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
                         * D D D P Q  rather than
                         * Q D D D P
                         */
-                       pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
+                       stripe2 += 1;
+                       pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
                        qd_idx = pd_idx + 1;
                        if (pd_idx == raid_disks-1) {
                                (*dd_idx)++;    /* Q D D D P */
@@ -1792,7 +1793,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 
                case ALGORITHM_ROTATING_N_CONTINUE:
                        /* Same as left_symmetric but Q is before P */
-                       pd_idx = raid_disks - 1 - (stripe % raid_disks);
+                       pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
                        qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
                        ddf_layout = 1;
@@ -1800,27 +1801,27 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
 
                case ALGORITHM_LEFT_ASYMMETRIC_6:
                        /* RAID5 left_asymmetric, with Q on last device */
-                       pd_idx = data_disks - stripe % (raid_disks-1);
+                       pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
                        if (*dd_idx >= pd_idx)
                                (*dd_idx)++;
                        qd_idx = raid_disks - 1;
                        break;
 
                case ALGORITHM_RIGHT_ASYMMETRIC_6:
-                       pd_idx = stripe % (raid_disks-1);
+                       pd_idx = sector_div(stripe2, raid_disks-1);
                        if (*dd_idx >= pd_idx)
                                (*dd_idx)++;
                        qd_idx = raid_disks - 1;
                        break;
 
                case ALGORITHM_LEFT_SYMMETRIC_6:
-                       pd_idx = data_disks - stripe % (raid_disks-1);
+                       pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
                        *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
                        qd_idx = raid_disks - 1;
                        break;
 
                case ALGORITHM_RIGHT_SYMMETRIC_6:
-                       pd_idx = stripe % (raid_disks-1);
+                       pd_idx = sector_div(stripe2, raid_disks-1);
                        *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
                        qd_idx = raid_disks - 1;
                        break;
index 2191c8d..0d0d625 100644 (file)
@@ -311,6 +311,22 @@ config TI_DAC7512
          This driver can also be built as a module. If so, the module
          will be called ti_dac7512.
 
+config VMWARE_BALLOON
+       tristate "VMware Balloon Driver"
+       depends on X86
+       help
+         This is the VMware physical memory management driver, which acts
+         like a "balloon" that can be inflated to reclaim physical pages
+         by reserving them in the guest and invalidating them in the
+         monitor, freeing up the underlying machine pages so they can
+         be allocated to other guests. The balloon can also be deflated
+         to allow the guest to use more physical memory.
+
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called vmware_balloon.
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
index 27c4843..7b6f7ee 100644 (file)
@@ -29,3 +29,4 @@ obj-$(CONFIG_C2PORT)          += c2port/
 obj-$(CONFIG_IWMC3200TOP)      += iwmc3200top/
 obj-y                          += eeprom/
 obj-y                          += cb710/
+obj-$(CONFIG_VMWARE_BALLOON)   += vmware_balloon.o
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmware_balloon.c
new file mode 100644 (file)
index 0000000..e7161c4
--- /dev/null
@@ -0,0 +1,832 @@
+/*
+ * VMware Balloon driver.
+ *
+ * Copyright (C) 2000-2010, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained by: Dmitry Torokhov <dtor@vmware.com>
+ */
+
+/*
+ * This is the VMware physical memory management driver for Linux. The driver
+ * acts like a "balloon" that can be inflated to reclaim physical pages by
+ * reserving them in the guest and invalidating them in the monitor,
+ * freeing up the underlying machine pages so they can be allocated to
+ * other guests.  The balloon can also be deflated to allow the guest to
+ * use more physical memory. Higher level policies can control the sizes
+ * of balloons in VMs in order to manage physical memory resources.
+ */
+
+//#define DEBUG
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/vmware.h>
+
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
+MODULE_VERSION("1.2.1.0-K");
+MODULE_ALIAS("dmi:*:svnVMware*:*");
+MODULE_ALIAS("vmware_vmmemctl");
+MODULE_LICENSE("GPL");
+
+/*
+ * Various constants controlling the rate of inflating/deflating the balloon,
+ * measured in pages.
+ */
+
+/*
+ * Rate of allocating memory when there is no memory pressure
+ * (driver performs non-sleeping allocations).
+ */
+#define VMW_BALLOON_NOSLEEP_ALLOC_MAX  16384U
+
+/*
+ * Rates of memory allocation when the guest experiences memory pressure
+ * (driver performs sleeping allocations).
+ */
+#define VMW_BALLOON_RATE_ALLOC_MIN     512U
+#define VMW_BALLOON_RATE_ALLOC_MAX     2048U
+#define VMW_BALLOON_RATE_ALLOC_INC     16U
+
+/*
+ * Rates for releasing pages while deflating the balloon.
+ */
+#define VMW_BALLOON_RATE_FREE_MIN      512U
+#define VMW_BALLOON_RATE_FREE_MAX      16384U
+#define VMW_BALLOON_RATE_FREE_INC      16U
+
+/*
+ * When the guest is under memory pressure, use a reduced page allocation
+ * rate for the next several cycles.
+ */
+#define VMW_BALLOON_SLOW_CYCLES                4
+
+/*
+ * Use __GFP_HIGHMEM to allow pages from the HIGHMEM zone. We don't
+ * allow waiting (__GFP_WAIT) for NOSLEEP page allocations. Use
+ * __GFP_NOWARN to suppress page allocation failure warnings.
+ */
+#define VMW_PAGE_ALLOC_NOSLEEP         (__GFP_HIGHMEM|__GFP_NOWARN)
+
+/*
+ * Use GFP_HIGHUSER when executing in a separate kernel thread
+ * context and allocation can sleep.  This is less stressful to
+ * the guest memory system, since it allows the thread to block
+ * while memory is reclaimed, and won't take pages from emergency
+ * low-memory pools.
+ */
+#define VMW_PAGE_ALLOC_CANSLEEP                (GFP_HIGHUSER)
+
+/* Maximum number of page allocations without yielding the processor */
+#define VMW_BALLOON_YIELD_THRESHOLD    1024
+
+
+/*
+ * Hypervisor communication port definitions.
+ */
+#define VMW_BALLOON_HV_PORT            0x5670
+#define VMW_BALLOON_HV_MAGIC           0x456c6d6f
+#define VMW_BALLOON_PROTOCOL_VERSION   2
+#define VMW_BALLOON_GUEST_ID           1       /* Linux */
+
+#define VMW_BALLOON_CMD_START          0
+#define VMW_BALLOON_CMD_GET_TARGET     1
+#define VMW_BALLOON_CMD_LOCK           2
+#define VMW_BALLOON_CMD_UNLOCK         3
+#define VMW_BALLOON_CMD_GUEST_ID       4
+
+/* error codes */
+#define VMW_BALLOON_SUCCESS            0
+#define VMW_BALLOON_FAILURE            -1
+#define VMW_BALLOON_ERROR_CMD_INVALID  1
+#define VMW_BALLOON_ERROR_PPN_INVALID  2
+#define VMW_BALLOON_ERROR_PPN_LOCKED   3
+#define VMW_BALLOON_ERROR_PPN_UNLOCKED 4
+#define VMW_BALLOON_ERROR_PPN_PINNED   5
+#define VMW_BALLOON_ERROR_PPN_NOTNEEDED        6
+#define VMW_BALLOON_ERROR_RESET                7
+#define VMW_BALLOON_ERROR_BUSY         8
+
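+/*
+ * Issue a balloon command to the hypervisor with an "inl" on the balloon
+ * I/O port: %eax = magic, %ecx = command, %edx = port, %ebx = data.  The
+ * status is returned in %eax and the command result in %ebx.
+ */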
+#define VMWARE_BALLOON_CMD(cmd, data, result)          \
+({                                                     \
+       unsigned long __stat, __dummy1, __dummy2;       \
+       __asm__ __volatile__ ("inl (%%dx)" :            \
+               "=a"(__stat),                           \
+               "=c"(__dummy1),                         \
+               "=d"(__dummy2),                         \
+               "=b"(result) :                          \
+               "0"(VMW_BALLOON_HV_MAGIC),              \
+               "1"(VMW_BALLOON_CMD_##cmd),             \
+               "2"(VMW_BALLOON_HV_PORT),               \
+               "3"(data) :                             \
+               "memory");                              \
+       result &= -1UL;                                 \
+       __stat & -1UL;                                  \
+})
+
+#ifdef CONFIG_DEBUG_FS
+struct vmballoon_stats {
+       unsigned int timer;
+
+       /* allocation statistics */
+       unsigned int alloc;
+       unsigned int alloc_fail;
+       unsigned int sleep_alloc;
+       unsigned int sleep_alloc_fail;
+       unsigned int refused_alloc;
+       unsigned int refused_free;
+       unsigned int free;
+
+       /* monitor operations */
+       unsigned int lock;
+       unsigned int lock_fail;
+       unsigned int unlock;
+       unsigned int unlock_fail;
+       unsigned int target;
+       unsigned int target_fail;
+       unsigned int start;
+       unsigned int start_fail;
+       unsigned int guest_type;
+       unsigned int guest_type_fail;
+};
+
+#define STATS_INC(stat) (stat)++
+#else
+#define STATS_INC(stat)
+#endif
+
+struct vmballoon {
+
+       /* list of reserved physical pages */
+       struct list_head pages;
+
+       /* transient list of non-balloonable pages */
+       struct list_head refused_pages;
+
+       /* balloon size in pages */
+       unsigned int size;
+       unsigned int target;
+
+       /* reset flag */
+       bool reset_required;
+
+       /* adjustment rates (pages per second) */
+       unsigned int rate_alloc;
+       unsigned int rate_free;
+
+       /* slow down page allocations for the next few cycles */
+       unsigned int slow_allocation_cycles;
+
+#ifdef CONFIG_DEBUG_FS
+       /* statistics */
+       struct vmballoon_stats stats;
+
+       /* debugfs file exporting statistics */
+       struct dentry *dbg_entry;
+#endif
+
+       struct sysinfo sysinfo;
+
+       struct delayed_work dwork;
+};
+
+static struct vmballoon balloon;
+static struct workqueue_struct *vmballoon_wq;
+
+/*
+ * Send the "start" command to the host, communicating the supported
+ * version of the protocol.
+ */
+static bool vmballoon_send_start(struct vmballoon *b)
+{
+       unsigned long status, dummy;
+
+       STATS_INC(b->stats.start);
+
+       status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy);
+       if (status == VMW_BALLOON_SUCCESS)
+               return true;
+
+       pr_debug("%s - failed, hv returns %ld\n", __func__, status);
+       STATS_INC(b->stats.start_fail);
+       return false;
+}
+
+static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
+{
+       switch (status) {
+       case VMW_BALLOON_SUCCESS:
+               return true;
+
+       case VMW_BALLOON_ERROR_RESET:
+               b->reset_required = true;
+               /* fall through */
+
+       default:
+               return false;
+       }
+}
+
+/*
+ * Communicate guest type to the host so that it can adjust ballooning
+ * algorithm to the one most appropriate for the guest. This command
+ * is normally issued after sending "start" command and is part of
+ * standard reset sequence.
+ */
+static bool vmballoon_send_guest_id(struct vmballoon *b)
+{
+       unsigned long status, dummy;
+
+       status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy);
+
+       STATS_INC(b->stats.guest_type);
+
+       if (vmballoon_check_status(b, status))
+               return true;
+
+       pr_debug("%s - failed, hv returns %ld\n", __func__, status);
+       STATS_INC(b->stats.guest_type_fail);
+       return false;
+}
+
+/*
+ * Retrieve desired balloon size from the host.
+ */
+static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
+{
+       unsigned long status;
+       unsigned long target;
+       unsigned long limit;
+       u32 limit32;
+
+       /*
+        * si_meminfo() is cheap. Moreover, we want to provide dynamic
+        * max balloon size later. So let us call si_meminfo() every
+        * iteration.
+        */
+       si_meminfo(&b->sysinfo);
+       limit = b->sysinfo.totalram;
+
+       /* Ensure limit fits in 32-bits */
+       limit32 = (u32)limit;
+       if (limit != limit32)
+               return false;
+
+       /* update stats */
+       STATS_INC(b->stats.target);
+
+       status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target);
+       if (vmballoon_check_status(b, status)) {
+               *new_target = target;
+               return true;
+       }
+
+       pr_debug("%s - failed, hv returns %ld\n", __func__, status);
+       STATS_INC(b->stats.target_fail);
+       return false;
+}
+
+/*
+ * Notify the host about an allocated page so that the host can use it
+ * without fear that the guest will need it. The host may reject some pages;
+ * we need to check the return value and maybe submit a different page.
+ */
+static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn)
+{
+       unsigned long status, dummy;
+       u32 pfn32;
+
+       pfn32 = (u32)pfn;
+       if (pfn32 != pfn)
+               return false;
+
+       STATS_INC(b->stats.lock);
+
+       status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
+       if (vmballoon_check_status(b, status))
+               return true;
+
+       pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
+       STATS_INC(b->stats.lock_fail);
+       return false;
+}
+
+/*
+ * Notify the host that the guest intends to release the given page back into
+ * the pool of available (to the guest) pages.
+ */
+static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn)
+{
+       unsigned long status, dummy;
+       u32 pfn32;
+
+       pfn32 = (u32)pfn;
+       if (pfn32 != pfn)
+               return false;
+
+       STATS_INC(b->stats.unlock);
+
+       status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy);
+       if (vmballoon_check_status(b, status))
+               return true;
+
+       pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
+       STATS_INC(b->stats.unlock_fail);
+       return false;
+}
+
+/*
+ * Quickly release all pages allocated for the balloon. This function is
+ * called when the host decides to "reset" the balloon for one reason or
+ * another. Unlike a normal "deflate", we do not (and shall not) notify the
+ * host of the pages being released.
+ */
+static void vmballoon_pop(struct vmballoon *b)
+{
+       struct page *page, *next;
+       unsigned int count = 0;
+
+       list_for_each_entry_safe(page, next, &b->pages, lru) {
+               list_del(&page->lru);
+               __free_page(page);
+               STATS_INC(b->stats.free);
+               b->size--;
+
+               if (++count >= b->rate_free) {
+                       count = 0;
+                       cond_resched();
+               }
+       }
+}
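
vmballoon_pop() uses list_for_each_entry_safe() because every page is unlinked
and freed during the walk, and it calls cond_resched() every rate_free pages so
a large balloon does not monopolize the CPU. A user-space stand-in for the
"cache the successor before freeing" part (plain C with a toy list, not the
kernel list API):

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy singly-linked list used only for illustration. */
    struct node {
            struct node *next;
            int value;
    };

    /* Free every node. The successor is saved before free(), for the same
     * reason the driver iterates with the _safe variant. */
    static void pop_all(struct node **head)
    {
            struct node *cur, *next;

            for (cur = *head; cur; cur = next) {
                    next = cur->next;
                    free(cur);
            }
            *head = NULL;
    }

    int main(void)
    {
            struct node *head = NULL;

            for (int i = 0; i < 3; i++) {
                    struct node *n = malloc(sizeof(*n));

                    n->value = i;
                    n->next = head;
                    head = n;
            }
            pop_all(&head);
            puts("list emptied");
            return 0;
    }
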
+
+/*
+ * Perform the standard reset sequence by popping the balloon (in case it
+ * is not empty) and then restarting the protocol. This operation normally
+ * happens when the host responds with VMW_BALLOON_ERROR_RESET to a command.
+ */
+static void vmballoon_reset(struct vmballoon *b)
+{
+       /* free all pages, skipping monitor unlock */
+       vmballoon_pop(b);
+
+       if (vmballoon_send_start(b)) {
+               b->reset_required = false;
+               if (!vmballoon_send_guest_id(b))
+                       pr_err("failed to send guest ID to the host\n");
+       }
+}
+
+/*
+ * Allocate (or reserve) a page for the balloon and notify the host. If the
+ * host refuses the page, put it on the "refused" list and allocate another
+ * one until the host is satisfied. "Refused" pages are released at the end
+ * of the inflation cycle (when we have allocated b->rate_alloc pages).
+ */
+static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
+{
+       struct page *page;
+       gfp_t flags;
+       bool locked = false;
+
+       do {
+               if (!can_sleep)
+                       STATS_INC(b->stats.alloc);
+               else
+                       STATS_INC(b->stats.sleep_alloc);
+
+               flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
+               page = alloc_page(flags);
+               if (!page) {
+                       if (!can_sleep)
+                               STATS_INC(b->stats.alloc_fail);
+                       else
+                               STATS_INC(b->stats.sleep_alloc_fail);
+                       return -ENOMEM;
+               }
+
+               /* inform monitor */
+               locked = vmballoon_send_lock_page(b, page_to_pfn(page));
+               if (!locked) {
+                       if (b->reset_required) {
+                               __free_page(page);
+                               return -EIO;
+                       }
+
+                       /* place on list of non-balloonable pages, retry allocation */
+                       list_add(&page->lru, &b->refused_pages);
+                       STATS_INC(b->stats.refused_alloc);
+               }
+       } while (!locked);
+
+       /* track allocated page */
+       list_add(&page->lru, &b->pages);
+
+       /* update balloon size */
+       b->size++;
+
+       return 0;
+}
+
+/*
+ * Release the page allocated for the balloon. Note that we first notify
+ * the host so it can make sure the page will be available for the guest
+ * to use, if needed.
+ */
+static int vmballoon_release_page(struct vmballoon *b, struct page *page)
+{
+       if (!vmballoon_send_unlock_page(b, page_to_pfn(page)))
+               return -EIO;
+
+       list_del(&page->lru);
+
+       /* deallocate page */
+       __free_page(page);
+       STATS_INC(b->stats.free);
+
+       /* update balloon size */
+       b->size--;
+
+       return 0;
+}
+
+/*
+ * Release pages that were allocated while attempting to inflate the
+ * balloon but were refused by the host for one reason or another.
+ */
+static void vmballoon_release_refused_pages(struct vmballoon *b)
+{
+       struct page *page, *next;
+
+       list_for_each_entry_safe(page, next, &b->refused_pages, lru) {
+               list_del(&page->lru);
+               __free_page(page);
+               STATS_INC(b->stats.refused_free);
+       }
+}
+
+/*
+ * Inflate the balloon towards its target size. Note that we try to limit
+ * the rate of allocation to make sure we are not choking the rest of the
+ * system.
+ */
+static void vmballoon_inflate(struct vmballoon *b)
+{
+       unsigned int goal;
+       unsigned int rate;
+       unsigned int i;
+       unsigned int allocations = 0;
+       int error = 0;
+       bool alloc_can_sleep = false;
+
+       pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
+
+       /*
+        * First try NOSLEEP page allocations to inflate the balloon.
+        *
+        * If we do not throttle nosleep allocations, we can drain all
+        * free pages in the guest quickly (if the balloon target is high).
+        * As a side-effect, draining free pages helps to inform (force)
+        * the guest to start swapping if the balloon target is not met
+        * yet, which is a desired behavior. However, the balloon driver
+        * can consume all available CPU cycles if too many pages are
+        * allocated in a second. Therefore, we throttle nosleep allocations
+        * even when the guest is not under memory pressure. OTOH, if we
+        * have already predicted that the guest is under memory pressure,
+        * then we slow down page allocations considerably.
+        */
+
+       goal = b->target - b->size;
+       /*
+        * Start with the no-sleep allocation rate, which may be higher
+        * than the sleeping allocation rate.
+        */
+       rate = b->slow_allocation_cycles ?
+                       b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX;
+
+       pr_debug("%s - goal: %d, no-sleep rate: %d, sleep rate: %d\n",
+                __func__, goal, rate, b->rate_alloc);
+
+       for (i = 0; i < goal; i++) {
+
+               error = vmballoon_reserve_page(b, alloc_can_sleep);
+               if (error) {
+                       if (error != -ENOMEM) {
+                               /*
+                                * Not a page allocation failure; stop this
+                                * cycle. Maybe we'll get a new target from
+                                * the host soon.
+                                */
+                               break;
+                       }
+
+                       if (alloc_can_sleep) {
+                               /*
+                                * CANSLEEP page allocation failed, so the
+                                * guest is under severe memory pressure.
+                                * Quickly decrease the allocation rate.
+                                */
+                               b->rate_alloc = max(b->rate_alloc / 2,
+                                                   VMW_BALLOON_RATE_ALLOC_MIN);
+                               break;
+                       }
+
+                       /*
+                        * NOSLEEP page allocation failed, so the guest is
+                        * under memory pressure. Let us slow down page
+                        * allocations for the next few cycles so that the
+                        * guest gets out of memory pressure. Also, if we
+                        * have already allocated b->rate_alloc pages, let's
+                        * pause; otherwise switch to sleeping allocations.
+                        */
+                       b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;
+
+                       if (i >= b->rate_alloc)
+                               break;
+
+                       alloc_can_sleep = true;
+                       /* Lower rate for sleeping allocations. */
+                       rate = b->rate_alloc;
+               }
+
+               if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) {
+                       cond_resched();
+                       allocations = 0;
+               }
+
+               if (i >= rate) {
+                       /* We allocated enough pages, let's take a break. */
+                       break;
+               }
+       }
+
+       /*
+        * We reached our goal without failures, so try increasing the
+        * allocation rate.
+        */
+       if (error == 0 && i >= b->rate_alloc) {
+               unsigned int mult = i / b->rate_alloc;
+
+               b->rate_alloc =
+                       min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
+                           VMW_BALLOON_RATE_ALLOC_MAX);
+       }
+
+       vmballoon_release_refused_pages(b);
+}
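
The allocation rate adapts in both directions: b->rate_alloc is halved (but
not below VMW_BALLOON_RATE_ALLOC_MIN) when a sleeping allocation fails, and is
raised by VMW_BALLOON_RATE_ALLOC_INC per completed batch (capped at
VMW_BALLOON_RATE_ALLOC_MAX) when a cycle finishes without errors. The same
arithmetic in isolation, using made-up constants and helper names in place of
the driver's:

    #include <stdio.h>

    #define RATE_MIN   512u
    #define RATE_MAX  2048u
    #define RATE_INC    16u

    static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }
    static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

    /* Multiplicative decrease after a failed sleeping allocation. */
    static unsigned int rate_on_failure(unsigned int rate)
    {
            return max_u(rate / 2, RATE_MIN);
    }

    /* Additive increase per full batch completed without failure, mirroring
     * the "mult * VMW_BALLOON_RATE_ALLOC_INC" step in vmballoon_inflate(). */
    static unsigned int rate_on_success(unsigned int rate, unsigned int pages_done)
    {
            unsigned int mult = pages_done / rate;

            return min_u(rate + mult * RATE_INC, RATE_MAX);
    }

    int main(void)
    {
            unsigned int rate = 1024;

            rate = rate_on_failure(rate);       /* 1024 -> 512 */
            rate = rate_on_success(rate, 1536); /* 512 + 3 * 16 = 560 */
            printf("adapted rate: %u pages/cycle\n", rate);
            return 0;
    }
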
+
+/*
+ * Decrease the size of the balloon, allowing the guest to use more memory.
+ */
+static void vmballoon_deflate(struct vmballoon *b)
+{
+       struct page *page, *next;
+       unsigned int i = 0;
+       unsigned int goal;
+       int error;
+
+       pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
+
+       /* limit deallocation rate */
+       goal = min(b->size - b->target, b->rate_free);
+
+       pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free);
+
+       /* free pages to reach target */
+       list_for_each_entry_safe(page, next, &b->pages, lru) {
+               error = vmballoon_release_page(b, page);
+               if (error) {
+                       /* quickly decrease rate in case of error */
+                       b->rate_free = max(b->rate_free / 2,
+                                          VMW_BALLOON_RATE_FREE_MIN);
+                       return;
+               }
+
+               if (++i >= goal)
+                       break;
+       }
+
+       /* slowly increase rate if there were no errors */
+       b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC,
+                          VMW_BALLOON_RATE_FREE_MAX);
+}
+
+/*
+ * Balloon work function: reset the protocol if needed, get the new target
+ * size and adjust the balloon as needed. Repeat every second.
+ */
+static void vmballoon_work(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
+       unsigned int target;
+
+       STATS_INC(b->stats.timer);
+
+       if (b->reset_required)
+               vmballoon_reset(b);
+
+       if (b->slow_allocation_cycles > 0)
+               b->slow_allocation_cycles--;
+
+       if (vmballoon_send_get_target(b, &target)) {
+               /* update target, adjust size */
+               b->target = target;
+
+               if (b->size < target)
+                       vmballoon_inflate(b);
+               else if (b->size > target)
+                       vmballoon_deflate(b);
+       }
+
+       queue_delayed_work(vmballoon_wq, dwork, round_jiffies_relative(HZ));
+}
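
The work item re-queues itself, so the balloon is re-evaluated roughly once per
second for as long as the module is loaded. A minimal self-rearming
delayed-work sketch; names are hypothetical, and it re-arms via the system
workqueue with schedule_delayed_work(), whereas the driver above queues onto
the dedicated workqueue it creates in vmballoon_init():

    #include <linux/module.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void demo_work_fn(struct work_struct *work);
    static DECLARE_DELAYED_WORK(demo_dwork, demo_work_fn);

    static void demo_work_fn(struct work_struct *work)
    {
            struct delayed_work *dwork = to_delayed_work(work);

            /* ... one round of periodic work goes here ... */

            /* Re-arm roughly one second out; round_jiffies_relative() lets
             * the timer coalesce with other second-aligned timers. */
            schedule_delayed_work(dwork, round_jiffies_relative(HZ));
    }

    static int __init demo_init(void)
    {
            schedule_delayed_work(&demo_dwork, 0);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            cancel_delayed_work_sync(&demo_dwork);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
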
+
+/*
+ * DEBUGFS Interface
+ */
+#ifdef CONFIG_DEBUG_FS
+
+static int vmballoon_debug_show(struct seq_file *f, void *offset)
+{
+       struct vmballoon *b = f->private;
+       struct vmballoon_stats *stats = &b->stats;
+
+       /* format size info */
+       seq_printf(f,
+                  "target:             %8d pages\n"
+                  "current:            %8d pages\n",
+                  b->target, b->size);
+
+       /* format rate info */
+       seq_printf(f,
+                  "rateNoSleepAlloc:   %8d pages/sec\n"
+                  "rateSleepAlloc:     %8d pages/sec\n"
+                  "rateFree:           %8d pages/sec\n",
+                  VMW_BALLOON_NOSLEEP_ALLOC_MAX,
+                  b->rate_alloc, b->rate_free);
+
+       seq_printf(f,
+                  "\n"
+                  "timer:              %8u\n"
+                  "start:              %8u (%4u failed)\n"
+                  "guestType:          %8u (%4u failed)\n"
+                  "lock:               %8u (%4u failed)\n"
+                  "unlock:             %8u (%4u failed)\n"
+                  "target:             %8u (%4u failed)\n"
+                  "primNoSleepAlloc:   %8u (%4u failed)\n"
+                  "primCanSleepAlloc:  %8u (%4u failed)\n"
+                  "primFree:           %8u\n"
+                  "errAlloc:           %8u\n"
+                  "errFree:            %8u\n",
+                  stats->timer,
+                  stats->start, stats->start_fail,
+                  stats->guest_type, stats->guest_type_fail,
+                  stats->lock,  stats->lock_fail,
+                  stats->unlock, stats->unlock_fail,
+                  stats->target, stats->target_fail,
+                  stats->alloc, stats->alloc_fail,
+                  stats->sleep_alloc, stats->sleep_alloc_fail,
+                  stats->free,
+                  stats->refused_alloc, stats->refused_free);
+
+       return 0;
+}
+
+static int vmballoon_debug_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, vmballoon_debug_show, inode->i_private);
+}
+
+static const struct file_operations vmballoon_debug_fops = {
+       .owner          = THIS_MODULE,
+       .open           = vmballoon_debug_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int __init vmballoon_debugfs_init(struct vmballoon *b)
+{
+       int error;
+
+       b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
+                                          &vmballoon_debug_fops);
+       if (IS_ERR(b->dbg_entry)) {
+               error = PTR_ERR(b->dbg_entry);
+               pr_err("failed to create debugfs entry, error: %d\n", error);
+               return error;
+       }
+
+       return 0;
+}
+
+static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
+{
+       debugfs_remove(b->dbg_entry);
+}
+
+#else
+
+static inline int vmballoon_debugfs_init(struct vmballoon *b)
+{
+       return 0;
+}
+
+static inline void vmballoon_debugfs_exit(struct vmballoon *b)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
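
With CONFIG_DEBUG_FS disabled, the callers in vmballoon_init() and
vmballoon_exit() still compile unchanged because the stubs above are empty
static inlines that the compiler optimizes away. The same pattern in a
self-contained toy, with a DEMO_DEBUG macro standing in for the Kconfig
option:

    #include <stdio.h>

    #define DEMO_DEBUG 0    /* flip to 1 to mimic CONFIG_DEBUG_FS=y */

    struct demo { int counter; };

    #if DEMO_DEBUG
    static int demo_debug_init(struct demo *d)
    {
            printf("debug view created (counter=%d)\n", d->counter);
            return 0;
    }

    static void demo_debug_exit(struct demo *d)
    {
            printf("debug view removed (counter=%d)\n", d->counter);
    }
    #else
    /* Stubs: call sites stay unconditional, the calls compile to nothing. */
    static inline int demo_debug_init(struct demo *d) { (void)d; return 0; }
    static inline void demo_debug_exit(struct demo *d) { (void)d; }
    #endif

    int main(void)
    {
            struct demo d = { .counter = 42 };

            if (demo_debug_init(&d))
                    return 1;
            demo_debug_exit(&d);
            return 0;
    }
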
+
+static int __init vmballoon_init(void)
+{
+       int error;
+
+       /*
+        * Check if we are running on VMware's hypervisor and bail out
+        * if we are not.
+        */
+       if (!vmware_platform())
+               return -ENODEV;
+
+       vmballoon_wq = create_freezeable_workqueue("vmmemctl");
+       if (!vmballoon_wq) {
+               pr_err("failed to create workqueue\n");
+               return -ENOMEM;
+       }
+
+       INIT_LIST_HEAD(&balloon.pages);
+       INIT_LIST_HEAD(&balloon.refused_pages);
+
+       /* initialize rates */
+       balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;
+       balloon.rate_free = VMW_BALLOON_RATE_FREE_MAX;
+
+       INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
+
+       /*
+        * Start balloon.
+        */
+       if (!vmballoon_send_start(&balloon)) {
+               pr_err("failed to send start command to the host\n");
+               error = -EIO;
+               goto fail;
+       }
+
+       if (!vmballoon_send_guest_id(&balloon)) {
+               pr_err("failed to send guest ID to the host\n");
+               error = -EIO;
+               goto fail;
+       }
+
+       error = vmballoon_debugfs_init(&balloon);
+       if (error)
+               goto fail;
+
+       queue_delayed_work(vmballoon_wq, &balloon.dwork, 0);
+
+       return 0;
+
+fail:
+       destroy_workqueue(vmballoon_wq);
+       return error;
+}
+module_init(vmballoon_init);
+
+static void __exit vmballoon_exit(void)
+{
+       cancel_delayed_work_sync(&balloon.dwork);
+       destroy_workqueue(vmballoon_wq);
+
+       vmballoon_debugfs_exit(&balloon);
+
+       /*
+        * Deallocate all reserved memory and reset the connection with the
+        * monitor. Reset the connection before deallocating memory to avoid
+        * the potential for additional spurious resets from the guest
+        * touching deallocated pages.
+        */
+       vmballoon_send_start(&balloon);
+       vmballoon_pop(&balloon);
+}
+module_exit(vmballoon_exit);
index 82d1e4d..4521b1e 100644 (file)
@@ -4,7 +4,7 @@
 
 # Core functionality.
 obj-$(CONFIG_MTD)              += mtd.o
-mtd-y                          := mtdcore.o mtdsuper.o mtdbdi.o
+mtd-y                          := mtdcore.o mtdsuper.o
 mtd-$(CONFIG_MTD_PARTITIONS)   += mtdpart.o
 
 obj-$(CONFIG_MTD_CONCAT)       += mtdconcat.o
index c658fe7..e69de29 100644 (file)
@@ -1,17 +0,0 @@
-/* Internal MTD definitions
- *
- * Copyright © 2006 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/*
- * mtdbdi.c
- */
-extern struct backing_dev_info mtd_bdi_unmappable;
-extern struct backing_dev_info mtd_bdi_ro_mappable;
-extern struct backing_dev_info mtd_bdi_rw_mappable;
index 5ca5aed..e69de29 100644 (file)
@@ -1,43 +0,0 @@
-/* MTD backing device capabilities
- *
- * Copyright © 2006 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/backing-dev.h>
-#include <linux/mtd/mtd.h>
-#include "internal.h"
-
-/*
- * backing device capabilities for non-mappable devices (such as NAND flash)
- * - permits private mappings, copies are taken of the data
- */
-struct backing_dev_info mtd_bdi_unmappable = {
-       .capabilities   = BDI_CAP_MAP_COPY,
-};
-
-/*
- * backing device capabilities for R/O mappable devices (such as ROM)
- * - permits private mappings, copies are taken of the data
- * - permits non-writable shared mappings
- */
-struct backing_dev_info mtd_bdi_ro_mappable = {
-       .capabilities   = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
-                          BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
-};
-
-/*
- * backing device capabilities for writable mappable devices (such as RAM)
- * - permits private mappings, copies are taken of the data
- * - permits non-writable shared mappings
- */
-struct backing_dev_info mtd_bdi_rw_mappable = {
-       .capabilities   = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
-                          BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
-                          BDI_CAP_WRITE_MAP),
-};
index 5b38b17..b177e75 100644 (file)
@@ -2,6 +2,9 @@
  * Core registration and callback routines for MTD
  * drivers and users.
  *
+ * bdi bits are:
+ * Copyright © 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
  */
 
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/mtd/compatmac.h>
 #include <linux/proc_fs.h>
+#include <linux/backing-dev.h>
 
 #include <linux/mtd/mtd.h>
-#include "internal.h"
 
 #include "mtdcore.h"
+/*
+ * backing device capabilities for non-mappable devices (such as NAND flash)
+ * - permits private mappings, copies are taken of the data
+ */
+struct backing_dev_info mtd_bdi_unmappable = {
+       .capabilities   = BDI_CAP_MAP_COPY,
+};
+
+/*
+ * backing device capabilities for R/O mappable devices (such as ROM)
+ * - permits private mappings, copies are taken of the data
+ * - permits non-writable shared mappings
+ */
+struct backing_dev_info mtd_bdi_ro_mappable = {
+       .capabilities   = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
+                          BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
+};
+
+/*
+ * backing device capabilities for writable mappable devices (such as RAM)
+ * - permits private mappings, copies are taken of the data
+ * - permits non-writable shared mappings
+ */
+struct backing_dev_info mtd_bdi_rw_mappable = {
+       .capabilities   = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
+                          BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
+                          BDI_CAP_WRITE_MAP),
+};
 
 static int mtd_cls_suspend(struct device *dev, pm_message_t state);
 static int mtd_cls_resume(struct device *dev);
@@ -628,20 +659,55 @@ done:
 /*====================================================================*/
 /* Init code */
 
+static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
+{
+       int ret;
+
+       ret = bdi_init(bdi);
+       if (!ret)
+               ret = bdi_register(bdi, NULL, name);
+
+       if (ret)
+               bdi_destroy(bdi);
+
+       return ret;
+}
+
 static int __init init_mtd(void)
 {
        int ret;
+
        ret = class_register(&mtd_class);
+       if (ret)
+               goto err_reg;
+
+       ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap");
+       if (ret)
+               goto err_bdi1;
+
+       ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
+       if (ret)
+               goto err_bdi2;
+
+       ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
+       if (ret)
+               goto err_bdi3;
 
-       if (ret) {
-               pr_err("Error registering mtd class: %d\n", ret);
-               return ret;
-       }
 #ifdef CONFIG_PROC_FS
        if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
                proc_mtd->read_proc = mtd_read_proc;
 #endif /* CONFIG_PROC_FS */
        return 0;
+
+err_bdi3:
+       bdi_destroy(&mtd_bdi_ro_mappable);
+err_bdi2:
+       bdi_destroy(&mtd_bdi_unmappable);
+err_bdi1:
+       class_unregister(&mtd_class);
+err_reg:
+       pr_err("Error registering mtd class or bdi: %d\n", ret);
+       return ret;
 }
 
 static void __exit cleanup_mtd(void)
@@ -651,6 +717,9 @@ static void __exit cleanup_mtd(void)
                remove_proc_entry( "mtd", NULL);
 #endif /* CONFIG_PROC_FS */
        class_unregister(&mtd_class);
+       bdi_destroy(&mtd_bdi_unmappable);
+       bdi_destroy(&mtd_bdi_ro_mappable);
+       bdi_destroy(&mtd_bdi_rw_mappable);
 }
 
 module_init(init_mtd);
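
init_mtd() now unwinds partially completed setup with a chain of goto labels,
releasing resources in the reverse order they were acquired, while
cleanup_mtd() tears down the same three BDIs unconditionally. The shape of
that error path in a stand-alone sketch; the resource names are hypothetical:

    #include <stdio.h>

    static int setup_a(void) { return 0; }
    static int setup_b(void) { return -1; }    /* pretend this step fails */
    static void teardown_a(void) { puts("teardown a"); }

    static int demo_init(void)
    {
            int ret;

            ret = setup_a();
            if (ret)
                    goto err_a;

            ret = setup_b();
            if (ret)
                    goto err_b;

            return 0;

    err_b:
            teardown_a();           /* undo only what succeeded, newest first */
    err_a:
            fprintf(stderr, "demo_init failed: %d\n", ret);
            return ret;
    }

    int main(void)
    {
            return demo_init() ? 1 : 0;
    }
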
index af8b42e..7c00319 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/mtd/super.h>
 #include <linux/namei.h>
 #include <linux/ctype.h>
+#include <linux/slab.h>
 
 /*
  * compare superblocks to see if they're equivalent
@@ -44,6 +45,7 @@ static int get_sb_mtd_set(struct super_block *sb, void *_mtd)
 
        sb->s_mtd = mtd;
        sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
+       sb->s_bdi = mtd->backing_dev_info;
        return 0;
 }
 
index f59c074..d60fc57 100644 (file)
@@ -60,7 +60,13 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
        }
        buf64 = (uint64_t *)buf;
        while (i < len/8) {
-               uint64_t x;
+               /*
+                * Since GCC has no proper constraint (PR 43518), force
+                * the x variable into the r2/r3 register pair, as the ldrd
+                * instruction requires the first register to be even.
+                */
+               register uint64_t x asm ("r2");
+
                asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base));
                buf64[i++] = x;
        }
index a03d291..f0d23de 100644 (file)
@@ -1944,7 +1944,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
                netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n",
                          __func__, rx_status, rx_size, cur_rx);
 #if RTL8139_DEBUG > 2
-               print_dump_hex(KERN_DEBUG, "Frame contents: ",
+               print_hex_dump(KERN_DEBUG, "Frame contents: ",
                               DUMP_PREFIX_OFFSET, 16, 1,
                               &rx_ring[ring_offset], 70, true);
 #endif
index a583b50..12b280a 100644 (file)
@@ -273,6 +273,7 @@ obj-$(CONFIG_USB_RTL8150)       += usb/
 obj-$(CONFIG_USB_HSO)          += usb/
 obj-$(CONFIG_USB_USBNET)        += usb/
 obj-$(CONFIG_USB_ZD1201)        += usb/
+obj-$(CONFIG_USB_IPHETH)        += usb/
 
 obj-y += wireless/
 obj-$(CONFIG_NET_TULIP) += tulip/
index a257bab..ac90a38 100644 (file)
@@ -58,8 +58,8 @@
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME                "bnx2"
-#define DRV_MODULE_VERSION     "2.0.8"
-#define DRV_MODULE_RELDATE     "Feb 15, 2010"
+#define DRV_MODULE_VERSION     "2.0.9"
+#define DRV_MODULE_RELDATE     "April 27, 2010"
 #define FW_MIPS_FILE_06                "bnx2/bnx2-mips-06-5.0.0.j6.fw"
 #define FW_RV2P_FILE_06                "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
 #define FW_MIPS_FILE_09                "bnx2/bnx2-mips-09-5.0.0.j9.fw"
@@ -651,9 +651,10 @@ bnx2_napi_enable(struct bnx2 *bp)
 }
 
 static void
-bnx2_netif_stop(struct bnx2 *bp)
+bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
 {
-       bnx2_cnic_stop(bp);
+       if (stop_cnic)
+               bnx2_cnic_stop(bp);
        if (netif_running(bp->dev)) {
                int i;
 
@@ -671,14 +672,15 @@ bnx2_netif_stop(struct bnx2 *bp)
 }
 
 static void
-bnx2_netif_start(struct bnx2 *bp)
+bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
 {
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_tx_wake_all_queues(bp->dev);
                        bnx2_napi_enable(bp);
                        bnx2_enable_int(bp);
-                       bnx2_cnic_start(bp);
+                       if (start_cnic)
+                               bnx2_cnic_start(bp);
                }
        }
 }
@@ -4759,8 +4761,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
                rc = bnx2_alloc_bad_rbuf(bp);
        }
 
-       if (bp->flags & BNX2_FLAG_USING_MSIX)
+       if (bp->flags & BNX2_FLAG_USING_MSIX) {
                bnx2_setup_msix_tbl(bp);
+               /* Prevent MSIX table reads and writes from timing out */
+               REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
+                       BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
+       }
 
        return rc;
 }
@@ -6273,12 +6279,12 @@ bnx2_reset_task(struct work_struct *work)
                return;
        }
 
-       bnx2_netif_stop(bp);
+       bnx2_netif_stop(bp, true);
 
        bnx2_init_nic(bp, 1);
 
        atomic_set(&bp->intr_sem, 1);
-       bnx2_netif_start(bp);
+       bnx2_netif_start(bp, true);
        rtnl_unlock();
 }
 
@@ -6320,7 +6326,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
        struct bnx2 *bp = netdev_priv(dev);
 
        if (netif_running(dev))
-               bnx2_netif_stop(bp);
+               bnx2_netif_stop(bp, false);
 
        bp->vlgrp = vlgrp;
 
@@ -6331,7 +6337,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
        if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
 
-       bnx2_netif_start(bp);
+       bnx2_netif_start(bp, false);
 }
 #endif
 
@@ -7051,9 +7057,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
        bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
 
        if (netif_running(bp->dev)) {
-               bnx2_netif_stop(bp);
+               bnx2_netif_stop(bp, true);
                bnx2_init_nic(bp, 0);
-               bnx2_netif_start(bp);
+               bnx2_netif_start(bp, true);
        }
 
        return 0;
@@ -7083,7 +7089,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
                /* Reset will erase chipset stats; save them */
                bnx2_save_stats(bp);
 
-               bnx2_netif_stop(bp);
+               bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
@@ -7111,7 +7117,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
                        bnx2_setup_cnic_irq_info(bp);
                mutex_unlock(&bp->cnic_lock);
 #endif
-               bnx2_netif_start(bp);
+               bnx2_netif_start(bp, true);
        }
        return 0;
 }
@@ -7364,7 +7370,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;
 
-               bnx2_netif_stop(bp);
+               bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);
 
@@ -7383,7 +7389,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
                        bnx2_shutdown_chip(bp);
                else {
                        bnx2_init_nic(bp, 1);
-                       bnx2_netif_start(bp);
+                       bnx2_netif_start(bp, true);
                }
 
                /* wait for link up */
@@ -8377,7 +8383,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
                return 0;
 
        flush_scheduled_work();
-       bnx2_netif_stop(bp);
+       bnx2_netif_stop(bp, true);
        netif_device_detach(dev);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
@@ -8399,7 +8405,7 @@ bnx2_resume(struct pci_dev *pdev)
        bnx2_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);
        bnx2_init_nic(bp, 1);
-       bnx2_netif_start(bp);
+       bnx2_netif_start(bp, true);
        return 0;
 }
 
@@ -8426,7 +8432,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
        }
 
        if (netif_running(dev)) {
-               bnx2_netif_stop(bp);
+               bnx2_netif_stop(bp, true);
                del_timer_sync(&bp->timer);
                bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
        }
@@ -8483,7 +8489,7 @@ static void bnx2_io_resume(struct pci_dev *pdev)
 
        rtnl_lock();
        if (netif_running(dev))
-               bnx2_netif_start(bp);
+               bnx2_netif_start(bp, true);
 
        netif_device_attach(dev);
        rtnl_unlock();
index 3345109..d800b59 100644 (file)
@@ -1006,7 +1006,7 @@ static int ems_usb_probe(struct usb_interface *intf,
 
        netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS);
        if (!netdev) {
-               dev_err(netdev->dev.parent, "Couldn't alloc candev\n");
+               dev_err(&intf->dev, "ems_usb: Couldn't alloc candev\n");
                return -ENOMEM;
        }
 
@@ -1036,20 +1036,20 @@ static int ems_usb_probe(struct usb_interface *intf,
 
        dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!dev->intr_urb) {
-               dev_err(netdev->dev.parent, "Couldn't alloc intr URB\n");
+               dev_err(&intf->dev, "Couldn't alloc intr URB\n");
                goto cleanup_candev;
        }
 
        dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL);
        if (!dev->intr_in_buffer) {
-               dev_err(netdev->dev.parent, "Couldn't alloc Intr buffer\n");
+               dev_err(&intf->dev, "Couldn't alloc Intr buffer\n");
                goto cleanup_intr_urb;
        }
 
        dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE +
                                     sizeof(struct ems_cpc_msg), GFP_KERNEL);
        if (!dev->tx_msg_buffer) {
-               dev_err(netdev->dev.parent, "Couldn't alloc Tx buffer\n");
+               dev_err(&intf->dev, "Couldn't alloc Tx buffer\n");
                goto cleanup_intr_in_buffer;
        }
 
index 5248f9e..35cd367 100644 (file)
@@ -934,7 +934,7 @@ static struct cphy_ops xaui_direct_ops = {
 int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
                            int phy_addr, const struct mdio_ops *mdio_ops)
 {
-       cphy_init(phy, adapter, MDIO_PRTAD_NONE, &xaui_direct_ops, mdio_ops,
+       cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops,
                  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
                  "10GBASE-CX4");
        return 0;
index aced6c5..e3f1b85 100644 (file)
@@ -439,7 +439,7 @@ static void free_irq_resources(struct adapter *adapter)
 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
 {
-       int attempts = 5;
+       int attempts = 10;
 
        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
index b997e57..7910803 100644 (file)
 #include <linux/ethtool.h>
 #include <linux/string.h>
 #include <linux/firmware.h>
+#include <linux/rtnetlink.h>
 #include <asm/unaligned.h>
 
 
@@ -2265,8 +2266,13 @@ static void e100_tx_timeout_task(struct work_struct *work)
 
        DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
                ioread8(&nic->csr->scb.status));
-       e100_down(netdev_priv(netdev));
-       e100_up(netdev_priv(netdev));
+
+       rtnl_lock();
+       if (netif_running(netdev)) {
+               e100_down(netdev_priv(netdev));
+               e100_up(netdev_priv(netdev));
+       }
+       rtnl_unlock();
 }
 
 static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
index 712ccc6..9015555 100644 (file)
@@ -336,7 +336,6 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
        struct e1000_hw *hw = &adapter->hw;
        static int global_quad_port_a; /* global port a indication */
        struct pci_dev *pdev = adapter->pdev;
-       u16 eeprom_data = 0;
        int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
        s32 rc;
 
@@ -387,16 +386,15 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
                if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)
                        adapter->flags &= ~FLAG_HAS_WOL;
                break;
-
        case e1000_82573:
+       case e1000_82574:
+       case e1000_82583:
+               /* Disable ASPM L0s due to hardware errata */
+               e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L0S);
+
                if (pdev->device == E1000_DEV_ID_82573L) {
-                       if (e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1,
-                                      &eeprom_data) < 0)
-                               break;
-                       if (!(eeprom_data & NVM_WORD1A_ASPM_MASK)) {
-                               adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
-                               adapter->max_hw_frame_size = DEFAULT_JUMBO;
-                       }
+                       adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
+                       adapter->max_hw_frame_size = DEFAULT_JUMBO;
                }
                break;
        default:
@@ -1792,6 +1790,7 @@ struct e1000_info e1000_82571_info = {
                                  | FLAG_RESET_OVERWRITES_LAA /* errata */
                                  | FLAG_TARC_SPEED_MODE_BIT /* errata */
                                  | FLAG_APME_CHECK_PORT_B,
+       .flags2                 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
        .pba                    = 38,
        .max_hw_frame_size      = DEFAULT_JUMBO,
        .get_variants           = e1000_get_variants_82571,
@@ -1809,6 +1808,7 @@ struct e1000_info e1000_82572_info = {
                                  | FLAG_RX_CSUM_ENABLED
                                  | FLAG_HAS_CTRLEXT_ON_LOAD
                                  | FLAG_TARC_SPEED_MODE_BIT, /* errata */
+       .flags2                 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
        .pba                    = 38,
        .max_hw_frame_size      = DEFAULT_JUMBO,
        .get_variants           = e1000_get_variants_82571,
@@ -1820,13 +1820,11 @@ struct e1000_info e1000_82572_info = {
 struct e1000_info e1000_82573_info = {
        .mac                    = e1000_82573,
        .flags                  = FLAG_HAS_HW_VLAN_FILTER
-                                 | FLAG_HAS_JUMBO_FRAMES
                                  | FLAG_HAS_WOL
                                  | FLAG_APME_IN_CTRL3
                                  | FLAG_RX_CSUM_ENABLED
                                  | FLAG_HAS_SMART_POWER_DOWN
                                  | FLAG_HAS_AMT
-                                 | FLAG_HAS_ERT
                                  | FLAG_HAS_SWSM_ON_LOAD,
        .pba                    = 20,
        .max_hw_frame_size      = ETH_FRAME_LEN + ETH_FCS_LEN,
index 118bdf4..ee32b9b 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/io.h>
 #include <linux/netdevice.h>
 #include <linux/pci.h>
+#include <linux/pci-aspm.h>
 
 #include "hw.h"
 
@@ -374,7 +375,7 @@ struct e1000_adapter {
 struct e1000_info {
        enum e1000_mac_type     mac;
        unsigned int            flags;
-       unsigned int            flags2;
+       unsigned int            flags2;
        u32                     pba;
        u32                     max_hw_frame_size;
        s32                     (*get_variants)(struct e1000_adapter *);
@@ -421,6 +422,7 @@ struct e1000_info {
 #define FLAG2_CRC_STRIPPING               (1 << 0)
 #define FLAG2_HAS_PHY_WAKEUP              (1 << 1)
 #define FLAG2_IS_DISCARDING               (1 << 2)
+#define FLAG2_DISABLE_ASPM_L1             (1 << 3)
 
 #define E1000_RX_DESC_PS(R, i)     \
        (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -461,6 +463,7 @@ extern void e1000e_update_stats(struct e1000_adapter *adapter);
 extern bool e1000e_has_link(struct e1000_adapter *adapter);
 extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
 extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
 
 extern unsigned int copybreak;
 
index 73d43c5..fb8fc7d 100644 (file)
@@ -4283,6 +4283,14 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
                return -EINVAL;
        }
 
+       /* 82573 Errata 17 */
+       if (((adapter->hw.mac.type == e1000_82573) ||
+            (adapter->hw.mac.type == e1000_82574)) &&
+           (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
+               adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
+               e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
+       }
+
        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
                msleep(1);
        /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
@@ -4605,29 +4613,39 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
        }
 }
 
-static void e1000e_disable_l1aspm(struct pci_dev *pdev)
+#ifdef CONFIG_PCIEASPM
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+       pci_disable_link_state(pdev, state);
+}
+#else
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
 {
        int pos;
-       u16 val;
+       u16 reg16;
 
        /*
-        * 82573 workaround - disable L1 ASPM on mobile chipsets
-        *
-        * L1 ASPM on various mobile (ich7) chipsets do not behave properly
-        * resulting in lost data or garbage information on the pci-e link
-        * level. This could result in (false) bad EEPROM checksum errors,
-        * long ping times (up to 2s) or even a system freeze/hang.
-        *
-        * Unfortunately this feature saves about 1W power consumption when
-        * active.
+        * Both the device and its parent should have the same ASPM setting.
+        * Disable ASPM in the downstream component first and then in the
+        * upstream one.
         */
-       pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-       pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val);
-       if (val & 0x2) {
-               dev_warn(&pdev->dev, "Disabling L1 ASPM\n");
-               val &= ~0x2;
-               pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val);
-       }
+       pos = pci_pcie_cap(pdev);
+       pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+       reg16 &= ~state;
+       pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+
+       pos = pci_pcie_cap(pdev->bus->self);
+       pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
+       reg16 &= ~state;
+       pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
+}
+#endif
+void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+       dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
+                (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
+                (state & PCIE_LINK_STATE_L1) ? "L1" : "");
+
+       __e1000e_disable_aspm(pdev, state);
 }
 
 #ifdef CONFIG_PM
@@ -4653,7 +4671,8 @@ static int e1000_resume(struct pci_dev *pdev)
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        pci_save_state(pdev);
-       e1000e_disable_l1aspm(pdev);
+       if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+               e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
 
        err = pci_enable_device_mem(pdev);
        if (err) {
@@ -4795,7 +4814,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
        int err;
        pci_ers_result_t result;
 
-       e1000e_disable_l1aspm(pdev);
+       if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+               e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
        err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev,
@@ -4889,13 +4909,6 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
                dev_warn(&adapter->pdev->dev,
                         "Warning: detected DSPD enabled in EEPROM\n");
        }
-
-       ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
-       if (!ret_val && (le16_to_cpu(buf) & (3 << 2))) {
-               /* ASPM enable */
-               dev_warn(&adapter->pdev->dev,
-                        "Warning: detected ASPM enabled in EEPROM\n");
-       }
 }
 
 static const struct net_device_ops e1000e_netdev_ops = {
@@ -4944,7 +4957,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
        u16 eeprom_data = 0;
        u16 eeprom_apme_mask = E1000_EEPROM_APME;
 
-       e1000e_disable_l1aspm(pdev);
+       if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
+               e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
 
        err = pci_enable_device_mem(pdev);
        if (err)
index d5160ed..3acac5f 100644 (file)
@@ -205,8 +205,6 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
 static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
 {
        struct gfar __iomem *enet_regs;
-       u32 __iomem *ioremap_tbipa;
-       u64 addr, size;
 
        /*
         * This is mildly evil, but so is our hardware for doing this.
@@ -220,9 +218,7 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi
                return &enet_regs->tbipa;
        } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
                        of_device_is_compatible(np, "fsl,etsec2-tbi")) {
-               addr = of_translate_address(np, of_get_address(np, 1, &size, NULL));
-               ioremap_tbipa = ioremap(addr, size);
-               return ioremap_tbipa;
+               return of_iomap(np, 1);
        } else
                return NULL;
 }
@@ -279,6 +275,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
        u32 __iomem *tbipa;
        struct mii_bus *new_bus;
        int tbiaddr = -1;
+       const u32 *addrp;
        u64 addr = 0, size = 0;
        int err = 0;
 
@@ -297,8 +294,19 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
        new_bus->priv = priv;
        fsl_pq_mdio_bus_name(new_bus->id, np);
 
+       addrp = of_get_address(np, 0, &size, NULL);
+       if (!addrp) {
+               err = -EINVAL;
+               goto err_free_bus;
+       }
+
        /* Set the PHY base address */
-       addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
+       addr = of_translate_address(np, addrp);
+       if (addr == OF_BAD_ADDR) {
+               err = -EINVAL;
+               goto err_free_bus;
+       }
+
        map = ioremap(addr, size);
        if (!map) {
                err = -ENOMEM;
index 080d1ce..4e97ca1 100644 (file)
@@ -549,12 +549,8 @@ static int gfar_parse_group(struct device_node *np,
                struct gfar_private *priv, const char *model)
 {
        u32 *queue_mask;
-       u64 addr, size;
-
-       addr = of_translate_address(np,
-                       of_get_address(np, 0, &size, NULL));
-       priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
 
+       priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
        if (!priv->gfargrp[priv->num_grps].regs)
                return -ENOMEM;
 
@@ -1515,9 +1511,9 @@ static void gfar_halt_nodisable(struct net_device *dev)
                tempval |= (DMACTRL_GRS | DMACTRL_GTS);
                gfar_write(&regs->dmactrl, tempval);
 
-               while (!(gfar_read(&regs->ievent) &
-                        (IEVENT_GRSC | IEVENT_GTSC)))
-                       cpu_relax();
+               spin_event_timeout(((gfar_read(&regs->ievent) &
+                        (IEVENT_GRSC | IEVENT_GTSC)) ==
+                        (IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
        }
 }
 
index b405a00..12fc0e7 100644 (file)
@@ -39,6 +39,8 @@
 #define IXGBE_82599_MC_TBL_SIZE   128
 #define IXGBE_82599_VFT_TBL_SIZE  128
 
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                                           ixgbe_link_speed speed,
@@ -69,8 +71,14 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
        if (hw->phy.multispeed_fiber) {
                /* Set up dual speed SFP+ support */
                mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+               mac->ops.disable_tx_laser =
+                                      &ixgbe_disable_tx_laser_multispeed_fiber;
+               mac->ops.enable_tx_laser =
+                                       &ixgbe_enable_tx_laser_multispeed_fiber;
                mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
        } else {
+               mac->ops.disable_tx_laser = NULL;
+               mac->ops.enable_tx_laser = NULL;
                mac->ops.flap_tx_laser = NULL;
                if ((mac->ops.get_media_type(hw) ==
                     ixgbe_media_type_backplane) &&
@@ -415,6 +423,44 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
        return status;
 }
 
+/**
+ *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  The base drivers may require better control over SFP+ module
+ *  PHY states.  This includes selectively shutting down the Tx
+ *  laser on the PHY, effectively halting physical link.
+ **/
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+       u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+       /* Disable tx laser; allow 100us to go dark per spec */
+       esdp_reg |= IXGBE_ESDP_SDP3;
+       IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+       IXGBE_WRITE_FLUSH(hw);
+       udelay(100);
+}
+
+/**
+ *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  The base drivers may require better control over SFP+ module
+ *  PHY states.  This includes selectively turning on the Tx
+ *  laser on the PHY, effectively starting physical link.
+ **/
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+       u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+       /* Enable tx laser; allow 100ms to light up */
+       esdp_reg &= ~IXGBE_ESDP_SDP3;
+       IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+       IXGBE_WRITE_FLUSH(hw);
+       msleep(100);
+}
+
 /**
  *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
  *  @hw: pointer to hardware structure
@@ -429,23 +475,11 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
  **/
 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 {
-       u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
-
        hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
 
        if (hw->mac.autotry_restart) {
-               /* Disable tx laser; allow 100us to go dark per spec */
-               esdp_reg |= IXGBE_ESDP_SDP3;
-               IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-               IXGBE_WRITE_FLUSH(hw);
-               udelay(100);
-
-               /* Enable tx laser; allow 100ms to light up */
-               esdp_reg &= ~IXGBE_ESDP_SDP3;
-               IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-               IXGBE_WRITE_FLUSH(hw);
-               msleep(100);
-
+               ixgbe_disable_tx_laser_multispeed_fiber(hw);
+               ixgbe_enable_tx_laser_multispeed_fiber(hw);
                hw->mac.autotry_restart = false;
        }
 }
index 8f677cb..6c00ee4 100644 (file)
@@ -2982,6 +2982,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
        else
                ixgbe_configure_msi_and_legacy(adapter);
 
+       /* enable the optics */
+       if (hw->phy.multispeed_fiber)
+               hw->mac.ops.enable_tx_laser(hw);
+
        clear_bit(__IXGBE_DOWN, &adapter->state);
        ixgbe_napi_enable_all(adapter);
 
@@ -3243,6 +3247,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
        /* signal that we are down to the interrupt handler */
        set_bit(__IXGBE_DOWN, &adapter->state);
 
+       /* power down the optics */
+       if (hw->phy.multispeed_fiber)
+               hw->mac.ops.disable_tx_laser(hw);
+
        /* disable receive for all VFs and wait one second */
        if (adapter->num_vfs) {
                /* ping all the active vfs to let them know we are going down */
@@ -6253,6 +6261,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                goto err_eeprom;
        }
 
+       /* power down the optics */
+       if (hw->phy.multispeed_fiber)
+               hw->mac.ops.disable_tx_laser(hw);
+
        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = &ixgbe_watchdog;
        adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -6400,16 +6412,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
        del_timer_sync(&adapter->sfp_timer);
        cancel_work_sync(&adapter->watchdog_task);
        cancel_work_sync(&adapter->sfp_task);
-       if (adapter->hw.phy.multispeed_fiber) {
-               struct ixgbe_hw *hw = &adapter->hw;
-               /*
-                * Restart clause 37 autoneg, disable and re-enable
-                * the tx laser, to clear & alert the link partner
-                * that it needs to restart autotry
-                */
-               hw->mac.autotry_restart = true;
-               hw->mac.ops.flap_tx_laser(hw);
-       }
        cancel_work_sync(&adapter->multispeed_fiber_task);
        cancel_work_sync(&adapter->sfp_config_module_task);
        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
index 4ec6dc1..534affc 100644 (file)
@@ -2398,6 +2398,8 @@ struct ixgbe_mac_operations {
        s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
 
        /* Link */
+       void (*disable_tx_laser)(struct ixgbe_hw *);
+       void (*enable_tx_laser)(struct ixgbe_hw *);
        void (*flap_tx_laser)(struct ixgbe_hw *);
        s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
        s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
index 13cc1ca..9e9f9b3 100644 (file)
@@ -722,12 +722,14 @@ static void ks8851_tx_work(struct work_struct *work)
                txb = skb_dequeue(&ks->txq);
                last = skb_queue_empty(&ks->txq);
 
-               ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
-               ks8851_wrpkt(ks, txb, last);
-               ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
-               ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
+               if (txb != NULL) {
+                       ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
+                       ks8851_wrpkt(ks, txb, last);
+                       ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+                       ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
 
-               ks8851_done_tx(ks, txb);
+                       ks8851_done_tx(ks, txb);
+               }
        }
 
        mutex_unlock(&ks->lock);
index 3d1d3a7..757f87b 100644 (file)
@@ -781,8 +781,13 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
                  inw(ioaddr + EL3_STATUS));
 
        spin_lock_irqsave(&lp->window_lock, flags);
+
+       dev->stats.tx_bytes += skb->len;
+
+       /* Put out the doubleword header... */
        outw(skb->len, ioaddr + TX_FIFO);
        outw(0, ioaddr + TX_FIFO);
+       /* ... and the packet rounded to a doubleword. */
        outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2);
 
        dev->trans_start = jiffies;
@@ -1021,8 +1026,6 @@ static void update_stats(struct net_device *dev)
        /* BadSSD */                               inb(ioaddr + 12);
        up                                       = inb(ioaddr + 13);
 
-       dev->stats.tx_bytes                     += tx + ((up & 0xf0) << 12);
-
        EL3WINDOW(1);
 }
 
index fd9d6e3..ccc5537 100644 (file)
@@ -1804,23 +1804,30 @@ static void media_check(u_long arg)
     SMC_SELECT_BANK(1);
     media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1;
 
+    SMC_SELECT_BANK(saved_bank);
+    spin_unlock_irqrestore(&smc->lock, flags);
+
     /* Check for pending interrupt with watchdog flag set: with
        this, we can limp along even if the interrupt is blocked */
     if (smc->watchdog++ && ((i>>8) & i)) {
        if (!smc->fast_poll)
            printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+       local_irq_save(flags);
        smc_interrupt(dev->irq, dev);
+       local_irq_restore(flags);
        smc->fast_poll = HZ;
     }
     if (smc->fast_poll) {
        smc->fast_poll--;
        smc->media.expires = jiffies + HZ/100;
        add_timer(&smc->media);
-       SMC_SELECT_BANK(saved_bank);
-       spin_unlock_irqrestore(&smc->lock, flags);
        return;
     }
 
+    spin_lock_irqsave(&smc->lock, flags);
+
+    saved_bank = inw(ioaddr + BANK_SELECT);
+
     if (smc->cfg & CFG_MII_SELECT) {
        if (smc->mii_if.phy_id < 0)
            goto reschedule;
@@ -1978,15 +1985,16 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
        unsigned int ioaddr = dev->base_addr;
        u16 saved_bank = inw(ioaddr + BANK_SELECT);
        int ret;
+       unsigned long flags;
 
-       spin_lock_irq(&smc->lock);
+       spin_lock_irqsave(&smc->lock, flags);
        SMC_SELECT_BANK(3);
        if (smc->cfg & CFG_MII_SELECT)
                ret = mii_ethtool_gset(&smc->mii_if, ecmd);
        else
                ret = smc_netdev_get_ecmd(dev, ecmd);
        SMC_SELECT_BANK(saved_bank);
-       spin_unlock_irq(&smc->lock);
+       spin_unlock_irqrestore(&smc->lock, flags);
        return ret;
 }
 
@@ -1996,15 +2004,16 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
        unsigned int ioaddr = dev->base_addr;
        u16 saved_bank = inw(ioaddr + BANK_SELECT);
        int ret;
+       unsigned long flags;
 
-       spin_lock_irq(&smc->lock);
+       spin_lock_irqsave(&smc->lock, flags);
        SMC_SELECT_BANK(3);
        if (smc->cfg & CFG_MII_SELECT)
                ret = mii_ethtool_sset(&smc->mii_if, ecmd);
        else
                ret = smc_netdev_set_ecmd(dev, ecmd);
        SMC_SELECT_BANK(saved_bank);
-       spin_unlock_irq(&smc->lock);
+       spin_unlock_irqrestore(&smc->lock, flags);
        return ret;
 }
 
@@ -2014,12 +2023,13 @@ static u32 smc_get_link(struct net_device *dev)
        unsigned int ioaddr = dev->base_addr;
        u16 saved_bank = inw(ioaddr + BANK_SELECT);
        u32 ret;
+       unsigned long flags;
 
-       spin_lock_irq(&smc->lock);
+       spin_lock_irqsave(&smc->lock, flags);
        SMC_SELECT_BANK(3);
        ret = smc_link_ok(dev);
        SMC_SELECT_BANK(saved_bank);
-       spin_unlock_irq(&smc->lock);
+       spin_unlock_irqrestore(&smc->lock, flags);
        return ret;
 }
 
@@ -2056,16 +2066,17 @@ static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
        int rc = 0;
        u16 saved_bank;
        unsigned int ioaddr = dev->base_addr;
+       unsigned long flags;
 
        if (!netif_running(dev))
                return -EINVAL;
 
-       spin_lock_irq(&smc->lock);
+       spin_lock_irqsave(&smc->lock, flags);
        saved_bank = inw(ioaddr + BANK_SELECT);
        SMC_SELECT_BANK(3);
        rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL);
        SMC_SELECT_BANK(saved_bank);
-       spin_unlock_irq(&smc->lock);
+       spin_unlock_irqrestore(&smc->lock, flags);
        return rc;
 }
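
The conversions above from spin_lock_irq() to spin_lock_irqsave() matter
because the _irqsave variant records the caller's interrupt state and restores
it on unlock, whereas spin_unlock_irq() re-enables interrupts unconditionally,
which is wrong if they were already disabled on entry. A short kernel-context
fragment of the idiom, with a hypothetical lock and counter:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);
    static unsigned int demo_counter;

    static void demo_update(void)
    {
            unsigned long flags;

            /* Safe whether the caller runs with interrupts enabled or
             * already disabled: the saved flags are restored on unlock. */
            spin_lock_irqsave(&demo_lock, flags);
            demo_counter++;
            spin_unlock_irqrestore(&demo_lock, flags);
    }
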
 
index dbb1f5a..4748c21 100644 (file)
@@ -2759,6 +2759,7 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
 {
        iounmap(ioaddr);
        pci_release_regions(pdev);
+       pci_clear_mwi(pdev);
        pci_disable_device(pdev);
        free_netdev(dev);
 }
@@ -2825,8 +2826,13 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
        spin_lock_irq(&tp->lock);
 
        RTL_W8(Cfg9346, Cfg9346_Unlock);
+
        RTL_W32(MAC4, high);
+       RTL_R32(MAC4);
+
        RTL_W32(MAC0, low);
+       RTL_R32(MAC0);
+
        RTL_W8(Cfg9346, Cfg9346_Lock);
 
        spin_unlock_irq(&tp->lock);
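
The added RTL_R32() calls read the MAC registers straight back after writing them, flushing the posted PCI writes before the configuration register is locked again. A generic sketch of that read-back pattern, with illustrative names rather than the r8169 register macros:

	static void example_mmio_write_flush(void __iomem *ioaddr,
					     unsigned int reg, u32 val)
	{
		writel(val, ioaddr + reg);
		/* Reading the register back forces the write out of any PCI
		 * posting buffers before we rely on its effect. */
		readl(ioaddr + reg);
	}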
@@ -3014,9 +3020,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_free_dev_1;
        }
 
-       rc = pci_set_mwi(pdev);
-       if (rc < 0)
-               goto err_out_disable_2;
+       if (pci_set_mwi(pdev) < 0)
+               netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
 
        /* make sure PCI base addr 1 is MMIO */
        if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
@@ -3024,7 +3029,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                          "region #%d not an MMIO resource, aborting\n",
                          region);
                rc = -ENODEV;
-               goto err_out_mwi_3;
+               goto err_out_mwi_2;
        }
 
        /* check for weird/broken PCI region reporting */
@@ -3032,13 +3037,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                netif_err(tp, probe, dev,
                          "Invalid PCI region size(s), aborting\n");
                rc = -ENODEV;
-               goto err_out_mwi_3;
+               goto err_out_mwi_2;
        }
 
        rc = pci_request_regions(pdev, MODULENAME);
        if (rc < 0) {
                netif_err(tp, probe, dev, "could not request regions\n");
-               goto err_out_mwi_3;
+               goto err_out_mwi_2;
        }
 
        tp->cp_cmd = PCIMulRW | RxChkSum;
@@ -3051,7 +3056,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc < 0) {
                        netif_err(tp, probe, dev, "DMA configuration failed\n");
-                       goto err_out_free_res_4;
+                       goto err_out_free_res_3;
                }
        }
 
@@ -3060,7 +3065,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!ioaddr) {
                netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
                rc = -EIO;
-               goto err_out_free_res_4;
+               goto err_out_free_res_3;
        }
 
        tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
@@ -3102,7 +3107,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (i == ARRAY_SIZE(rtl_chip_info)) {
                dev_err(&pdev->dev,
                        "driver bug, MAC version not found in rtl_chip_info\n");
-               goto err_out_msi_5;
+               goto err_out_msi_4;
        }
        tp->chipset = i;
 
@@ -3167,7 +3172,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        rc = register_netdev(dev);
        if (rc < 0)
-               goto err_out_msi_5;
+               goto err_out_msi_4;
 
        pci_set_drvdata(pdev, dev);
 
@@ -3190,14 +3195,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 out:
        return rc;
 
-err_out_msi_5:
+err_out_msi_4:
        rtl_disable_msi(pdev, tp);
        iounmap(ioaddr);
-err_out_free_res_4:
+err_out_free_res_3:
        pci_release_regions(pdev);
-err_out_mwi_3:
+err_out_mwi_2:
        pci_clear_mwi(pdev);
-err_out_disable_2:
        pci_disable_device(pdev);
 err_out_free_dev_1:
        free_netdev(dev);
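
Most of this hunk is mechanical renumbering: pci_set_mwi() failure is now logged and tolerated instead of aborting the probe, so its dedicated error label disappears and the later labels shift down by one, while pci_clear_mwi() also moves into the release path above. A stripped-down sketch of that numbered-unwind style, using hypothetical step_*() helpers:

	static int example_probe(struct pci_dev *pdev)
	{
		int rc;

		rc = step_one(pdev);		/* e.g. enable the device */
		if (rc < 0)
			goto err_1;

		/* Optional feature: log and carry on rather than bail out,
		 * so it no longer needs an unwind label of its own. */
		if (optional_step(pdev) < 0)
			dev_info(&pdev->dev, "optional step unavailable\n");

		rc = step_two(pdev);		/* e.g. request regions */
		if (rc < 0)
			goto err_2;

		return 0;

	err_2:
		undo_optional_step(pdev);	/* harmless if it never succeeded */
	err_1:
		undo_step_one(pdev);
		return rc;
	}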
index 6486657..649a264 100644 (file)
@@ -1861,6 +1861,7 @@ out:
        }
 
        if (disabled) {
+               dev_close(efx->net_dev);
                EFX_ERR(efx, "has been disabled\n");
                efx->state = STATE_DISABLED;
        } else {
@@ -1884,8 +1885,7 @@ static void efx_reset_work(struct work_struct *data)
        }
 
        rtnl_lock();
-       if (efx_reset(efx, efx->reset_pending))
-               dev_close(efx->net_dev);
+       (void)efx_reset(efx, efx->reset_pending);
        rtnl_unlock();
 }
 
index d294d66..08278e7 100644 (file)
@@ -1320,7 +1320,9 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
 
        EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
 
-       falcon_probe_board(efx, board_rev);
+       rc = falcon_probe_board(efx, board_rev);
+       if (rc)
+               goto fail2;
 
        kfree(nvconfig);
        return 0;
index 5712fdd..c7a933a 100644 (file)
@@ -728,15 +728,7 @@ static const struct falcon_board_type board_types[] = {
        },
 };
 
-static const struct falcon_board_type falcon_dummy_board = {
-       .init           = efx_port_dummy_op_int,
-       .init_phy       = efx_port_dummy_op_void,
-       .fini           = efx_port_dummy_op_void,
-       .set_id_led     = efx_port_dummy_op_set_id_led,
-       .monitor        = efx_port_dummy_op_int,
-};
-
-void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
+int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
 {
        struct falcon_board *board = falcon_board(efx);
        u8 type_id = FALCON_BOARD_TYPE(revision_info);
@@ -754,8 +746,9 @@ void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
                         (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
                         ? board->type->ref_model : board->type->gen_type,
                         'A' + board->major, board->minor);
+               return 0;
        } else {
                EFX_ERR(efx, "unknown board type %d\n", type_id);
-               board->type = &falcon_dummy_board;
+               return -ENODEV;
        }
 }
index 9351c03..3166baf 100644 (file)
@@ -156,7 +156,7 @@ extern struct efx_nic_type siena_a0_nic_type;
  **************************************************************************
  */
 
-extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
 
 /* TX data path */
 extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
index 38dcc42..e0c46f5 100644 (file)
@@ -456,8 +456,17 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
 
 static void siena_update_nic_stats(struct efx_nic *efx)
 {
-       while (siena_try_update_nic_stats(efx) == -EAGAIN)
-               cpu_relax();
+       int retry;
+
+       /* If we're unlucky enough to read statistics during the DMA, wait
+        * up to 10ms for it to finish (typically takes <500us) */
+       for (retry = 0; retry < 100; ++retry) {
+               if (siena_try_update_nic_stats(efx) == 0)
+                       return;
+               udelay(100);
+       }
+
+       /* Use the old values instead */
 }
 
 static void siena_start_nic_stats(struct efx_nic *efx)
index 22cf1c4..ecc41cf 100644 (file)
@@ -8633,6 +8633,7 @@ static int tg3_test_msi(struct tg3 *tp)
        pci_disable_msi(tp->pdev);
 
        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+       tp->napi[0].irq_vec = tp->pdev->irq;
 
        err = tg3_request_irq(tp, 0);
        if (err)
index ba56ce4..5d58abc 100644 (file)
@@ -385,4 +385,26 @@ config USB_CDC_PHONET
          cellular modem, as found on most Nokia handsets with the
          "PC suite" USB profile.
 
+config USB_IPHETH
+       tristate "Apple iPhone USB Ethernet driver"
+       default n
+       ---help---
+         Module used to share Internet connection (tethering) from your
+         iPhone (Original, 3G and 3GS) to your system.
+         Note that you also need userspace libraries and programs to pair
+         your device with your system and to speak the iPhone protocol.
+
+         For more information: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver
+
+config USB_SIERRA_NET
+       tristate "USB-to-WWAN Driver for Sierra Wireless modems"
+       depends on USB_USBNET
+       default y
+       help
+         Choose this option if you have a Sierra Wireless USB-to-WWAN device.
+
+         To compile this driver as a module, choose M here: the
+         module will be called sierra_net.
+
 endmenu
index 82ea629..b13a279 100644 (file)
@@ -23,4 +23,6 @@ obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
 obj-$(CONFIG_USB_USBNET)       += usbnet.o
 obj-$(CONFIG_USB_NET_INT51X1)  += int51x1.o
 obj-$(CONFIG_USB_CDC_PHONET)   += cdc-phonet.o
+obj-$(CONFIG_USB_IPHETH)       += ipheth.o
+obj-$(CONFIG_USB_SIERRA_NET)   += sierra_net.o
 
index c8cdb7f..3547cf1 100644 (file)
@@ -431,6 +431,7 @@ static const struct driver_info mbm_info = {
        .bind =         cdc_bind,
        .unbind =       usbnet_cdc_unbind,
        .status =       cdc_status,
+       .manage_power = cdc_manage_power,
 };
 
 /*-------------------------------------------------------------------------*/
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
new file mode 100644 (file)
index 0000000..418825d
--- /dev/null
@@ -0,0 +1,569 @@
+/*
+ * ipheth.c - Apple iPhone USB Ethernet driver
+ *
+ * Copyright (c) 2009 Diego Giagio <diego@giagio.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of GIAGIO.COM nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ *
+ * Attention: iPhone device must be paired, otherwise it won't respond to our
+ * driver. For more info: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/usb.h>
+#include <linux/workqueue.h>
+
+#define USB_VENDOR_APPLE        0x05ac
+#define USB_PRODUCT_IPHONE      0x1290
+#define USB_PRODUCT_IPHONE_3G   0x1292
+#define USB_PRODUCT_IPHONE_3GS  0x1294
+
+#define IPHETH_USBINTF_CLASS    255
+#define IPHETH_USBINTF_SUBCLASS 253
+#define IPHETH_USBINTF_PROTO    1
+
+#define IPHETH_BUF_SIZE         1516
+#define IPHETH_TX_TIMEOUT       (5 * HZ)
+
+#define IPHETH_INTFNUM          2
+#define IPHETH_ALT_INTFNUM      1
+
+#define IPHETH_CTRL_ENDP        0x00
+#define IPHETH_CTRL_BUF_SIZE    0x40
+#define IPHETH_CTRL_TIMEOUT     (5 * HZ)
+
+#define IPHETH_CMD_GET_MACADDR   0x00
+#define IPHETH_CMD_CARRIER_CHECK 0x45
+
+#define IPHETH_CARRIER_CHECK_TIMEOUT round_jiffies_relative(1 * HZ)
+#define IPHETH_CARRIER_ON       0x04
+
+static struct usb_device_id ipheth_table[] = {
+       { USB_DEVICE_AND_INTERFACE_INFO(
+               USB_VENDOR_APPLE, USB_PRODUCT_IPHONE,
+               IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+               IPHETH_USBINTF_PROTO) },
+       { USB_DEVICE_AND_INTERFACE_INFO(
+               USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3G,
+               IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+               IPHETH_USBINTF_PROTO) },
+       { USB_DEVICE_AND_INTERFACE_INFO(
+               USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS,
+               IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+               IPHETH_USBINTF_PROTO) },
+       { }
+};
+MODULE_DEVICE_TABLE(usb, ipheth_table);
+
+struct ipheth_device {
+       struct usb_device *udev;
+       struct usb_interface *intf;
+       struct net_device *net;
+       struct sk_buff *tx_skb;
+       struct urb *tx_urb;
+       struct urb *rx_urb;
+       unsigned char *tx_buf;
+       unsigned char *rx_buf;
+       unsigned char *ctrl_buf;
+       u8 bulk_in;
+       u8 bulk_out;
+       struct delayed_work carrier_work;
+};
+
+static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags);
+
+static int ipheth_alloc_urbs(struct ipheth_device *iphone)
+{
+       struct urb *tx_urb = NULL;
+       struct urb *rx_urb = NULL;
+       u8 *tx_buf = NULL;
+       u8 *rx_buf = NULL;
+
+       tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+       if (tx_urb == NULL)
+               goto error_nomem;
+
+       rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+       if (rx_urb == NULL)
+               goto free_tx_urb;
+
+       tx_buf = usb_buffer_alloc(iphone->udev,
+                                 IPHETH_BUF_SIZE,
+                                 GFP_KERNEL,
+                                 &tx_urb->transfer_dma);
+       if (tx_buf == NULL)
+               goto free_rx_urb;
+
+       rx_buf = usb_buffer_alloc(iphone->udev,
+                                 IPHETH_BUF_SIZE,
+                                 GFP_KERNEL,
+                                 &rx_urb->transfer_dma);
+       if (rx_buf == NULL)
+               goto free_tx_buf;
+
+
+       iphone->tx_urb = tx_urb;
+       iphone->rx_urb = rx_urb;
+       iphone->tx_buf = tx_buf;
+       iphone->rx_buf = rx_buf;
+       return 0;
+
+free_tx_buf:
+       usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, tx_buf,
+                       tx_urb->transfer_dma);
+free_rx_urb:
+       usb_free_urb(rx_urb);
+free_tx_urb:
+       usb_free_urb(tx_urb);
+error_nomem:
+       return -ENOMEM;
+}
+
+static void ipheth_free_urbs(struct ipheth_device *iphone)
+{
+       usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
+                       iphone->rx_urb->transfer_dma);
+       usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
+                       iphone->tx_urb->transfer_dma);
+       usb_free_urb(iphone->rx_urb);
+       usb_free_urb(iphone->tx_urb);
+}
+
+static void ipheth_kill_urbs(struct ipheth_device *dev)
+{
+       usb_kill_urb(dev->tx_urb);
+       usb_kill_urb(dev->rx_urb);
+}
+
+static void ipheth_rcvbulk_callback(struct urb *urb)
+{
+       struct ipheth_device *dev;
+       struct sk_buff *skb;
+       int status;
+       char *buf;
+       int len;
+
+       dev = urb->context;
+       if (dev == NULL)
+               return;
+
+       status = urb->status;
+       switch (status) {
+       case -ENOENT:
+       case -ECONNRESET:
+       case -ESHUTDOWN:
+               return;
+       case 0:
+               break;
+       default:
+               err("%s: urb status: %d", __func__, urb->status);
+               return;
+       }
+
+       len = urb->actual_length;
+       buf = urb->transfer_buffer;
+
+       skb = dev_alloc_skb(NET_IP_ALIGN + len);
+       if (!skb) {
+               err("%s: dev_alloc_skb: -ENOMEM", __func__);
+               dev->net->stats.rx_dropped++;
+               return;
+       }
+
+       skb_reserve(skb, NET_IP_ALIGN);
+       memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN);
+       skb->dev = dev->net;
+       skb->protocol = eth_type_trans(skb, dev->net);
+
+       dev->net->stats.rx_packets++;
+       dev->net->stats.rx_bytes += len;
+
+       netif_rx(skb);
+       ipheth_rx_submit(dev, GFP_ATOMIC);
+}
+
+static void ipheth_sndbulk_callback(struct urb *urb)
+{
+       struct ipheth_device *dev;
+
+       dev = urb->context;
+       if (dev == NULL)
+               return;
+
+       if (urb->status != 0 &&
+           urb->status != -ENOENT &&
+           urb->status != -ECONNRESET &&
+           urb->status != -ESHUTDOWN)
+               err("%s: urb status: %d", __func__, urb->status);
+
+       dev_kfree_skb_irq(dev->tx_skb);
+       netif_wake_queue(dev->net);
+}
+
+static int ipheth_carrier_set(struct ipheth_device *dev)
+{
+       struct usb_device *udev = dev->udev;
+       int retval;
+
+       retval = usb_control_msg(udev,
+                       usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
+                       IPHETH_CMD_CARRIER_CHECK, /* request */
+                       0xc0, /* request type */
+                       0x00, /* value */
+                       0x02, /* index */
+                       dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE,
+                       IPHETH_CTRL_TIMEOUT);
+       if (retval < 0) {
+               err("%s: usb_control_msg: %d", __func__, retval);
+               return retval;
+       }
+
+       if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON)
+               netif_carrier_on(dev->net);
+       else
+               netif_carrier_off(dev->net);
+
+       return 0;
+}
+
+static void ipheth_carrier_check_work(struct work_struct *work)
+{
+       struct ipheth_device *dev = container_of(work, struct ipheth_device,
+                                                carrier_work.work);
+
+       ipheth_carrier_set(dev);
+       schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT);
+}
+
+static int ipheth_get_macaddr(struct ipheth_device *dev)
+{
+       struct usb_device *udev = dev->udev;
+       struct net_device *net = dev->net;
+       int retval;
+
+       retval = usb_control_msg(udev,
+                                usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
+                                IPHETH_CMD_GET_MACADDR, /* request */
+                                0xc0, /* request type */
+                                0x00, /* value */
+                                0x02, /* index */
+                                dev->ctrl_buf,
+                                IPHETH_CTRL_BUF_SIZE,
+                                IPHETH_CTRL_TIMEOUT);
+       if (retval < 0) {
+               err("%s: usb_control_msg: %d", __func__, retval);
+       } else if (retval < ETH_ALEN) {
+               err("%s: usb_control_msg: short packet: %d bytes",
+                       __func__, retval);
+               retval = -EINVAL;
+       } else {
+               memcpy(net->dev_addr, dev->ctrl_buf, ETH_ALEN);
+               retval = 0;
+       }
+
+       return retval;
+}
+
+static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags)
+{
+       struct usb_device *udev = dev->udev;
+       int retval;
+
+       usb_fill_bulk_urb(dev->rx_urb, udev,
+                         usb_rcvbulkpipe(udev, dev->bulk_in),
+                         dev->rx_buf, IPHETH_BUF_SIZE,
+                         ipheth_rcvbulk_callback,
+                         dev);
+       dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+       retval = usb_submit_urb(dev->rx_urb, mem_flags);
+       if (retval)
+               err("%s: usb_submit_urb: %d", __func__, retval);
+       return retval;
+}
+
+static int ipheth_open(struct net_device *net)
+{
+       struct ipheth_device *dev = netdev_priv(net);
+       struct usb_device *udev = dev->udev;
+       int retval = 0;
+
+       usb_set_interface(udev, IPHETH_INTFNUM, IPHETH_ALT_INTFNUM);
+
+       retval = ipheth_carrier_set(dev);
+       if (retval)
+               return retval;
+
+       retval = ipheth_rx_submit(dev, GFP_KERNEL);
+       if (retval)
+               return retval;
+
+       schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT);
+       netif_start_queue(net);
+       return retval;
+}
+
+static int ipheth_close(struct net_device *net)
+{
+       struct ipheth_device *dev = netdev_priv(net);
+
+       cancel_delayed_work_sync(&dev->carrier_work);
+       netif_stop_queue(net);
+       return 0;
+}
+
+static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
+{
+       struct ipheth_device *dev = netdev_priv(net);
+       struct usb_device *udev = dev->udev;
+       int retval;
+
+       /* Paranoid */
+       if (skb->len > IPHETH_BUF_SIZE) {
+               WARN(1, "%s: skb too large: %d bytes", __func__, skb->len);
+               dev->net->stats.tx_dropped++;
+               dev_kfree_skb_irq(skb);
+               return NETDEV_TX_OK;
+       }
+
+       memcpy(dev->tx_buf, skb->data, skb->len);
+       if (skb->len < IPHETH_BUF_SIZE)
+               memset(dev->tx_buf + skb->len, 0, IPHETH_BUF_SIZE - skb->len);
+
+       usb_fill_bulk_urb(dev->tx_urb, udev,
+                         usb_sndbulkpipe(udev, dev->bulk_out),
+                         dev->tx_buf, IPHETH_BUF_SIZE,
+                         ipheth_sndbulk_callback,
+                         dev);
+       dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+       retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC);
+       if (retval) {
+               err("%s: usb_submit_urb: %d", __func__, retval);
+               dev->net->stats.tx_errors++;
+               dev_kfree_skb_irq(skb);
+       } else {
+               dev->tx_skb = skb;
+
+               dev->net->stats.tx_packets++;
+               dev->net->stats.tx_bytes += skb->len;
+               netif_stop_queue(net);
+       }
+
+       return NETDEV_TX_OK;
+}
+
+static void ipheth_tx_timeout(struct net_device *net)
+{
+       struct ipheth_device *dev = netdev_priv(net);
+
+       err("%s: TX timeout", __func__);
+       dev->net->stats.tx_errors++;
+       usb_unlink_urb(dev->tx_urb);
+}
+
+static struct net_device_stats *ipheth_stats(struct net_device *net)
+{
+       struct ipheth_device *dev = netdev_priv(net);
+       return &dev->net->stats;
+}
+
+static u32 ipheth_ethtool_op_get_link(struct net_device *net)
+{
+       struct ipheth_device *dev = netdev_priv(net);
+       return netif_carrier_ok(dev->net);
+}
+
+static struct ethtool_ops ops = {
+       .get_link = ipheth_ethtool_op_get_link
+};
+
+static const struct net_device_ops ipheth_netdev_ops = {
+       .ndo_open = &ipheth_open,
+       .ndo_stop = &ipheth_close,
+       .ndo_start_xmit = &ipheth_tx,
+       .ndo_tx_timeout = &ipheth_tx_timeout,
+       .ndo_get_stats = &ipheth_stats,
+};
+
+static struct device_type ipheth_type = {
+       .name   = "wwan",
+};
+
+static int ipheth_probe(struct usb_interface *intf,
+                       const struct usb_device_id *id)
+{
+       struct usb_device *udev = interface_to_usbdev(intf);
+       struct usb_host_interface *hintf;
+       struct usb_endpoint_descriptor *endp;
+       struct ipheth_device *dev;
+       struct net_device *netdev;
+       int i;
+       int retval;
+
+       netdev = alloc_etherdev(sizeof(struct ipheth_device));
+       if (!netdev)
+               return -ENOMEM;
+
+       netdev->netdev_ops = &ipheth_netdev_ops;
+       netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
+       strcpy(netdev->name, "wwan%d");
+
+       dev = netdev_priv(netdev);
+       dev->udev = udev;
+       dev->net = netdev;
+       dev->intf = intf;
+
+       /* Set up endpoints */
+       hintf = usb_altnum_to_altsetting(intf, IPHETH_ALT_INTFNUM);
+       if (hintf == NULL) {
+               retval = -ENODEV;
+               err("Unable to find alternate settings interface");
+               goto err_endpoints;
+       }
+
+       for (i = 0; i < hintf->desc.bNumEndpoints; i++) {
+               endp = &hintf->endpoint[i].desc;
+               if (usb_endpoint_is_bulk_in(endp))
+                       dev->bulk_in = endp->bEndpointAddress;
+               else if (usb_endpoint_is_bulk_out(endp))
+                       dev->bulk_out = endp->bEndpointAddress;
+       }
+       if (!(dev->bulk_in && dev->bulk_out)) {
+               retval = -ENODEV;
+               err("Unable to find endpoints");
+               goto err_endpoints;
+       }
+
+       dev->ctrl_buf = kmalloc(IPHETH_CTRL_BUF_SIZE, GFP_KERNEL);
+       if (dev->ctrl_buf == NULL) {
+               retval = -ENOMEM;
+               goto err_alloc_ctrl_buf;
+       }
+
+       retval = ipheth_get_macaddr(dev);
+       if (retval)
+               goto err_get_macaddr;
+
+       INIT_DELAYED_WORK(&dev->carrier_work, ipheth_carrier_check_work);
+
+       retval = ipheth_alloc_urbs(dev);
+       if (retval) {
+               err("error allocating urbs: %d", retval);
+               goto err_alloc_urbs;
+       }
+
+       usb_set_intfdata(intf, dev);
+
+       SET_NETDEV_DEV(netdev, &intf->dev);
+       SET_ETHTOOL_OPS(netdev, &ops);
+       SET_NETDEV_DEVTYPE(netdev, &ipheth_type);
+
+       retval = register_netdev(netdev);
+       if (retval) {
+               err("error registering netdev: %d", retval);
+               retval = -EIO;
+               goto err_register_netdev;
+       }
+
+       dev_info(&intf->dev, "Apple iPhone USB Ethernet device attached\n");
+       return 0;
+
+err_register_netdev:
+       ipheth_free_urbs(dev);
+err_alloc_urbs:
+err_get_macaddr:
+err_alloc_ctrl_buf:
+       kfree(dev->ctrl_buf);
+err_endpoints:
+       free_netdev(netdev);
+       return retval;
+}
+
+static void ipheth_disconnect(struct usb_interface *intf)
+{
+       struct ipheth_device *dev;
+
+       dev = usb_get_intfdata(intf);
+       if (dev != NULL) {
+               unregister_netdev(dev->net);
+               ipheth_kill_urbs(dev);
+               ipheth_free_urbs(dev);
+               kfree(dev->ctrl_buf);
+               free_netdev(dev->net);
+       }
+       usb_set_intfdata(intf, NULL);
+       dev_info(&intf->dev, "Apple iPhone USB Ethernet now disconnected\n");
+}
+
+static struct usb_driver ipheth_driver = {
+       .name =         "ipheth",
+       .probe =        ipheth_probe,
+       .disconnect =   ipheth_disconnect,
+       .id_table =     ipheth_table,
+};
+
+static int __init ipheth_init(void)
+{
+       int retval;
+
+       retval = usb_register(&ipheth_driver);
+       if (retval) {
+               err("usb_register failed: %d", retval);
+               return retval;
+       }
+       return 0;
+}
+
+static void __exit ipheth_exit(void)
+{
+       usb_deregister(&ipheth_driver);
+}
+
+module_init(ipheth_init);
+module_exit(ipheth_exit);
+
+MODULE_AUTHOR("Diego Giagio <diego@giagio.com>");
+MODULE_DESCRIPTION("Apple iPhone USB Ethernet driver");
+MODULE_LICENSE("Dual BSD/GPL");